diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000000..93c74283ba2fb --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,41 @@ +// For format details, see https://containers.dev/implementors/spec/. For config options, see the README at: +// https://github.com/devcontainers/images/tree/main/src/base-debian +{ + "name": "Kubernetes with e2e testing image build and push", + "image": "mcr.microsoft.com/devcontainers/base:bullseye", + // Setup the go environment and mount into the dev container at the expected location + "hostRequirements": { + "cpus": 4 + }, + // Copy over welcome message and install pyyaml + "onCreateCommand": "bash .devcontainer/setup.sh", + // for Kubernetes testing, suppress extraneous forwarding messages + "otherPortsAttributes": { + "onAutoForward": "silent" + }, + "remoteUser": "root", + // Install utils, go, docker-in-docker, and other tools + // For more info about dev container Features see https://containers.dev/features + "features": { + "ghcr.io/devcontainers/features/go:1": { + "version": "1.21.1", + "username": "root" + }, + // Install kubectl, but skip minikube and helm + "ghcr.io/devcontainers/features/kubectl-helm-minikube:1": { + "version": "latest", + "helm": "none", + "minikube": "none" + }, + // Enable running docker in docker + "ghcr.io/devcontainers/features/docker-in-docker:2": {}, + "ghcr.io/devcontainers/features/sshd:1": {}, + "ghcr.io/devcontainers/features/github-cli:1": {}, + // Python required for some hack tools + "ghcr.io/devcontainers/features/python:1": {}, + // TODO - move these features from local to their projects, and publish them + "./local-features/kubectl-kind": {}, + "./local-features/etcd": {}, + "./local-features/kubetest2": {} + } +} \ No newline at end of file diff --git a/.devcontainer/local-features/etcd/devcontainer-feature.json b/.devcontainer/local-features/etcd/devcontainer-feature.json new file mode 100644 index 0000000000000..0db9284d3566e --- /dev/null +++ b/.devcontainer/local-features/etcd/devcontainer-feature.json @@ -0,0 +1,21 @@ +{ + "id": "etcd", + "name": "ETCD", + "description": "Operating etcd clusters for Kubernetes", + "options": { + "version": { + "type": "string", + "proposals": [ + "latest" + ], + "default": "latest", + "description": "Select or enter a version." + } + }, + "containerEnv": { + "PATH": "/usr/local/etcd:${PATH}" + }, + "installsAfter": [ + "ghcr.io/devcontainers/features/common-utils" + ] +} \ No newline at end of file diff --git a/.devcontainer/local-features/etcd/install.sh b/.devcontainer/local-features/etcd/install.sh new file mode 100644 index 0000000000000..93827e2071d29 --- /dev/null +++ b/.devcontainer/local-features/etcd/install.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +# Copyright 2022 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Only supports Linux + +set -eux + +ETCD_VERSION="${VERSION:-"latest"}" + +# Figure out correct version if a three part version number is not passed +find_version_from_git_tags() { + local variable_name=$1 + local requested_version=${!variable_name} + if [ "${requested_version}" = "none" ]; then return; fi + local repository=$2 + local prefix=${3:-"tags/v"} + local separator=${4:-"."} + local last_part_optional=${5:-"false"} + if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then + local escaped_separator=${separator//./\\.} + local last_part + if [ "${last_part_optional}" = "true" ]; then + last_part="(${escaped_separator}[0-9]+)?" + else + last_part="${escaped_separator}[0-9]+" + fi + local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$" + # shellcheck disable=SC2155 + local version_list="$(git ls-remote --tags "${repository}" | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)" + if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then + # shellcheck disable=SC2086 + declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)" + else + set +e + # shellcheck disable=SC2086 + declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")" + set -e + fi + fi + if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then + echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2 + exit 1 + fi + # shellcheck disable=SC1079 + echo "${variable_name}=${!variable_name}" +} + +apt-get update +apt-get -y install --no-install-recommends curl tar + +# Get closest match for version number specified +find_version_from_git_tags ETCD_VERSION "https://github.com/etcd-io/etcd" + +# Installs etcd in /usr/local/etcd +echo "Installing etcd ${ETCD_VERSION}..." + +architecture="$(uname -m)" +case "${architecture}" in + "x86_64") architecture="amd64" + ;; + "aarch64" | "armv8*") architecture="arm64" + ;; + *) echo "(!) Architecture ${architecture} unsupported"; + exit 1 + ;; +esac + +# shellcheck disable=SC1079 +FILE_NAME="etcd-v${ETCD_VERSION}-linux-${architecture}.tar.gz" +curl -sSL -o "${FILE_NAME}" "https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/${FILE_NAME}" +tar xzf "${FILE_NAME}" + +# shellcheck disable=SC1073 +mv "etcd-v${ETCD_VERSION}-linux-${architecture}" /usr/local/etcd +rm -rf "${FILE_NAME}" + +# Installs etcd in /usr/local/bin so we don't have to futz with the path. +install -m755 /usr/local/etcd/etcd /usr/local/bin/etcd +install -m755 /usr/local/etcd/etcdctl /usr/local/bin/etcdctl +install -m755 /usr/local/etcd/etcdutl /usr/local/bin/etcdutl \ No newline at end of file diff --git a/.devcontainer/local-features/kubectl-kind/devcontainer-feature.json b/.devcontainer/local-features/kubectl-kind/devcontainer-feature.json new file mode 100644 index 0000000000000..ee04f957d0935 --- /dev/null +++ b/.devcontainer/local-features/kubectl-kind/devcontainer-feature.json @@ -0,0 +1,18 @@ +{ + "id": "kubectl-kind", + "name": "Kubectl kind", + "description": "Installs kind - a tool for running local Kubernetes clusters using Docker container 'nodes'", + "options": { + "version": { + "type": "string", + "proposals": [ + "latest" + ], + "default": "latest", + "description": "Select or enter a version." 
+ } + }, + "installsAfter": [ + "ghcr.io/devcontainers/features/go" + ] +} \ No newline at end of file diff --git a/.devcontainer/local-features/kubectl-kind/install.sh b/.devcontainer/local-features/kubectl-kind/install.sh new file mode 100644 index 0000000000000..1d9e0dc783b58 --- /dev/null +++ b/.devcontainer/local-features/kubectl-kind/install.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# only supports Linux + +set -eux + +KIND_VERSION="${VERSION:-"latest"}" + +# Figure out correct version if a three part version number is not passed +find_version_from_git_tags() { + local variable_name=$1 + local requested_version=${!variable_name} + if [ "${requested_version}" = "none" ]; then return; fi + local repository=$2 + local prefix=${3:-"tags/v"} + local separator=${4:-"."} + local last_part_optional=${5:-"false"} + if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then + local escaped_separator=${separator//./\\.} + local last_part + if [ "${last_part_optional}" = "true" ]; then + last_part="(${escaped_separator}[0-9]+)?" + else + last_part="${escaped_separator}[0-9]+" + fi + local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$" + # shellcheck disable=SC2155 + local version_list="$(git ls-remote --tags "${repository}" | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)" + if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then + # shellcheck disable=SC2086 + declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)" + else + set +e + # shellcheck disable=SC2086 + declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")" + set -e + fi + fi + if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then + echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2 + exit 1 + fi + echo "${variable_name}=${!variable_name}" +} + +# Clean up +rm -rf /var/lib/apt/lists/* + +# Get closest match for version number specified +find_version_from_git_tags KIND_VERSION "https://github.com/kubernetes-sigs/kind" + +echo "Installing kind ${KIND_VERSION}..." 
+ +# Install kind +go install sigs.k8s.io/kind@v"${KIND_VERSION}" + +chown -R "${_REMOTE_USER}:golang" "${GOPATH}" +chmod -R g+r+w "${GOPATH}" +find "${GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s \ No newline at end of file diff --git a/.devcontainer/local-features/kubetest2/devcontainer-feature.json b/.devcontainer/local-features/kubetest2/devcontainer-feature.json new file mode 100644 index 0000000000000..70170e00d078a --- /dev/null +++ b/.devcontainer/local-features/kubetest2/devcontainer-feature.json @@ -0,0 +1,8 @@ +{ + "id": "kubetest2", + "name": "Kubetest2", + "description": "Kubetest2 is a framework for deploying Kubernetes clusters and running end-to-end tests", + "installsAfter": [ + "ghcr.io/devcontainers/features/go" + ] +} \ No newline at end of file diff --git a/.devcontainer/local-features/kubetest2/install.sh b/.devcontainer/local-features/kubetest2/install.sh new file mode 100644 index 0000000000000..8f50cb76a0980 --- /dev/null +++ b/.devcontainer/local-features/kubetest2/install.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# only supports Linux + +set -eux + +# Clean up +rm -rf /var/lib/apt/lists/* + +echo "Installing kubetest2..." +go install sigs.k8s.io/kubetest2/...@latest + +apt-get update +apt-get -y install --no-install-recommends curl apt-transport-https ca-certificates gnupg +echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list + +curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo tee /usr/share/keyrings/cloud.google.gpg +apt-get update && apt-get install -y google-cloud-cli + +chown -R "${_REMOTE_USER}:golang" "${GOPATH}" +chmod -R g+r+w "${GOPATH}" +find "${GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s \ No newline at end of file diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh new file mode 100644 index 0000000000000..4a7cb9aa9c09d --- /dev/null +++ b/.devcontainer/setup.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Copyright 2023 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -eux + +cd "$(dirname "$0")" +cd ../ + +# Install pyyaml as required by verify.publishing-bot +python3 -m pip install --user --upgrade --no-cache-dir pyyaml + +# Copies over welcome message +cp .devcontainer/welcome-message.txt /usr/local/etc/vscode-dev-containers/first-run-notice.txt + +git remote add upstream https://github.com/kubernetes/kubernetes.git || true +# Never push to upstream master +git remote set-url --push upstream no_push \ No newline at end of file diff --git a/.devcontainer/welcome-message.txt b/.devcontainer/welcome-message.txt new file mode 100644 index 0000000000000..8072daf3f35f4 --- /dev/null +++ b/.devcontainer/welcome-message.txt @@ -0,0 +1,20 @@ +👋 Welcome to Kubernetes contribution in a dev container! + Works in GitHub Codespaces, VS Code, or in Docker using the devcontainer CLI + +See https://www.kubernetes.dev/docs/onboarding/ for guidance on contributing to Kubernetes + +This Debian dev container image satisfies https://github.com/kubernetes/community/blob/master/contributors/devel/development.md and includes: + - kubernetes/kubernetes repository + - Docker + - go + - kubectl, etcd, kubetest2, and kind +For details about dev containers and the Debian dev container base image see https://containers.dev and https://github.com/devcontainers/images/tree/main/src/base-debian. +The configuration for the dev container is in the .devcontainer folder. (will be moved to prow) +🎵 By default in Codespaces this environment uses a 4-core machine. Some tests may require a larger machine. In Codespaces you can change the machine type. + See https://docs.github.com/en/codespaces/customizing-your-codespace/changing-the-machine-type-for-your-codespace + +⚙️ If you are working in Codespaces on your own fork, this environment is automatically configured to support the GitHub + workflow https://www.kubernetes.dev/docs/guide/github-workflow/ (omit the clone step) +↪️ Otherwise Codespaces will automatically fork the repository for you when you make your first push + +🔍 To explore VS Code to its fullest, search using the Command Palette (Cmd/Ctrl + Shift + P or F1). 
\ No newline at end of file diff --git a/.gitignore b/.gitignore index 844f250402516..32317a564c5b4 100644 --- a/.gitignore +++ b/.gitignore @@ -91,8 +91,9 @@ network_closure.sh /_tmp/ /doc_tmp/ -# Test artifacts produced by Jenkins jobs +# Test artifacts produced by Prow/kubetest2 jobs /_artifacts/ +/_rundir/ # Go dependencies installed on Jenkins /_gopath/ diff --git a/.go-version b/.go-version index 87967a790d613..49340f5ac7b4d 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.21.1 \ No newline at end of file +1.21.3 \ No newline at end of file diff --git a/CHANGELOG/CHANGELOG-1.25.md b/CHANGELOG/CHANGELOG-1.25.md index 61c108affc111..544032a094849 100644 --- a/CHANGELOG/CHANGELOG-1.25.md +++ b/CHANGELOG/CHANGELOG-1.25.md @@ -1,48 +1,49 @@ -- [v1.25.13](#v12513) - - [Downloads for v1.25.13](#downloads-for-v12513) +- [v1.25.15](#v12515) + - [Downloads for v1.25.15](#downloads-for-v12515) - [Source Code](#source-code) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - [Container Images](#container-images) - - [Changelog since v1.25.12](#changelog-since-v12512) - - [Important Security Information](#important-security-information) - - [CVE-2023-3955: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3955-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) - - [CVE-2023-3676: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3676-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) + - [Changelog since v1.25.14](#changelog-since-v12514) - [Changes by Kind](#changes-by-kind) - [Feature](#feature) - [Bug or Regression](#bug-or-regression) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies) - [Added](#added) - [Changed](#changed) - [Removed](#removed) -- [v1.25.12](#v12512) - - [Downloads for v1.25.12](#downloads-for-v12512) +- [v1.25.14](#v12514) + - [Downloads for v1.25.14](#downloads-for-v12514) - [Source Code](#source-code-1) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - [Container Images](#container-images-1) - - [Changelog since v1.25.11](#changelog-since-v12511) + - [Changelog since v1.25.13](#changelog-since-v12513) - [Changes by Kind](#changes-by-kind-1) + - [API Change](#api-change) - [Feature](#feature-1) - [Bug or Regression](#bug-or-regression-1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-1) - [Added](#added-1) - [Changed](#changed-1) - [Removed](#removed-1) -- [v1.25.11](#v12511) - - [Downloads for v1.25.11](#downloads-for-v12511) +- [v1.25.13](#v12513) + - [Downloads for v1.25.13](#downloads-for-v12513) - [Source Code](#source-code-2) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - [Container Images](#container-images-2) - - [Changelog since v1.25.10](#changelog-since-v12510) - - [Important Security Information](#important-security-information-1) - - [CVE-2023-2728: Bypassing enforce mountable secrets policy imposed by the ServiceAccount admission plugin](#cve-2023-2728-bypassing-enforce-mountable-secrets-policy-imposed-by-the-serviceaccount-admission-plugin) + - [Changelog since v1.25.12](#changelog-since-v12512) + - [Important Security Information](#important-security-information) + - [CVE-2023-3955: Insufficient input sanitization on Windows nodes leads to 
privilege escalation](#cve-2023-3955-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) + - [CVE-2023-3676: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3676-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) - [Changes by Kind](#changes-by-kind-2) - [Feature](#feature-2) - [Bug or Regression](#bug-or-regression-2) @@ -50,77 +51,79 @@ - [Added](#added-2) - [Changed](#changed-2) - [Removed](#removed-2) -- [v1.25.10](#v12510) - - [Downloads for v1.25.10](#downloads-for-v12510) +- [v1.25.12](#v12512) + - [Downloads for v1.25.12](#downloads-for-v12512) - [Source Code](#source-code-3) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - [Container Images](#container-images-3) - - [Changelog since v1.25.9](#changelog-since-v1259) + - [Changelog since v1.25.11](#changelog-since-v12511) - [Changes by Kind](#changes-by-kind-3) - - [API Change](#api-change) - [Feature](#feature-3) - [Bug or Regression](#bug-or-regression-3) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies-3) - [Added](#added-3) - [Changed](#changed-3) - [Removed](#removed-3) -- [v1.25.9](#v1259) - - [Downloads for v1.25.9](#downloads-for-v1259) +- [v1.25.11](#v12511) + - [Downloads for v1.25.11](#downloads-for-v12511) - [Source Code](#source-code-4) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - [Container Images](#container-images-4) - - [Changelog since v1.25.8](#changelog-since-v1258) + - [Changelog since v1.25.10](#changelog-since-v12510) + - [Important Security Information](#important-security-information-1) + - [CVE-2023-2728: Bypassing enforce mountable secrets policy imposed by the ServiceAccount admission plugin](#cve-2023-2728-bypassing-enforce-mountable-secrets-policy-imposed-by-the-serviceaccount-admission-plugin) - [Changes by Kind](#changes-by-kind-4) - [Feature](#feature-4) - [Bug or Regression](#bug-or-regression-4) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-4) - [Added](#added-4) - [Changed](#changed-4) - [Removed](#removed-4) -- [v1.25.8](#v1258) - - [Downloads for v1.25.8](#downloads-for-v1258) +- [v1.25.10](#v12510) + - [Downloads for v1.25.10](#downloads-for-v12510) - [Source Code](#source-code-5) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - [Container Images](#container-images-5) - - [Changelog since v1.25.7](#changelog-since-v1257) + - [Changelog since v1.25.9](#changelog-since-v1259) - [Changes by Kind](#changes-by-kind-5) + - [API Change](#api-change-1) - [Feature](#feature-5) - [Bug or Regression](#bug-or-regression-5) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-5) - [Added](#added-5) - [Changed](#changed-5) - [Removed](#removed-5) -- [v1.25.7](#v1257) - - [Downloads for v1.25.7](#downloads-for-v1257) +- [v1.25.9](#v1259) + - [Downloads for v1.25.9](#downloads-for-v1259) - [Source Code](#source-code-6) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - [Container Images](#container-images-6) - - [Changelog since v1.25.6](#changelog-since-v1256) + - [Changelog since v1.25.8](#changelog-since-v1258) - [Changes by Kind](#changes-by-kind-6) - [Feature](#feature-6) - [Bug or Regression](#bug-or-regression-6) + - 
[Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-6) - [Added](#added-6) - [Changed](#changed-6) - [Removed](#removed-6) -- [v1.25.6](#v1256) - - [Downloads for v1.25.6](#downloads-for-v1256) +- [v1.25.8](#v1258) + - [Downloads for v1.25.8](#downloads-for-v1258) - [Source Code](#source-code-7) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - [Container Images](#container-images-7) - - [Changelog since v1.25.5](#changelog-since-v1255) + - [Changelog since v1.25.7](#changelog-since-v1257) - [Changes by Kind](#changes-by-kind-7) - [Feature](#feature-7) - [Bug or Regression](#bug-or-regression-7) @@ -128,14 +131,14 @@ - [Added](#added-7) - [Changed](#changed-7) - [Removed](#removed-7) -- [v1.25.5](#v1255) - - [Downloads for v1.25.5](#downloads-for-v1255) +- [v1.25.7](#v1257) + - [Downloads for v1.25.7](#downloads-for-v1257) - [Source Code](#source-code-8) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - [Container Images](#container-images-8) - - [Changelog since v1.25.4](#changelog-since-v1254) + - [Changelog since v1.25.6](#changelog-since-v1256) - [Changes by Kind](#changes-by-kind-8) - [Feature](#feature-8) - [Bug or Regression](#bug-or-regression-8) @@ -143,33 +146,29 @@ - [Added](#added-8) - [Changed](#changed-8) - [Removed](#removed-8) -- [v1.25.4](#v1254) - - [Downloads for v1.25.4](#downloads-for-v1254) +- [v1.25.6](#v1256) + - [Downloads for v1.25.6](#downloads-for-v1256) - [Source Code](#source-code-9) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - [Container Images](#container-images-9) - - [Changelog since v1.25.3](#changelog-since-v1253) - - [Important Security Information](#important-security-information-2) - - [CVE-2022-3162: Unauthorized read of Custom Resources](#cve-2022-3162-unauthorized-read-of-custom-resources) - - [CVE-2022-3294: Node address isn't always verified when proxying](#cve-2022-3294-node-address-isnt-always-verified-when-proxying) + - [Changelog since v1.25.5](#changelog-since-v1255) - [Changes by Kind](#changes-by-kind-9) - - [API Change](#api-change-1) - [Feature](#feature-9) - [Bug or Regression](#bug-or-regression-9) - [Dependencies](#dependencies-9) - [Added](#added-9) - [Changed](#changed-9) - [Removed](#removed-9) -- [v1.25.3](#v1253) - - [Downloads for v1.25.3](#downloads-for-v1253) +- [v1.25.5](#v1255) + - [Downloads for v1.25.5](#downloads-for-v1255) - [Source Code](#source-code-10) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - [Container Images](#container-images-10) - - [Changelog since v1.25.2](#changelog-since-v1252) + - [Changelog since v1.25.4](#changelog-since-v1254) - [Changes by Kind](#changes-by-kind-10) - [Feature](#feature-10) - [Bug or Regression](#bug-or-regression-10) @@ -177,190 +176,438 @@ - [Added](#added-10) - [Changed](#changed-10) - [Removed](#removed-10) -- [v1.25.2](#v1252) - - [Downloads for v1.25.2](#downloads-for-v1252) +- [v1.25.4](#v1254) + - [Downloads for v1.25.4](#downloads-for-v1254) - [Source Code](#source-code-11) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) - [Container Images](#container-images-11) - - [Changelog since v1.25.1](#changelog-since-v1251) + - [Changelog since v1.25.3](#changelog-since-v1253) + - [Important Security 
Information](#important-security-information-2) + - [CVE-2022-3162: Unauthorized read of Custom Resources](#cve-2022-3162-unauthorized-read-of-custom-resources) + - [CVE-2022-3294: Node address isn't always verified when proxying](#cve-2022-3294-node-address-isnt-always-verified-when-proxying) - [Changes by Kind](#changes-by-kind-11) + - [API Change](#api-change-2) + - [Feature](#feature-11) - [Bug or Regression](#bug-or-regression-11) - [Dependencies](#dependencies-11) - [Added](#added-11) - [Changed](#changed-11) - [Removed](#removed-11) -- [v1.25.1](#v1251) - - [Downloads for v1.25.1](#downloads-for-v1251) +- [v1.25.3](#v1253) + - [Downloads for v1.25.3](#downloads-for-v1253) - [Source Code](#source-code-12) - [Client Binaries](#client-binaries-12) - [Server Binaries](#server-binaries-12) - [Node Binaries](#node-binaries-12) - [Container Images](#container-images-12) - - [Changelog since v1.25.0](#changelog-since-v1250) - - [Important Security Information](#important-security-information-3) - - [CVE-2022-3172: Aggregated API server can cause clients to be redirected (SSRF)](#cve-2022-3172-aggregated-api-server-can-cause-clients-to-be-redirected-ssrf) + - [Changelog since v1.25.2](#changelog-since-v1252) - [Changes by Kind](#changes-by-kind-12) - - [API Change](#api-change-2) - - [Feature](#feature-11) + - [Feature](#feature-12) - [Bug or Regression](#bug-or-regression-12) - [Dependencies](#dependencies-12) - [Added](#added-12) - [Changed](#changed-12) - [Removed](#removed-12) -- [v1.25.0](#v1250) - - [Downloads for v1.25.0](#downloads-for-v1250) +- [v1.25.2](#v1252) + - [Downloads for v1.25.2](#downloads-for-v1252) - [Source Code](#source-code-13) - [Client Binaries](#client-binaries-13) - [Server Binaries](#server-binaries-13) - [Node Binaries](#node-binaries-13) - [Container Images](#container-images-13) - - [Changelog since v1.24.0](#changelog-since-v1240) - - [What's New (Major Themes)](#whats-new-major-themes) - - [PodSecurityPolicy is Removed, Pod Security Admission graduates to Stable](#podsecuritypolicy-is-removed-pod-security-admission-graduates-to-stable) - - [Ephemeral Containers Graduate to Stable](#ephemeral-containers-graduate-to-stable) - - [Support for cgroups v2 Graduates to Stable](#support-for-cgroups-v2-graduates-to-stable) - - [Windows support improved](#windows-support-improved) - - [Moved container registry service from k8s.gcr.io to registry.k8s.io](#moved-container-registry-service-from-k8sgcrio-to-registryk8sio) - - [Promoted SeccompDefault to Beta](#promoted-seccompdefault-to-beta) - - [Promoted endPort in Network Policy to Stable](#promoted-endport-in-network-policy-to-stable) - - [Promoted Local Ephemeral Storage Capacity Isolation to Stable](#promoted-local-ephemeral-storage-capacity-isolation-to-stable) - - [Promoted core CSI Migration to Stable](#promoted-core-csi-migration-to-stable) - - [Promoted CSI Ephemeral Volume to Stable](#promoted-csi-ephemeral-volume-to-stable) - - [Promoted CRD Validation Expression Language to Beta](#promoted-crd-validation-expression-language-to-beta) - - [Promoted Server Side Unknown Field Validation to Beta](#promoted-server-side-unknown-field-validation-to-beta) - - [Introduced KMS v2](#introduced-kms-v2) - - [Kube-proxy images are now based on distroless images](#kube-proxy-images-are-now-based-on-distroless-images) - - [Known Issues](#known-issues) - - [LocalStorageCapacityIsolationFSQuotaMonitoring ConfigMap rendering failure](#localstoragecapacityisolationfsquotamonitoring-configmap-rendering-failure) - - [Urgent 
Upgrade Notes](#urgent-upgrade-notes) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) + - [Changelog since v1.25.1](#changelog-since-v1251) - [Changes by Kind](#changes-by-kind-13) - - [Deprecation](#deprecation) - - [API Change](#api-change-3) - - [Feature](#feature-12) - - [Documentation](#documentation) - - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression-13) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-13) - [Added](#added-13) - [Changed](#changed-13) - [Removed](#removed-13) -- [v1.25.0-rc.1](#v1250-rc1) - - [Downloads for v1.25.0-rc.1](#downloads-for-v1250-rc1) +- [v1.25.1](#v1251) + - [Downloads for v1.25.1](#downloads-for-v1251) - [Source Code](#source-code-14) - [Client Binaries](#client-binaries-14) - [Server Binaries](#server-binaries-14) - [Node Binaries](#node-binaries-14) - [Container Images](#container-images-14) - - [Changelog since v1.25.0-rc.0](#changelog-since-v1250-rc0) + - [Changelog since v1.25.0](#changelog-since-v1250) + - [Important Security Information](#important-security-information-3) + - [CVE-2022-3172: Aggregated API server can cause clients to be redirected (SSRF)](#cve-2022-3172-aggregated-api-server-can-cause-clients-to-be-redirected-ssrf) - [Changes by Kind](#changes-by-kind-14) - - [Documentation](#documentation-1) + - [API Change](#api-change-3) + - [Feature](#feature-13) - [Bug or Regression](#bug-or-regression-14) - [Dependencies](#dependencies-14) - [Added](#added-14) - [Changed](#changed-14) - [Removed](#removed-14) -- [v1.25.0-rc.0](#v1250-rc0) - - [Downloads for v1.25.0-rc.0](#downloads-for-v1250-rc0) +- [v1.25.0](#v1250) + - [Downloads for v1.25.0](#downloads-for-v1250) - [Source Code](#source-code-15) - [Client Binaries](#client-binaries-15) - [Server Binaries](#server-binaries-15) - [Node Binaries](#node-binaries-15) - [Container Images](#container-images-15) - - [Changelog since v1.25.0-beta.0](#changelog-since-v1250-beta0) + - [Changelog since v1.24.0](#changelog-since-v1240) + - [What's New (Major Themes)](#whats-new-major-themes) + - [PodSecurityPolicy is Removed, Pod Security Admission graduates to Stable](#podsecuritypolicy-is-removed-pod-security-admission-graduates-to-stable) + - [Ephemeral Containers Graduate to Stable](#ephemeral-containers-graduate-to-stable) + - [Support for cgroups v2 Graduates to Stable](#support-for-cgroups-v2-graduates-to-stable) + - [Windows support improved](#windows-support-improved) + - [Moved container registry service from k8s.gcr.io to registry.k8s.io](#moved-container-registry-service-from-k8sgcrio-to-registryk8sio) + - [Promoted SeccompDefault to Beta](#promoted-seccompdefault-to-beta) + - [Promoted endPort in Network Policy to Stable](#promoted-endport-in-network-policy-to-stable) + - [Promoted Local Ephemeral Storage Capacity Isolation to Stable](#promoted-local-ephemeral-storage-capacity-isolation-to-stable) + - [Promoted core CSI Migration to Stable](#promoted-core-csi-migration-to-stable) + - [Promoted CSI Ephemeral Volume to Stable](#promoted-csi-ephemeral-volume-to-stable) + - [Promoted CRD Validation Expression Language to Beta](#promoted-crd-validation-expression-language-to-beta) + - [Promoted Server Side Unknown Field Validation to Beta](#promoted-server-side-unknown-field-validation-to-beta) + - [Introduced KMS v2](#introduced-kms-v2) + - [Kube-proxy images are now based on distroless images](#kube-proxy-images-are-now-based-on-distroless-images) + - [Known 
Issues](#known-issues) + - [LocalStorageCapacityIsolationFSQuotaMonitoring ConfigMap rendering failure](#localstoragecapacityisolationfsquotamonitoring-configmap-rendering-failure) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) - [Changes by Kind](#changes-by-kind-15) + - [Deprecation](#deprecation) - [API Change](#api-change-4) + - [Feature](#feature-14) + - [Documentation](#documentation) + - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression-15) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) - [Dependencies](#dependencies-15) - [Added](#added-15) - [Changed](#changed-15) - [Removed](#removed-15) -- [v1.25.0-beta.0](#v1250-beta0) - - [Downloads for v1.25.0-beta.0](#downloads-for-v1250-beta0) +- [v1.25.0-rc.1](#v1250-rc1) + - [Downloads for v1.25.0-rc.1](#downloads-for-v1250-rc1) - [Source Code](#source-code-16) - [Client Binaries](#client-binaries-16) - [Server Binaries](#server-binaries-16) - [Node Binaries](#node-binaries-16) - [Container Images](#container-images-16) - - [Changelog since v1.25.0-alpha.3](#changelog-since-v1250-alpha3) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) + - [Changelog since v1.25.0-rc.0](#changelog-since-v1250-rc0) - [Changes by Kind](#changes-by-kind-16) - - [Deprecation](#deprecation-1) - - [API Change](#api-change-5) - - [Feature](#feature-13) + - [Documentation](#documentation-1) - [Bug or Regression](#bug-or-regression-16) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-16) - [Added](#added-16) - [Changed](#changed-16) - [Removed](#removed-16) -- [v1.25.0-alpha.3](#v1250-alpha3) - - [Downloads for v1.25.0-alpha.3](#downloads-for-v1250-alpha3) +- [v1.25.0-rc.0](#v1250-rc0) + - [Downloads for v1.25.0-rc.0](#downloads-for-v1250-rc0) - [Source Code](#source-code-17) - [Client Binaries](#client-binaries-17) - [Server Binaries](#server-binaries-17) - [Node Binaries](#node-binaries-17) - [Container Images](#container-images-17) - - [Changelog since v1.25.0-alpha.2](#changelog-since-v1250-alpha2) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-2) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-2) + - [Changelog since v1.25.0-beta.0](#changelog-since-v1250-beta0) - [Changes by Kind](#changes-by-kind-17) - - [Deprecation](#deprecation-2) - - [API Change](#api-change-6) - - [Feature](#feature-14) - - [Documentation](#documentation-2) + - [API Change](#api-change-5) - [Bug or Regression](#bug-or-regression-17) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) - [Dependencies](#dependencies-17) - [Added](#added-17) - [Changed](#changed-17) - [Removed](#removed-17) -- [v1.25.0-alpha.2](#v1250-alpha2) - - [Downloads for v1.25.0-alpha.2](#downloads-for-v1250-alpha2) +- [v1.25.0-beta.0](#v1250-beta0) + - [Downloads for v1.25.0-beta.0](#downloads-for-v1250-beta0) - [Source Code](#source-code-18) - [Client Binaries](#client-binaries-18) - [Server Binaries](#server-binaries-18) - [Node Binaries](#node-binaries-18) - [Container Images](#container-images-18) - - [Changelog since v1.25.0-alpha.1](#changelog-since-v1250-alpha1) + - [Changelog since v1.25.0-alpha.3](#changelog-since-v1250-alpha3) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) + - [(No, really, you MUST read this before you 
upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) - [Changes by Kind](#changes-by-kind-18) - - [API Change](#api-change-7) + - [Deprecation](#deprecation-1) + - [API Change](#api-change-6) - [Feature](#feature-15) - - [Documentation](#documentation-3) - [Bug or Regression](#bug-or-regression-18) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) - [Dependencies](#dependencies-18) - [Added](#added-18) - [Changed](#changed-18) - [Removed](#removed-18) -- [v1.25.0-alpha.1](#v1250-alpha1) - - [Downloads for v1.25.0-alpha.1](#downloads-for-v1250-alpha1) +- [v1.25.0-alpha.3](#v1250-alpha3) + - [Downloads for v1.25.0-alpha.3](#downloads-for-v1250-alpha3) - [Source Code](#source-code-19) - [Client Binaries](#client-binaries-19) - [Server Binaries](#server-binaries-19) - [Node Binaries](#node-binaries-19) - [Container Images](#container-images-19) - - [Changelog since v1.24.0](#changelog-since-v1240-1) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-3) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-3) + - [Changelog since v1.25.0-alpha.2](#changelog-since-v1250-alpha2) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-2) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-2) - [Changes by Kind](#changes-by-kind-19) - - [Deprecation](#deprecation-3) - - [API Change](#api-change-8) + - [Deprecation](#deprecation-2) + - [API Change](#api-change-7) - [Feature](#feature-16) - - [Failing Test](#failing-test-1) + - [Documentation](#documentation-2) - [Bug or Regression](#bug-or-regression-19) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) - [Dependencies](#dependencies-19) - [Added](#added-19) - [Changed](#changed-19) - [Removed](#removed-19) +- [v1.25.0-alpha.2](#v1250-alpha2) + - [Downloads for v1.25.0-alpha.2](#downloads-for-v1250-alpha2) + - [Source Code](#source-code-20) + - [Client Binaries](#client-binaries-20) + - [Server Binaries](#server-binaries-20) + - [Node Binaries](#node-binaries-20) + - [Container Images](#container-images-20) + - [Changelog since v1.25.0-alpha.1](#changelog-since-v1250-alpha1) + - [Changes by Kind](#changes-by-kind-20) + - [API Change](#api-change-8) + - [Feature](#feature-17) + - [Documentation](#documentation-3) + - [Bug or Regression](#bug-or-regression-20) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-7) + - [Dependencies](#dependencies-20) + - [Added](#added-20) + - [Changed](#changed-20) + - [Removed](#removed-20) +- [v1.25.0-alpha.1](#v1250-alpha1) + - [Downloads for v1.25.0-alpha.1](#downloads-for-v1250-alpha1) + - [Source Code](#source-code-21) + - [Client Binaries](#client-binaries-21) + - [Server Binaries](#server-binaries-21) + - [Node Binaries](#node-binaries-21) + - [Container Images](#container-images-21) + - [Changelog since v1.24.0](#changelog-since-v1240-1) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-3) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-3) + - [Changes by Kind](#changes-by-kind-21) + - [Deprecation](#deprecation-3) + - [API Change](#api-change-9) + - [Feature](#feature-18) + - [Failing Test](#failing-test-1) + - [Bug or Regression](#bug-or-regression-21) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-8) + - [Dependencies](#dependencies-21) + - [Added](#added-21) + - [Changed](#changed-21) + - [Removed](#removed-21) +# v1.25.15 + + +## Downloads for v1.25.15 + + + +### Source Code + +filename | sha512 hash +-------- | 
----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes.tar.gz) | 6871520678c03cdab86e4a2daa2eb3a75af8dc9b8054e2ce786efa6debeb98bb46681e3a096996a8832516686bcf1ec13d48b5b666ef7f1b0811712cc5a3210a +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-src.tar.gz) | 42fcdbc7fde65dfd97c0606cf0dba2854f4795d69127bec93cc9d6c7e7258353117358fc88742ac11a87a43184a537d354f864c7aef4b191cc0a0df158663a82 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-darwin-amd64.tar.gz) | 1ba3de7dffed3fb374350b8aeb83687cfe4543cc37b3c3c2c78d318c042186e50c72d457d303fc950fa05803a4260f758de8a3834a8bb5d3d3f04df7ecae070c +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-darwin-arm64.tar.gz) | f26afb76ebc5fb7fb1bac81f15f0c7e4c382c7f9f1438e63ca327c1bfcaa01addc4807cbd067f1753ce82ecf8b0335e24b49bbbf2a03ca7bc7edcb6bcf307d06 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-linux-386.tar.gz) | 6a752cc7db963a0c515111a3ee83ef229a79478f4130613def4aa4bb76fd9017f090909b92a392583a6a53cb6bef5bb52f749ba0fbf87665cf16cdef359802b9 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-linux-amd64.tar.gz) | b5b098e3060014738f687fe063bf3842cb06c7594048acb40b33325c86441f2bf5a0373f0b7ca721f4e0c9e70a3201540747d83bbe6f8b98e180b44629a94443 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-linux-arm.tar.gz) | bada4d45443277f4378ede281e85f13c19c40b95e1d2787ce9cfcc387014c36da7e8be2a6c552ca907472aafb27641fa04d16fa9cec98ad4250def7a6e83b1e7 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-linux-arm64.tar.gz) | 76de56fcfc29b57e29265a6217e5a958208d8967c65517f17aceab0fc2b77a99f779f96b7e5651b932782e42c95d2346d6bc5768f68bd85294347fce9e0f07ee +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-linux-ppc64le.tar.gz) | 18364a4901843228a8627d137768217830c6e5ce4cb4af5dde34df24b1cd7b1007f48dda7977323d75422c66741a1e6df46f4aaa30da75a8c1d538957722a453 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-linux-s390x.tar.gz) | fc6c20bb932e24fc79fda41be2eed12915c510ace17bf6378378b9a994daf38461f694ac6894af0390720e3130d1fc9fc57dc29fda92ac367e294105d74c4d13 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-windows-386.tar.gz) | 1ee87cdeb421a86e50d14428309425bf45324b00c54ee336ad419a597e82efab21959f32b46c55716d6b2cb20a652b00df3c5902fea3f30d492859659dc405a7 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-windows-amd64.tar.gz) | a68f9ce70385225be174df2ffe8ef744af5de817e1b965d042e00a51c9a1d683208c4841302f2239cf0fad9f5b3dc75a43bab4ca2dd06888cc0d990d59313022 +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-client-windows-arm64.tar.gz) | 0c9af476790947f715a4179952a0c03bb4c7e37ab7b13d0927d87b295a185fffbb997def5a3f0dd159b32b714ed1700425c64c8ce60ce65b8691a9bd7469076c + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-server-linux-amd64.tar.gz) | 7531e4516efbfd3f217a34860a737606d3f098b186736de8de864aae7037a67416905c585ae3df56b167f7c7fa38a82f05cc6268e37afa9a7335a7e4126f381e +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-server-linux-arm.tar.gz) | 
b1c89432094435666611577177a4900eaf7e7f7fd0cad0327b642cf040a33b53218ec92977bcca8530f3cea40fb54d7fd87371811924c2f735c9f0046541e625 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-server-linux-arm64.tar.gz) | 0f1cb1cf7e6c1be6a08de042a0677429b83114e0bf8686b017b428978d48e7f7c9f90482744430abb0b5919d3470aadd670592324259ce9a16a362646f1680b7 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-server-linux-ppc64le.tar.gz) | 3acbc9635350484ad938bc8926ae294cff5fb2911c16fb59426a4a97c22bd157485aeb5d8bcf51f8543b0067f1893e4edf225d56a4854a09656af06bf9404c5c +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-server-linux-s390x.tar.gz) | 5a2dc4d99627d1b6836bdd340144766674865fcedcc1d994aa3f96db9dc19ec3b01eea2eb0eead6ee9fbfa9161d52a81afe1e803910a9f3aad321d4a90f7223a + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-node-linux-amd64.tar.gz) | 09cf4cc4dce2eb753a52579379a851446ccd2604e73cfb8168d235d554e952bfa4ea1d2c3de96b7f286b3cff63e673de1354ea158ce69b52521efddd051268e7 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-node-linux-arm.tar.gz) | 1a6014e521c7691621fb10b2eb2697c671b76652b702cb8c8cabccce6d37fe5f39c8faf7284779f2f06096c543e1d880b704b0181bd1ff9969d67e1b95b383b7 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-node-linux-arm64.tar.gz) | 4c4176cb9a6401764d771b6b01d8f601163942fdb46e16e062ec0c1cea9e9318ebc9635d5755848ef03b67a6c4f72d4ca199bd6a7f0a531d0d421ecc8ab1951a +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-node-linux-ppc64le.tar.gz) | e0c77fb0f0a4097a95b72a7dbf579b6a519fe6b0ad04083d3ecdaa327f4c0479d259eb37b09a489aa3d3777bc9f39d19fe924d9d8fef883c431874a4bc160687 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-node-linux-s390x.tar.gz) | f37c39adab7bbd1935cfb912da9036d0769a561a3d5937fdc67e064a1f85dbeb19824b31309f2493a1d37a4b9d9b5caf6e19a873901a8ea05e0da19493525bf2 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.25.15/kubernetes-node-windows-amd64.tar.gz) | ed3553437414b71cb2e4044555837266bd356a9015a95100817b30082fd3c98d904d1c065f0288238b80e58b5f29feb07a1a9d25203fbe9f83f38d0af06c3831 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. 
+ +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.25.15](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.25.15](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.25.15](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.25.15](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.25.15](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) + +## Changelog since v1.25.14 + +## Changes by Kind + +### Feature + +- Kubernetes is now built with Go 1.20.10 ([#121150](https://github.com/kubernetes/kubernetes/pull/121150), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] +- Kubernetes is now built with Go 1.20.9 ([#121022](https://github.com/kubernetes/kubernetes/pull/121022), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Bug or 
Regression + +- Adds an opt-in mitigation for http/2 DOS vulnerabilities for CVE-2023-44487 and CVE-2023-39325 for the API server when the client is unauthenticated. The mitigation may be enabled by setting the `UnauthenticatedHTTP2DOSMitigation` feature gate to `true` (it is disabled by default). An API server fronted by an L7 load balancer that already mitigates these http/2 attacks may choose not to enable the kube-apiserver mitigation to avoid disrupting load balancer → kube-apiserver connections if http/2 requests from multiple clients share the same backend connection. An API server on a private network may choose not to enable the kube-apiserver mitigation to prevent performance regressions for unauthenticated clients. Authenticated requests rely on the fix in golang.org/x/net v0.17.0 alone. https://issue.k8s.io/121197 tracks further mitigation of http/2 attacks by authenticated clients. ([#121201](https://github.com/kubernetes/kubernetes/pull/121201), [@enj](https://github.com/enj)) [SIG API Machinery] +- Fix a bug in cronjob controller where already created jobs may be missing from the status. ([#120649](https://github.com/kubernetes/kubernetes/pull/120649), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Fixed a 1.25.12 regression where kube-controller-manager can crash when StatefulSet with Parallel policy and PVC labels is scaled up. ([#121187](https://github.com/kubernetes/kubernetes/pull/121187), [@aleksandra-malinowska](https://github.com/aleksandra-malinowska)) [SIG Apps] +- Fixes a bug where Services using finalizers may hold onto ClusterIP and/or NodePort allocated resources for longer than expected if the finalizer is removed using the status subresource ([#120657](https://github.com/kubernetes/kubernetes/pull/120657), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Fixes creationTimestamp: null causing unnecessary writes to etcd ([#116865](https://github.com/kubernetes/kubernetes/pull/116865), [@alexzielenski](https://github.com/alexzielenski)) [SIG API Machinery and Testing] +- Revised the logic for DaemonSet rolling update to exclude nodes if scheduling constraints are not met. + This eliminates the problem of rolling updates to a DaemonSet getting stuck around tolerations. ([#120792](https://github.com/kubernetes/kubernetes/pull/120792), [@mochizuki875](https://github.com/mochizuki875)) [SIG Apps and Testing] +- Sometimes, the scheduler incorrectly placed a pod in the "unschedulable" queue instead of the "backoff" queue. This happened when some plugin previously declared the pod as "unschedulable" and then in a later attempt encounters some other error. Scheduling of that pod then got delayed by up to five minutes, after which periodic flushing moved the pod back into the "active" queue. 
([#120334](https://github.com/kubernetes/kubernetes/pull/120334), [@pohly](https://github.com/pohly)) [SIG Scheduling] + +### Other (Cleanup or Flake) + +- Etcd: update to v3.5.9 ([#118077](https://github.com/kubernetes/kubernetes/pull/118077), [@nikhita](https://github.com/nikhita)) [SIG Cloud Provider, Cluster Lifecycle and Testing] +- Fixes an issue where the vsphere cloud provider will not trust a certificate if: + - The issuer of the certificate is unknown (x509.UnknownAuthorityError) + - The requested name does not match the set of authorized names (x509.HostnameError) + - The error surfaced after attempting a connection contains one of the substrings: "certificate is not trusted" or "certificate signed by unknown authority" ([#120765](https://github.com/kubernetes/kubernetes/pull/120765), [@MadhavJivrajani](https://github.com/MadhavJivrajani)) [SIG Architecture and Cloud Provider] +- Set the resolution for the job_controller_job_sync_duration_seconds metric from 4ms to 1min ([#120670](https://github.com/kubernetes/kubernetes/pull/120670), [@mimowo](https://github.com/mimowo)) [SIG Apps and Instrumentation] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/vmware/govmomi: [v0.30.0 → v0.30.6](https://github.com/vmware/govmomi/compare/v0.30.0...v0.30.6) +- golang.org/x/crypto: 3147a52 → v0.14.0 +- golang.org/x/net: v0.8.0 → v0.17.0 +- golang.org/x/sys: v0.6.0 → v0.13.0 +- golang.org/x/term: v0.6.0 → v0.13.0 +- golang.org/x/text: v0.8.0 → v0.13.0 + +### Removed +_Nothing has changed._ + + + +# v1.25.14 + + +## Downloads for v1.25.14 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes.tar.gz) | bd7cee7ac797ea79f9799e85ea82cdfa3f1c813e1aa3b10d0862f21b5c2eaba6640516157fdfba5d3640f8f480c3dc929cc1e7c751b77bb0325bcfbc15660962 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-src.tar.gz) | b7479a8d5b13810129c9018c51ff3ee5a4a5581aa81f169f74021c3e3da4182d0c7a97cc068f5bfae51aeef3a57d6f6bbb4a8629f7f9851e8143acd3718a6e2a + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-darwin-amd64.tar.gz) | f870f01b48ec33c86c9343f731ae53cb4aba1e98e470199d632768f159053c7fdb881e382d7b55adf41bb46156b57c53d7a493261fdb12391578efd8a9b93214 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-darwin-arm64.tar.gz) | 696c5ec6b7dd127d4cf5093eac29255e258000d8794b87b2b9270cf85a39eb09b455982141f75923cd2195bde004a5d574aa575a4c234e4e062551d1e0b320e7 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-linux-386.tar.gz) | 69bfc6e412a4f891e47be1cdb7ed7ffc403f3cf1a28834967676ebf9dd845f2f9d6881ced81897cc20ef70a0e300fff18812bcca41b8ac911988ab9a22b1b678 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-linux-amd64.tar.gz) | a545ff1486658255f24a65c067277b2341a0f0414671eee2b1e6b14561b74273a19ea367228eb1b95b8881b953a42c346655b7cb249e7062ab5a7bd91f89f2bd +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-linux-arm.tar.gz) | f9aebfccbc5dea0eb5fb0bb422d73f05f2cc49a385c54c5a0cf893dcb4f4284f2deb0c60ac6f016a4a670ad4efd79a4962ba59273849d472e39eec18fd1cfa4c +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-linux-arm64.tar.gz) | 
f7c50de7a9fd25e8045dc9bdefb3a51809a0bfb7a8fa71f90fd9ba4783a133bd8faa3bf11af80178b2cdf537d0640c118f934634eecea1b8cc6d73f0b6c6515d +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-linux-ppc64le.tar.gz) | dff3f42e1c8b04fdbafcc0cee7b14e05f56a35ec175b528ff12d2c7f838716017d6f9dca554bb46a7de831d2066ea3bc8702ec2b68fa8afe3579bcd41ed9f95a +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-linux-s390x.tar.gz) | ad5900e2355a5c6dbd7b4ae09b3f6e2443fc3b3178e8e8b6a1d0c5de62bf1ed58cffb8cb714d06a80dd8aeabb1f967ee540daa3de47c8245a9884175fb709d5f +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-windows-386.tar.gz) | 4e2a159b65708c4063d6acce8ba07546bd8f45a14315ed7885d9f20dd281c45987be97eec95c7d5d4523b6337eeca4a74a7598fb2c351ca52f52bf5755b20cee +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-windows-amd64.tar.gz) | e428049515f5700a4d6b9edc2b35ffc295a753ec1f6ba04700c68cef2c5f34ee43bd165fab6fa1d15b8ccda2bdec676b9b70fb57d55a8a1d4c48e07aca058186 +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-client-windows-arm64.tar.gz) | fc718e8b96dfec640e3b84ff9d2ab35a812060b4804e32435230139f1915789d493ebfdc18fd0b5c4426d28ac4c6cb9e4e27100c064f6cc3e652b67807a1f19c + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-server-linux-amd64.tar.gz) | d9bb3eefe2868b60d73064b3d2feaf3405e11276507a59bf1a8ee5b656a048e945b46fe4c2a652764e2540928b6457530e0c78f389afe13b2cc981d068aeb3ef +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-server-linux-arm.tar.gz) | 67b2a97f491294004cf7151e3d6e5298b31b71e53a80f0503cad4d02fafaaf250a8ef53ff9733a96e629caacfc0c4a2177c883334b554ba970969348c7868987 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-server-linux-arm64.tar.gz) | 7e9229b1b94a97d47cfd8b9968743120c347d0e4b433e9bd535bc10108baf7e2a6f29d7b5c370aea1430799404b70d467258e350cfd771054f22ccf7f7811199 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-server-linux-ppc64le.tar.gz) | 4bf3ad8558a68048a78ad1a6ec7f52a6af8fa6a929233e76debbf1b0bfb883b72d5fd4d2528c20745c31ee72b78c897a884fb3de9cd01acc64218efd4eaec417 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-server-linux-s390x.tar.gz) | c7a0c14265a82d0ed6c00f1cac427d4e6ba0afaf045149c212e62fa7f177482ce2bd74d364420192d6d12014cda8913c68318bfe53cbefb640952d52f665d6cb + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-node-linux-amd64.tar.gz) | 878478d29f4c7dbe3bef15c483f81cad829facc8b640e96a224f849f7437bb154b9700f130a76fac22a764ecea6060d629ea0f8749b32920412080e165237126 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-node-linux-arm.tar.gz) | bae57eddf98d3d7f0efd187238a538fd79c184c9ec6d66c0ffa406af4773284c892ffabd7f4f29b7d56ed5f543c21732d8cb76cc004847a2a7c8f1c2760766c6 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-node-linux-arm64.tar.gz) | 900ad33636733498dfac78e2e6d052eed974cd4d68cd07f563b902d093ba9bf87b5921d8e0658d19e4ff8c4eb1d5629aaee3b2f8a80fe08d0efab3cffd707d2d +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-node-linux-ppc64le.tar.gz) | 
08f7d1341899073546065e40d7ec406fd74cf5f1558e0c455f46c05c88da2d86b28be7f107b215dd8faa083dea412bfd5a8dc34d2f62814054fe53e33e1e9b4d +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-node-linux-s390x.tar.gz) | de4962563b9bef393e519eb4ea2c84e4cfb1c17c551a61647446f513c5cecda19c15c1514b4109fdb9c1b33afb48785ae8ccbd5445aa04fb74556fd9b301e4e2 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.25.14/kubernetes-node-windows-amd64.tar.gz) | f48f4bda057f0c0745145c27358134c072a4fa269e03da6e5dfe861f6b59bed2a956f5e39a1c5f68cb6c36e7ef38af6153a5c4769be5ddc1f9acbda012c17b3e + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. + +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.25.14](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.25.14](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.25.14](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.25.14](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.25.14](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | 
[amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) + +## Changelog since v1.25.13 + +## Changes by Kind + +### API Change + +- Mark Job onPodConditions as optional in pod failure policy ([#120211](https://github.com/kubernetes/kubernetes/pull/120211), [@mimowo](https://github.com/mimowo)) [SIG API Machinery and Apps] + +### Feature + +- Kubernetes is now built with Go 1.20.8 ([#120497](https://github.com/kubernetes/kubernetes/pull/120497), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Bug or Regression + +- Cherry-pick #115769: Fix the problem Pod terminating stuck because of trying to umount not actual mounted dir. ([#119832](https://github.com/kubernetes/kubernetes/pull/119832), [@cartermckinnon](https://github.com/cartermckinnon)) [SIG Node and Storage] +- Fixes a bug where images pinned by the container runtime can be garbage collected by kubelet. ([#120056](https://github.com/kubernetes/kubernetes/pull/120056), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- Fixes regression in 1.25.10 causing running pods with devices to be terminated if kubelet is restarted ([#119707](https://github.com/kubernetes/kubernetes/pull/119707), [@ffromani](https://github.com/ffromani)) [SIG Node and Testing] +- Ignore context canceled from validate and mutate webhook ([#120017](https://github.com/kubernetes/kubernetes/pull/120017), [@divyasri537](https://github.com/divyasri537)) [SIG API Machinery] +- Kubeadm: fix nil pointer when etcd member is already removed ([#120013](https://github.com/kubernetes/kubernetes/pull/120013), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + +### Other (Cleanup or Flake) + +- When retrieving event resources, the reportingController and reportingInstance fields in the event will contain values. 
([#120065](https://github.com/kubernetes/kubernetes/pull/120065), [@HirazawaUi](https://github.com/HirazawaUi)) [SIG Instrumentation] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +_Nothing has changed._ + +### Removed +_Nothing has changed._ + + + # v1.25.13 diff --git a/CHANGELOG/CHANGELOG-1.26.md b/CHANGELOG/CHANGELOG-1.26.md index 3a46bd6973d17..ecfe6d11aec7b 100644 --- a/CHANGELOG/CHANGELOG-1.26.md +++ b/CHANGELOG/CHANGELOG-1.26.md @@ -1,83 +1,83 @@ -- [v1.26.8](#v1268) - - [Downloads for v1.26.8](#downloads-for-v1268) +- [v1.26.10](#v12610) + - [Downloads for v1.26.10](#downloads-for-v12610) - [Source Code](#source-code) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - [Container Images](#container-images) - - [Changelog since v1.26.7](#changelog-since-v1267) - - [Important Security Information](#important-security-information) - - [CVE-2023-3955: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3955-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) - - [CVE-2023-3676: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3676-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) + - [Changelog since v1.26.9](#changelog-since-v1269) - [Changes by Kind](#changes-by-kind) - - [API Change](#api-change) - [Feature](#feature) - [Bug or Regression](#bug-or-regression) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies) - [Added](#added) - [Changed](#changed) - [Removed](#removed) -- [v1.26.7](#v1267) - - [Downloads for v1.26.7](#downloads-for-v1267) +- [v1.26.9](#v1269) + - [Downloads for v1.26.9](#downloads-for-v1269) - [Source Code](#source-code-1) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - [Container Images](#container-images-1) - - [Changelog since v1.26.6](#changelog-since-v1266) + - [Changelog since v1.26.8](#changelog-since-v1268) - [Changes by Kind](#changes-by-kind-1) - - [API Change](#api-change-1) + - [API Change](#api-change) - [Feature](#feature-1) - [Bug or Regression](#bug-or-regression-1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-1) - [Added](#added-1) - [Changed](#changed-1) - [Removed](#removed-1) -- [v1.26.6](#v1266) - - [Downloads for v1.26.6](#downloads-for-v1266) +- [v1.26.8](#v1268) + - [Downloads for v1.26.8](#downloads-for-v1268) - [Source Code](#source-code-2) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - [Container Images](#container-images-2) - - [Changelog since v1.26.5](#changelog-since-v1265) - - [Important Security Information](#important-security-information-1) - - [CVE-2023-2728: Bypassing enforce mountable secrets policy imposed by the ServiceAccount admission plugin](#cve-2023-2728-bypassing-enforce-mountable-secrets-policy-imposed-by-the-serviceaccount-admission-plugin) + - [Changelog since v1.26.7](#changelog-since-v1267) + - [Important Security Information](#important-security-information) + - [CVE-2023-3955: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3955-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) + - [CVE-2023-3676: Insufficient input sanitization on Windows nodes leads to privilege 
escalation](#cve-2023-3676-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) - [Changes by Kind](#changes-by-kind-2) + - [API Change](#api-change-1) - [Feature](#feature-2) - [Bug or Regression](#bug-or-regression-2) - [Dependencies](#dependencies-2) - [Added](#added-2) - [Changed](#changed-2) - [Removed](#removed-2) -- [v1.26.5](#v1265) - - [Downloads for v1.26.5](#downloads-for-v1265) +- [v1.26.7](#v1267) + - [Downloads for v1.26.7](#downloads-for-v1267) - [Source Code](#source-code-3) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - [Container Images](#container-images-3) - - [Changelog since v1.26.4](#changelog-since-v1264) + - [Changelog since v1.26.6](#changelog-since-v1266) - [Changes by Kind](#changes-by-kind-3) - [API Change](#api-change-2) - [Feature](#feature-3) - - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression-3) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies-3) - [Added](#added-3) - [Changed](#changed-3) - [Removed](#removed-3) -- [v1.26.4](#v1264) - - [Downloads for v1.26.4](#downloads-for-v1264) +- [v1.26.6](#v1266) + - [Downloads for v1.26.6](#downloads-for-v1266) - [Source Code](#source-code-4) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - [Container Images](#container-images-4) - - [Changelog since v1.26.3](#changelog-since-v1263) + - [Changelog since v1.26.5](#changelog-since-v1265) + - [Important Security Information](#important-security-information-1) + - [CVE-2023-2728: Bypassing enforce mountable secrets policy imposed by the ServiceAccount admission plugin](#cve-2023-2728-bypassing-enforce-mountable-secrets-policy-imposed-by-the-serviceaccount-admission-plugin) - [Changes by Kind](#changes-by-kind-4) - [Feature](#feature-4) - [Bug or Regression](#bug-or-regression-4) @@ -85,153 +85,151 @@ - [Added](#added-4) - [Changed](#changed-4) - [Removed](#removed-4) -- [v1.26.3](#v1263) - - [Downloads for v1.26.3](#downloads-for-v1263) +- [v1.26.5](#v1265) + - [Downloads for v1.26.5](#downloads-for-v1265) - [Source Code](#source-code-5) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - [Container Images](#container-images-5) - - [Changelog since v1.26.2](#changelog-since-v1262) + - [Changelog since v1.26.4](#changelog-since-v1264) - [Changes by Kind](#changes-by-kind-5) - [API Change](#api-change-3) - [Feature](#feature-5) - - [Failing Test](#failing-test-1) + - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression-5) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-5) - [Added](#added-5) - [Changed](#changed-5) - [Removed](#removed-5) -- [v1.26.2](#v1262) - - [Downloads for v1.26.2](#downloads-for-v1262) +- [v1.26.4](#v1264) + - [Downloads for v1.26.4](#downloads-for-v1264) - [Source Code](#source-code-6) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - [Container Images](#container-images-6) - - [Changelog since v1.26.1](#changelog-since-v1261) + - [Changelog since v1.26.3](#changelog-since-v1263) - [Changes by Kind](#changes-by-kind-6) - - [API Change](#api-change-4) - [Feature](#feature-6) - [Bug or Regression](#bug-or-regression-6) - [Dependencies](#dependencies-6) - [Added](#added-6) - [Changed](#changed-6) - [Removed](#removed-6) -- 
[v1.26.1](#v1261) - - [Downloads for v1.26.1](#downloads-for-v1261) +- [v1.26.3](#v1263) + - [Downloads for v1.26.3](#downloads-for-v1263) - [Source Code](#source-code-7) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - [Container Images](#container-images-7) - - [Changelog since v1.26.0](#changelog-since-v1260) + - [Changelog since v1.26.2](#changelog-since-v1262) - [Changes by Kind](#changes-by-kind-7) - - [API Change](#api-change-5) + - [API Change](#api-change-4) - [Feature](#feature-7) - - [Failing Test](#failing-test-2) + - [Failing Test](#failing-test-1) - [Bug or Regression](#bug-or-regression-7) - [Dependencies](#dependencies-7) - [Added](#added-7) - [Changed](#changed-7) - [Removed](#removed-7) -- [v1.26.0](#v1260) - - [Downloads for v1.26.0](#downloads-for-v1260) +- [v1.26.2](#v1262) + - [Downloads for v1.26.2](#downloads-for-v1262) - [Source Code](#source-code-8) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - [Container Images](#container-images-8) - - [Changelog since v1.25.0](#changelog-since-v1250) - - [Urgent Upgrade Notes](#urgent-upgrade-notes) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) + - [Changelog since v1.26.1](#changelog-since-v1261) - [Changes by Kind](#changes-by-kind-8) - - [Deprecation](#deprecation) - - [API Change](#api-change-6) + - [API Change](#api-change-5) - [Feature](#feature-8) - - [Documentation](#documentation) - [Bug or Regression](#bug-or-regression-8) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-8) - [Added](#added-8) - [Changed](#changed-8) - [Removed](#removed-8) -- [v1.26.0-rc.1](#v1260-rc1) - - [Downloads for v1.26.0-rc.1](#downloads-for-v1260-rc1) +- [v1.26.1](#v1261) + - [Downloads for v1.26.1](#downloads-for-v1261) - [Source Code](#source-code-9) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - [Container Images](#container-images-9) - - [Changelog since v1.26.0-rc.0](#changelog-since-v1260-rc0) + - [Changelog since v1.26.0](#changelog-since-v1260) - [Changes by Kind](#changes-by-kind-9) + - [API Change](#api-change-6) + - [Feature](#feature-9) + - [Failing Test](#failing-test-2) - [Bug or Regression](#bug-or-regression-9) - [Dependencies](#dependencies-9) - [Added](#added-9) - [Changed](#changed-9) - [Removed](#removed-9) -- [v1.26.0-rc.0](#v1260-rc0) - - [Downloads for v1.26.0-rc.0](#downloads-for-v1260-rc0) +- [v1.26.0](#v1260) + - [Downloads for v1.26.0](#downloads-for-v1260) - [Source Code](#source-code-10) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - [Container Images](#container-images-10) - - [Changelog since v1.26.0-beta.0](#changelog-since-v1260-beta0) + - [Changelog since v1.25.0](#changelog-since-v1250) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) - [Changes by Kind](#changes-by-kind-10) + - [Deprecation](#deprecation) - [API Change](#api-change-7) - - [Feature](#feature-9) + - [Feature](#feature-10) + - [Documentation](#documentation) - [Bug or Regression](#bug-or-regression-10) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-10) - [Added](#added-10) - [Changed](#changed-10) - 
[Removed](#removed-10) -- [v1.26.0-beta.0](#v1260-beta0) - - [Downloads for v1.26.0-beta.0](#downloads-for-v1260-beta0) +- [v1.26.0-rc.1](#v1260-rc1) + - [Downloads for v1.26.0-rc.1](#downloads-for-v1260-rc1) - [Source Code](#source-code-11) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) - [Container Images](#container-images-11) - - [Changelog since v1.26.0-alpha.3](#changelog-since-v1260-alpha3) + - [Changelog since v1.26.0-rc.0](#changelog-since-v1260-rc0) - [Changes by Kind](#changes-by-kind-11) - - [Deprecation](#deprecation-1) - - [API Change](#api-change-8) - - [Feature](#feature-10) - [Bug or Regression](#bug-or-regression-11) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-11) - [Added](#added-11) - [Changed](#changed-11) - [Removed](#removed-11) -- [v1.26.0-alpha.3](#v1260-alpha3) - - [Downloads for v1.26.0-alpha.3](#downloads-for-v1260-alpha3) +- [v1.26.0-rc.0](#v1260-rc0) + - [Downloads for v1.26.0-rc.0](#downloads-for-v1260-rc0) - [Source Code](#source-code-12) - [Client Binaries](#client-binaries-12) - [Server Binaries](#server-binaries-12) - [Node Binaries](#node-binaries-12) - [Container Images](#container-images-12) - - [Changelog since v1.26.0-alpha.2](#changelog-since-v1260-alpha2) + - [Changelog since v1.26.0-beta.0](#changelog-since-v1260-beta0) - [Changes by Kind](#changes-by-kind-12) - - [API Change](#api-change-9) + - [API Change](#api-change-8) - [Feature](#feature-11) - [Bug or Regression](#bug-or-regression-12) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-12) - [Added](#added-12) - [Changed](#changed-12) - [Removed](#removed-12) -- [v1.26.0-alpha.2](#v1260-alpha2) - - [Downloads for v1.26.0-alpha.2](#downloads-for-v1260-alpha2) +- [v1.26.0-beta.0](#v1260-beta0) + - [Downloads for v1.26.0-beta.0](#downloads-for-v1260-beta0) - [Source Code](#source-code-13) - [Client Binaries](#client-binaries-13) - [Server Binaries](#server-binaries-13) - [Node Binaries](#node-binaries-13) - [Container Images](#container-images-13) - - [Changelog since v1.26.0-alpha.1](#changelog-since-v1260-alpha1) + - [Changelog since v1.26.0-alpha.3](#changelog-since-v1260-alpha3) - [Changes by Kind](#changes-by-kind-13) - - [Deprecation](#deprecation-2) - - [API Change](#api-change-10) + - [Deprecation](#deprecation-1) + - [API Change](#api-change-9) - [Feature](#feature-12) - [Bug or Regression](#bug-or-regression-13) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) @@ -239,30 +237,286 @@ - [Added](#added-13) - [Changed](#changed-13) - [Removed](#removed-13) -- [v1.26.0-alpha.1](#v1260-alpha1) - - [Downloads for v1.26.0-alpha.1](#downloads-for-v1260-alpha1) +- [v1.26.0-alpha.3](#v1260-alpha3) + - [Downloads for v1.26.0-alpha.3](#downloads-for-v1260-alpha3) - [Source Code](#source-code-14) - [Client Binaries](#client-binaries-14) - [Server Binaries](#server-binaries-14) - [Node Binaries](#node-binaries-14) - [Container Images](#container-images-14) - - [Changelog since v1.25.0](#changelog-since-v1250-1) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) + - [Changelog since v1.26.0-alpha.2](#changelog-since-v1260-alpha2) - [Changes by Kind](#changes-by-kind-14) - - [Deprecation](#deprecation-3) - - [API Change](#api-change-11) + - [API Change](#api-change-10) - [Feature](#feature-13) - - [Documentation](#documentation-1) - 
[Bug or Regression](#bug-or-regression-14) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) - [Dependencies](#dependencies-14) - [Added](#added-14) - [Changed](#changed-14) - [Removed](#removed-14) +- [v1.26.0-alpha.2](#v1260-alpha2) + - [Downloads for v1.26.0-alpha.2](#downloads-for-v1260-alpha2) + - [Source Code](#source-code-15) + - [Client Binaries](#client-binaries-15) + - [Server Binaries](#server-binaries-15) + - [Node Binaries](#node-binaries-15) + - [Container Images](#container-images-15) + - [Changelog since v1.26.0-alpha.1](#changelog-since-v1260-alpha1) + - [Changes by Kind](#changes-by-kind-15) + - [Deprecation](#deprecation-2) + - [API Change](#api-change-11) + - [Feature](#feature-14) + - [Bug or Regression](#bug-or-regression-15) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) + - [Dependencies](#dependencies-15) + - [Added](#added-15) + - [Changed](#changed-15) + - [Removed](#removed-15) +- [v1.26.0-alpha.1](#v1260-alpha1) + - [Downloads for v1.26.0-alpha.1](#downloads-for-v1260-alpha1) + - [Source Code](#source-code-16) + - [Client Binaries](#client-binaries-16) + - [Server Binaries](#server-binaries-16) + - [Node Binaries](#node-binaries-16) + - [Container Images](#container-images-16) + - [Changelog since v1.25.0](#changelog-since-v1250-1) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) + - [Changes by Kind](#changes-by-kind-16) + - [Deprecation](#deprecation-3) + - [API Change](#api-change-12) + - [Feature](#feature-15) + - [Documentation](#documentation-1) + - [Bug or Regression](#bug-or-regression-16) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-7) + - [Dependencies](#dependencies-16) + - [Added](#added-16) + - [Changed](#changed-16) + - [Removed](#removed-16) +# v1.26.10 + + +## Downloads for v1.26.10 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes.tar.gz) | 71e444111cb3295ea6571f963e3b687df70ffb0f23bbebf7740841e2ac6cd3aa9bc10b022ea8f918da3422c4c989d818268be38066e6f26194e581e7362129ed +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-src.tar.gz) | f6bd86c318bf092d37a6965a0ae575f7beab73c4295f193bed34f0c82a18901cb5b9119102ae6967f618ab5797e82e98a06a6171dc18c73d2eee2d2a334bb0fe + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-darwin-amd64.tar.gz) | 12797b69f528c045310d2fe35b239f1265e926cc0110c2a2ce2855f96f46cb2e75a37564fe873d3d071911b56b81133850ec38c40a89b051fc18e1a733d40fa2 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-darwin-arm64.tar.gz) | c5579815c549443d566824513bc51e73ecaed1229a371f906014cb9842cc40b0f64ec2d22ba728d746577c7f83cb16a787bcb5764036328d25c4f2f1d77112d0 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-linux-386.tar.gz) | f7500b9e32499e122fc78a7b79bc778f45bce2fd268201fa871c64df097d9549a43d466a185f87df9b6a67bc5cdcc9ed08053ed0c673b59b7dbf81105b553990 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-linux-amd64.tar.gz) | e0297612af35b05329f16528c458764b975136339a6a282bf8c3ef9c8a68a6c425f5e9a00c473f11862412e500ced3042e1a84224c9a4ba476b28f0a9d6fa585 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-linux-arm.tar.gz) | 
3ccb33046bd5504f6fadb2a3696b73dc66e00cfd8be27da29eda7240a4d67c624543a2ae0793d312e26418a0025967df53eb0c8351b1888ecaa80faddf968795 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-linux-arm64.tar.gz) | 170dc0dc0821ac34e26687327d3c1da88dd5ddb64704d715a708caa773729e998f9a14d313105e067e0be07c6b07d0563af9023a53b73aa1e4ca2746f7bf2229 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-linux-ppc64le.tar.gz) | 012f103c0d191f06d79568e5faec166fcd4040771892a2fe4cbf4a76484720c8aaa7933bdd1f7812cb6cf93982f138434abd5e83fbabc4408f3950454510d872 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-linux-s390x.tar.gz) | 7decbde676b50c87eec4ff3dd3a7affd980d23da5e6849dc6b45f04d7408a5fa3a4656995e8b1640d834ea873da826bde102d3a4991de3d584c91533c0f5969a +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-windows-386.tar.gz) | cbcd7b9b7a1367906ec457e6fcfbff5bbe6ab999727dffd6eaad4bbf1e3aafab27a13325db252880ee87acbcb90601014b1dc0ea58790da54e9dda21895ef9e1 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-windows-amd64.tar.gz) | d07390c8e14695c2a6567723e19c54eeec0ec283ccd6758e76621251caa6ba9a1d039bc34d118adc43f298f41f85c3a0334c116bb5c5c0cd0f0170fdfc16a3d7 +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-client-windows-arm64.tar.gz) | 554591e9520904deeada4a81cc256e5f5c2d34fa58b1a693368dd779528ba022edc33b7c37d16e0f746c70b8d815da7e386622e0096711794cdfe462a61ebbec + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-server-linux-amd64.tar.gz) | 1e334ccd1b1ee8189be7c11e1fe76df91b1380eea3f8e293e1af53f05c7c9d6384e322e26114a067ec3095d1a55b2b8301e72ea8842cb35e5d48156fe502595e +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-server-linux-arm.tar.gz) | 117019118a460154a1bbf86f1b2aa8912cfd8715db242b4c1cfb13b42738055c39c7cf861b5dfbde10e08e48a5cb0e1e9044e4cedafe42fd7058b33507492d65 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-server-linux-arm64.tar.gz) | 2e46bebefcd3fd84fa572c8501b06ec9312cde439e40cc3677cd7fe7f3035e5c899e491cabb881a42d487dbef1c48f923cd55f7a611df2ea5a9b70935b86c860 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-server-linux-ppc64le.tar.gz) | 701bdf04d0baf2e9d164ddff1e3f52e58f716c8c6a66e91e8d651102344cf8be623009b476e2a91824cc918afff82a9822fbb6d87250f232e2806a545402d875 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-server-linux-s390x.tar.gz) | 3299cc0265c74536a33856c28300268cdadf7311dbe863cc786c880410009a9157d99a5f5078fb9588a9a21e2a899be1916a3bbbcd09eae4e81c2a2633a637e8 + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-node-linux-amd64.tar.gz) | ce2d55c178d31e5890a67a7d75bde7aaf6cd212178dc394ff32e9bb7953296d57d22e8e27a23d5f6eac36125040763aa37aebcd4fd0ca576635c232f6d0aa3c9 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-node-linux-arm.tar.gz) | 21f15f578ae70c6b40d59a3372c074facc5210b171f569ec085cffb08376e8c83aec0e53e4b36bf910e2afe9ffe6bff956987a7e9d7749641b552c9cd4d1b997 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-node-linux-arm64.tar.gz) | 
b95440ceb06f4f4a7ee4d84d0ef3e3a8c290d1afd6476db1eef84aa7662f309ba4c71b42169c56d03431460b0576bb93f5eccd9542a9b730a1ce41c06d115dda +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-node-linux-ppc64le.tar.gz) | f7cc54cffdf8c2a7dcea6e24e31e3036fa20ecd43801ea64625071ebe23b61a4b12c9c21e91b5fd79aed5b4e7ee95c8cb2e9414657689446e31c55b95bfa9f99 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-node-linux-s390x.tar.gz) | c4be2f3657c940576469e6ba052bcfd8f78cb4582f8a8035aa11fe93ef231245cc4d41475ff791dc928e3c60fcda22b3a4be5669ed0fcc6b66e3b0587367bf57 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.26.10/kubernetes-node-windows-amd64.tar.gz) | 27d7717e91851f3311435d3227f4db954867a4e76640d0fd43825bdeb614d20ca0f3cc0bc5e50afcf2f0c67208c146f238c9f1584f5206e385a9a3d732befc26 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. + +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.26.10](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.26.10](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.26.10](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.26.10](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), 
[s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.26.10](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) + +## Changelog since v1.26.9 + +## Changes by Kind + +### Feature + +- Kubernetes is now built with Go 1.20.10 ([#121151](https://github.com/kubernetes/kubernetes/pull/121151), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] +- Kubernetes is now built with Go 1.20.9 ([#121023](https://github.com/kubernetes/kubernetes/pull/121023), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Bug or Regression + +- Adds an opt-in mitigation for http/2 DOS vulnerabilities for CVE-2023-44487 and CVE-2023-39325 for the API server when the client is unauthenticated. The mitigation may be enabled by setting the `UnauthenticatedHTTP2DOSMitigation` feature gate to `true` (it is disabled by default). An API server fronted by an L7 load balancer that already mitigates these http/2 attacks may choose not to enable the kube-apiserver mitigation to avoid disrupting load balancer → kube-apiserver connections if http/2 requests from multiple clients share the same backend connection. An API server on a private network may choose not to enable the kube-apiserver mitigation to prevent performance regressions for unauthenticated clients. Authenticated requests rely on the fix in golang.org/x/net v0.17.0 alone. https://issue.k8s.io/121197 tracks further mitigation of http/2 attacks by authenticated clients. ([#121200](https://github.com/kubernetes/kubernetes/pull/121200), [@enj](https://github.com/enj)) [SIG API Machinery] +- Fix a bug in cronjob controller where already created jobs may be missing from the status. ([#120649](https://github.com/kubernetes/kubernetes/pull/120649), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Fixed a 1.26.7 regression where kube-controller-manager can crash when StatefulSet with Parallel policy and PVC labels is scaled up. ([#121186](https://github.com/kubernetes/kubernetes/pull/121186), [@aleksandra-malinowska](https://github.com/aleksandra-malinowska)) [SIG Apps] +- Fixed attaching volumes after detach errors. Now volumes that failed to detach are not treated as attached, Kubernetes will make sure they are fully attached before they can be used by pods. 
([#120595](https://github.com/kubernetes/kubernetes/pull/120595), [@jsafrane](https://github.com/jsafrane)) [SIG Apps and Storage] +- Fixes a bug where Services using finalizers may hold onto ClusterIP and/or NodePort allocated resources for longer than expected if the finalizer is removed using the status subresource ([#120656](https://github.com/kubernetes/kubernetes/pull/120656), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Fixes creationTimestamp: null causing unnecessary writes to etcd ([#116865](https://github.com/kubernetes/kubernetes/pull/116865), [@alexzielenski](https://github.com/alexzielenski)) [SIG API Machinery and Testing] +- Revised the logic for DaemonSet rolling update to exclude nodes if scheduling constraints are not met. + This eliminates the problem of rolling updates to a DaemonSet getting stuck around tolerations. ([#120789](https://github.com/kubernetes/kubernetes/pull/120789), [@mochizuki875](https://github.com/mochizuki875)) [SIG Apps and Testing] +- Sometimes, the scheduler incorrectly placed a pod in the "unschedulable" queue instead of the "backoff" queue. This happened when some plugin previously declared the pod as "unschedulable" and then in a later attempt encounters some other error. Scheduling of that pod then got delayed by up to five minutes, after which periodic flushing moved the pod back into the "active" queue. ([#120334](https://github.com/kubernetes/kubernetes/pull/120334), [@pohly](https://github.com/pohly)) [SIG Scheduling] + +### Other (Cleanup or Flake) + +- Etcd: update to v3.5.9 ([#118078](https://github.com/kubernetes/kubernetes/pull/118078), [@nikhita](https://github.com/nikhita)) [SIG Cloud Provider, Cluster Lifecycle and Testing] +- Fixes an issue where the vsphere cloud provider will not trust a certificate if: + - The issuer of the certificate is unknown (x509.UnknownAuthorityError) + - The requested name does not match the set of authorized names (x509.HostnameError) + - The error surfaced after attempting a connection contains one of the substrings: "certificate is not trusted" or "certificate signed by unknown authority" ([#120766](https://github.com/kubernetes/kubernetes/pull/120766), [@MadhavJivrajani](https://github.com/MadhavJivrajani)) [SIG Architecture and Cloud Provider] +- Set the resolution for the job_controller_job_sync_duration_seconds metric from 4ms to 1min ([#120669](https://github.com/kubernetes/kubernetes/pull/120669), [@mimowo](https://github.com/mimowo)) [SIG Apps and Instrumentation] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/vmware/govmomi: [v0.30.0 → v0.30.6](https://github.com/vmware/govmomi/compare/v0.30.0...v0.30.6) +- golang.org/x/crypto: v0.1.0 → v0.14.0 +- golang.org/x/net: v0.8.0 → v0.17.0 +- golang.org/x/sys: v0.6.0 → v0.13.0 +- golang.org/x/term: v0.6.0 → v0.13.0 +- golang.org/x/text: v0.8.0 → v0.13.0 + +### Removed +_Nothing has changed._ + + + +# v1.26.9 + + +## Downloads for v1.26.9 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes.tar.gz) | b3c250c125d1851a8eb83022d0a32ee6906ef8c7ca405ed481da950401c87c4c00dd5db35583b2db5b1bc8857c0ec768b77d145885c02fe5288b792f72cc3732 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-src.tar.gz) | 0a6a8f9c09283aa6bcc6619adfdff8fe6d104c1b7cf00428036bd870f11c8c8554381df69fd88e9559a688bdd3f6555b334c126a2a288cfc384c32d8047e66cd + +### Client Binaries + +filename | sha512 hash +-------- | ----------- 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-darwin-amd64.tar.gz) | c3dc15dbc38e9a9c0d4023a5c30173e7a9392f819e4d6fcfc5ec2587215ce02827e29b54c9c28ae721864bd690077b3c429be00d8fb4ca37892a1c2281948cec +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-darwin-arm64.tar.gz) | 0ca3b195ad56d270e809a8b401e09dfb1a231200f305b61edc142842fffbfc92bbe8bf3eeb4a7606c6598b0f736d7d8b7b61d260d79069444d27ea555b93dd04 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-linux-386.tar.gz) | 8c6d2c075997bc437744ab2018fae8d647f49a5bf52b7eb7723a2bc4ab36838a33ed191a22baa2cc422a9cfd18c1846c71d5b6e80c4648c00753c7fa8edff920 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-linux-amd64.tar.gz) | d23077cbbea4177d889f5cfe821d954665a26779a259f77401589b1a10a2c18834008fb58439f57359ad00d1070715f68f28dd1fa0ecfb05736542daa8f47281 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-linux-arm.tar.gz) | f99ab562480a0c5df27caf9ca3569bf1861570219b2b7670e1496571b84f35726f1678dc094b7d53cdd206ad6b2f64ed94c0c1a317015090b98c1277343386cd +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-linux-arm64.tar.gz) | bae2adb54d241389243a946f068fa65294ba8b9531aea316a81ab86aee36ca2a5c5c2bc9766287ebf9c1c22e3d101780e417bef688cdc85620776e071606c36e +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-linux-ppc64le.tar.gz) | fef71798f06c676e07de13c573160f93d3d8dc11ecd8fdcbef8d9f4276090bbaae947d9d32097521825f107a2b815d13cc8a62dbbf2e525ee8db1e229c0c9934 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-linux-s390x.tar.gz) | f8efde4546532c33402dea989f55b670f8d7fd9b5197644830545f460fbd30e81796f3a8aea04747dc82a1ff7c6f9eb523d14327d6db641d109278d80faf16b5 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-windows-386.tar.gz) | 2fa7535792ccf0a5b012de7506319708966538bf0e785f9af6a8b241c9940ae50241a1a6121abeeef82ac00e0117e4d9352156b9b089ea292be015b508332bd9 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-windows-amd64.tar.gz) | 134a8478a2dce0dff8090789aa35c80d3e4600d6b7e58ac11d5688932297caf7961601a51e75f780d84b7101d6a646807190455e787835baefc5f606d4f54e2b +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-client-windows-arm64.tar.gz) | 137b08868f2d140bbc49389d6aaedb2e4a40dce954f9f2b209debee4598692ea2604cdd75d6a541c45f7ca692b5142bdf46eb8722673ef13ad980f79edf0a566 + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-server-linux-amd64.tar.gz) | 70f7a1792c44ec3cc55ca2ab2d4baf9bdb2ef65e3e95f0bd16698d01243a4955e7b1a8a7b4ec6d93d1f93bea30f4fc9aeccdfb61446d30ce0088ea6d2ae4a695 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-server-linux-arm.tar.gz) | 17eba51ee774321385ad85b31eab3626255905b1c29d010dc258d6e171f08e78553e28881249717b9759de47a64403019e35df52e5479336cc908a9af735afc5 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-server-linux-arm64.tar.gz) | d298567bc35009b188cff37944630771fbe27b98fff34d1b30ba4d2070e00cc3d398548e3134b6ed01b2ad1e9efb56946e0ca08188fe688d6e1cb60a2c02dcc2 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-server-linux-ppc64le.tar.gz) | 
f0937964bef8a277256509ce383ac46845df6eeacfb87be55ffad2de518c1e1ed2c51d88ff0f7e89b3308b93e8808a785a940fe80cd2b1d9f45ee1e1d0e689ca +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-server-linux-s390x.tar.gz) | 01dc19e94695ad1968b726c128dfbe0d6e3dac1fb6740dbad1f1342f475e2ce56f76a116abeb7e35b88997c9c9173c4c3342d78ec7f6fccd3240ddb2f8a9dfd5 + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-node-linux-amd64.tar.gz) | 30a26836b7dbeb96e91866d585a42a1498d400aff9ee444873be3e252efb30470e9d0364e02cc585334ba8bad30f28b0d9cd3eaba1b5b306391b6d148267c757 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-node-linux-arm.tar.gz) | 885d97968dc806ccf7d0a918b191ba0c2aa88f47d7a977dad7bbf653e671525a0715b206e279cbcac41671c10a9b01fd685c0833e445ffdb2438a57b37589e98 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-node-linux-arm64.tar.gz) | efe30fb5e0de9cca8d40fcfbf55956ffe72c96d197ea24e09965e084376df482459e314f057ce93431831159902b9ddec96969fe95fe0350c5e545a02d63edd4 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-node-linux-ppc64le.tar.gz) | 50010742df6061a09426ef261470da4fcdabbff282331da1d66093eb4c045202109c54403b95b40858ef715df71476311396ed0314076b38dd88b452f3b6b342 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-node-linux-s390x.tar.gz) | 14f87db9f1efa8358ab16b3165fafbbfe589de0596d7ebc38d3ed68478dd628e051e24cb0b7dcca8ef418050d077fe6b1a7b3af5c3287186d5c06a9bbec39a15 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.26.9/kubernetes-node-windows-amd64.tar.gz) | cdb87a5899ce125c8c0a92f88cbe69a9de2356123b86b37010a4d39bc46ada62a05bafd1735b6eabf26d55c8e8ae2855c163b814bd784ae614a029e8af039d3c + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. 
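+
+For example, a minimal sketch (not part of the upstream release notes) of pulling these images with a Docker-compatible CLI; the choice of `docker`, the `kube-apiserver` image, and the `arm64` architecture are assumptions for illustration only:
+
+```console
+# Pull the multi-arch manifest list; the runtime selects the matching architecture.
+docker pull registry.k8s.io/kube-apiserver:v1.26.9
+
+# Pull a specific architecture directly by adding the "-$ARCH" suffix to the image name.
+docker pull registry.k8s.io/kube-apiserver-arm64:v1.26.9
+```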
+ +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.26.9](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.26.9](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.26.9](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.26.9](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.26.9](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) + +## Changelog since v1.26.8 + +## Changes by Kind + +### API Change + +- Fixed a bug where CEL expressions in CRD validation rules would incorrectly compute a high estimated cost for functions that return strings, lists or maps. + The incorrect cost was evident when the result of a function was used in subsequent operations. 
([#119810](https://github.com/kubernetes/kubernetes/pull/119810), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth and Cloud Provider] +- Mark Job onPodConditions as optional in pod failure policy ([#120210](https://github.com/kubernetes/kubernetes/pull/120210), [@mimowo](https://github.com/mimowo)) [SIG API Machinery and Apps] + +### Feature + +- Kubernetes is now built with Go 1.20.8 ([#120496](https://github.com/kubernetes/kubernetes/pull/120496), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Bug or Regression + +- Cherry-pick #115769: Fix the problem Pod terminating stuck because of trying to umount not actual mounted dir. ([#119831](https://github.com/kubernetes/kubernetes/pull/119831), [@cartermckinnon](https://github.com/cartermckinnon)) [SIG Node and Storage] +- Fix a concurrent map access in TopologyCache's `HasPopulatedHints` method. ([#120324](https://github.com/kubernetes/kubernetes/pull/120324), [@Miciah](https://github.com/Miciah)) [SIG Apps and Network] +- Fixed a 1.26 regression scheduling bug by ensuring that preemption is skipped when a PreFilter plugin returns `UnschedulableAndUnresolvable` ([#119953](https://github.com/kubernetes/kubernetes/pull/119953), [@sanposhiho](https://github.com/sanposhiho)) [SIG Scheduling] +- Fixes a bug where images pinned by the container runtime can be garbage collected by kubelet. ([#120055](https://github.com/kubernetes/kubernetes/pull/120055), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- Fixes issue https://github.com/kubernetes-sigs/cloud-provider-azure/issues/4230 and removes the additional filtering on `NotReady` nodes by the azure cloud provider code ([#119128](https://github.com/kubernetes/kubernetes/pull/119128), [@alexanderConstantinescu](https://github.com/alexanderConstantinescu)) [SIG Cloud Provider] +- Fixes regression in 1.26.5 causing running pods with devices to be terminated if kubelet is restarted ([#119706](https://github.com/kubernetes/kubernetes/pull/119706), [@ffromani](https://github.com/ffromani)) [SIG Node and Testing] +- Ignore context canceled from validate and mutate webhook ([#120019](https://github.com/kubernetes/kubernetes/pull/120019), [@divyasri537](https://github.com/divyasri537)) [SIG API Machinery] +- Kubeadm: fix nil pointer when etcd member is already removed ([#120012](https://github.com/kubernetes/kubernetes/pull/120012), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + +### Other (Cleanup or Flake) + +- When retrieving event resources, the reportingController and reportingInstance fields in the event will contain values. 
([#120066](https://github.com/kubernetes/kubernetes/pull/120066), [@HirazawaUi](https://github.com/HirazawaUi)) [SIG Instrumentation] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/google/cel-go: [v0.12.6 → v0.12.7](https://github.com/google/cel-go/compare/v0.12.6...v0.12.7) +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp: v0.35.0 → v0.35.1 + +### Removed +_Nothing has changed._ + + + # v1.26.8 diff --git a/CHANGELOG/CHANGELOG-1.27.md b/CHANGELOG/CHANGELOG-1.27.md index 39dc0d6ff17d5..54cfdcaa64150 100644 --- a/CHANGELOG/CHANGELOG-1.27.md +++ b/CHANGELOG/CHANGELOG-1.27.md @@ -1,215 +1,212 @@ -- [v1.27.5](#v1275) - - [Downloads for v1.27.5](#downloads-for-v1275) +- [v1.27.7](#v1277) + - [Downloads for v1.27.7](#downloads-for-v1277) - [Source Code](#source-code) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - [Container Images](#container-images) - - [Changelog since v1.27.4](#changelog-since-v1274) - - [Important Security Information](#important-security-information) - - [CVE-2023-3955: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3955-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) - - [CVE-2023-3676: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3676-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) + - [Changelog since v1.27.6](#changelog-since-v1276) - [Changes by Kind](#changes-by-kind) - - [API Change](#api-change) - [Feature](#feature) + - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies) - [Added](#added) - [Changed](#changed) - [Removed](#removed) -- [v1.27.4](#v1274) - - [Downloads for v1.27.4](#downloads-for-v1274) +- [v1.27.6](#v1276) + - [Downloads for v1.27.6](#downloads-for-v1276) - [Source Code](#source-code-1) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - [Container Images](#container-images-1) - - [Changelog since v1.27.3](#changelog-since-v1273) + - [Changelog since v1.27.5](#changelog-since-v1275) - [Changes by Kind](#changes-by-kind-1) + - [API Change](#api-change) - [Feature](#feature-1) - [Bug or Regression](#bug-or-regression-1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-1) - [Added](#added-1) - [Changed](#changed-1) - [Removed](#removed-1) -- [v1.27.3](#v1273) - - [Downloads for v1.27.3](#downloads-for-v1273) +- [v1.27.5](#v1275) + - [Downloads for v1.27.5](#downloads-for-v1275) - [Source Code](#source-code-2) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - [Container Images](#container-images-2) - - [Changelog since v1.27.2](#changelog-since-v1272) - - [Important Security Information](#important-security-information-1) - - [CVE-2023-2728: Bypassing enforce mountable secrets policy imposed by the ServiceAccount admission plugin](#cve-2023-2728-bypassing-enforce-mountable-secrets-policy-imposed-by-the-serviceaccount-admission-plugin) + - [Changelog since v1.27.4](#changelog-since-v1274) + - [Important Security Information](#important-security-information) + - [CVE-2023-3955: Insufficient input sanitization on Windows nodes leads to privilege 
escalation](#cve-2023-3955-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) + - [CVE-2023-3676: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3676-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) - [Changes by Kind](#changes-by-kind-2) + - [API Change](#api-change-1) - [Feature](#feature-2) - [Bug or Regression](#bug-or-regression-2) - [Dependencies](#dependencies-2) - [Added](#added-2) - [Changed](#changed-2) - [Removed](#removed-2) -- [v1.27.2](#v1272) - - [Downloads for v1.27.2](#downloads-for-v1272) +- [v1.27.4](#v1274) + - [Downloads for v1.27.4](#downloads-for-v1274) - [Source Code](#source-code-3) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - [Container Images](#container-images-3) - - [Changelog since v1.27.1](#changelog-since-v1271) + - [Changelog since v1.27.3](#changelog-since-v1273) - [Changes by Kind](#changes-by-kind-3) - - [API Change](#api-change-1) - [Feature](#feature-3) - - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression-3) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies-3) - [Added](#added-3) - [Changed](#changed-3) - [Removed](#removed-3) -- [v1.27.1](#v1271) - - [Downloads for v1.27.1](#downloads-for-v1271) +- [v1.27.3](#v1273) + - [Downloads for v1.27.3](#downloads-for-v1273) - [Source Code](#source-code-4) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - [Container Images](#container-images-4) - - [Changelog since v1.27.0](#changelog-since-v1270) + - [Changelog since v1.27.2](#changelog-since-v1272) + - [Important Security Information](#important-security-information-1) + - [CVE-2023-2728: Bypassing enforce mountable secrets policy imposed by the ServiceAccount admission plugin](#cve-2023-2728-bypassing-enforce-mountable-secrets-policy-imposed-by-the-serviceaccount-admission-plugin) - [Changes by Kind](#changes-by-kind-4) + - [Feature](#feature-4) - [Bug or Regression](#bug-or-regression-4) - [Dependencies](#dependencies-4) - [Added](#added-4) - [Changed](#changed-4) - [Removed](#removed-4) -- [v1.27.0](#v1270) - - [Downloads for v1.27.0](#downloads-for-v1270) +- [v1.27.2](#v1272) + - [Downloads for v1.27.2](#downloads-for-v1272) - [Source Code](#source-code-5) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - [Container Images](#container-images-5) - - [Changelog since v1.26.0](#changelog-since-v1260) - - [Known Issues](#known-issues) - - [The PreEnqueue extension point doesn't work for Pods going to activeQ through backoffQ](#the-preenqueue-extension-point-doesnt-work-for-pods-going-to-activeq-through-backoffq) - - [Urgent Upgrade Notes](#urgent-upgrade-notes) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) + - [Changelog since v1.27.1](#changelog-since-v1271) - [Changes by Kind](#changes-by-kind-5) - - [Deprecation](#deprecation) - [API Change](#api-change-2) - - [Feature](#feature-4) - - [Documentation](#documentation) + - [Feature](#feature-5) - [Failing Test](#failing-test-1) - [Bug or Regression](#bug-or-regression-5) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-5) - [Added](#added-5) - [Changed](#changed-5) - 
[Removed](#removed-5) -- [v1.27.0-rc.1](#v1270-rc1) - - [Downloads for v1.27.0-rc.1](#downloads-for-v1270-rc1) +- [v1.27.1](#v1271) + - [Downloads for v1.27.1](#downloads-for-v1271) - [Source Code](#source-code-6) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - [Container Images](#container-images-6) - - [Changelog since v1.27.0-rc.0](#changelog-since-v1270-rc0) + - [Changelog since v1.27.0](#changelog-since-v1270) - [Changes by Kind](#changes-by-kind-6) - - [Feature](#feature-5) - [Bug or Regression](#bug-or-regression-6) - [Dependencies](#dependencies-6) - [Added](#added-6) - [Changed](#changed-6) - [Removed](#removed-6) -- [v1.27.0-rc.0](#v1270-rc0) - - [Downloads for v1.27.0-rc.0](#downloads-for-v1270-rc0) +- [v1.27.0](#v1270) + - [Downloads for v1.27.0](#downloads-for-v1270) - [Source Code](#source-code-7) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - [Container Images](#container-images-7) - - [Changelog since v1.27.0-beta.0](#changelog-since-v1270-beta0) + - [Changelog since v1.26.0](#changelog-since-v1260) + - [Known Issues](#known-issues) + - [The PreEnqueue extension point doesn't work for Pods going to activeQ through backoffQ](#the-preenqueue-extension-point-doesnt-work-for-pods-going-to-activeq-through-backoffq) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) - [Changes by Kind](#changes-by-kind-7) + - [Deprecation](#deprecation) - [API Change](#api-change-3) - [Feature](#feature-6) + - [Documentation](#documentation) + - [Failing Test](#failing-test-2) - [Bug or Regression](#bug-or-regression-7) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-7) - [Added](#added-7) - [Changed](#changed-7) - [Removed](#removed-7) -- [v1.27.0-beta.0](#v1270-beta0) - - [Downloads for v1.27.0-beta.0](#downloads-for-v1270-beta0) +- [v1.27.0-rc.1](#v1270-rc1) + - [Downloads for v1.27.0-rc.1](#downloads-for-v1270-rc1) - [Source Code](#source-code-8) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - [Container Images](#container-images-8) - - [Changelog since v1.27.0-alpha.3](#changelog-since-v1270-alpha3) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) + - [Changelog since v1.27.0-rc.0](#changelog-since-v1270-rc0) - [Changes by Kind](#changes-by-kind-8) - - [Deprecation](#deprecation-1) - - [API Change](#api-change-4) - [Feature](#feature-7) - - [Documentation](#documentation-1) - - [Failing Test](#failing-test-2) - [Bug or Regression](#bug-or-regression-8) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-8) - [Added](#added-8) - [Changed](#changed-8) - [Removed](#removed-8) -- [v1.27.0-alpha.3](#v1270-alpha3) - - [Downloads for v1.27.0-alpha.3](#downloads-for-v1270-alpha3) +- [v1.27.0-rc.0](#v1270-rc0) + - [Downloads for v1.27.0-rc.0](#downloads-for-v1270-rc0) - [Source Code](#source-code-9) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - [Container Images](#container-images-9) - - [Changelog since v1.27.0-alpha.2](#changelog-since-v1270-alpha2) + - [Changelog since v1.27.0-beta.0](#changelog-since-v1270-beta0) - [Changes by 
Kind](#changes-by-kind-9) - - [Deprecation](#deprecation-2) - - [API Change](#api-change-5) + - [API Change](#api-change-4) - [Feature](#feature-8) - - [Documentation](#documentation-2) - - [Failing Test](#failing-test-3) - [Bug or Regression](#bug-or-regression-9) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-9) - [Added](#added-9) - [Changed](#changed-9) - [Removed](#removed-9) -- [v1.27.0-alpha.2](#v1270-alpha2) - - [Downloads for v1.27.0-alpha.2](#downloads-for-v1270-alpha2) +- [v1.27.0-beta.0](#v1270-beta0) + - [Downloads for v1.27.0-beta.0](#downloads-for-v1270-beta0) - [Source Code](#source-code-10) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - [Container Images](#container-images-10) - - [Changelog since v1.27.0-alpha.1](#changelog-since-v1270-alpha1) + - [Changelog since v1.27.0-alpha.3](#changelog-since-v1270-alpha3) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) - [Changes by Kind](#changes-by-kind-10) - - [API Change](#api-change-6) + - [Deprecation](#deprecation-1) + - [API Change](#api-change-5) - [Feature](#feature-9) + - [Documentation](#documentation-1) + - [Failing Test](#failing-test-3) - [Bug or Regression](#bug-or-regression-10) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) - [Dependencies](#dependencies-10) - [Added](#added-10) - [Changed](#changed-10) - [Removed](#removed-10) -- [v1.27.0-alpha.1](#v1270-alpha1) - - [Downloads for v1.27.0-alpha.1](#downloads-for-v1270-alpha1) +- [v1.27.0-alpha.3](#v1270-alpha3) + - [Downloads for v1.27.0-alpha.3](#downloads-for-v1270-alpha3) - [Source Code](#source-code-11) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) - [Container Images](#container-images-11) - - [Changelog since v1.26.0](#changelog-since-v1260-1) + - [Changelog since v1.27.0-alpha.2](#changelog-since-v1270-alpha2) - [Changes by Kind](#changes-by-kind-11) - - [Deprecation](#deprecation-3) - - [API Change](#api-change-7) + - [Deprecation](#deprecation-2) + - [API Change](#api-change-6) - [Feature](#feature-10) - - [Documentation](#documentation-3) + - [Documentation](#documentation-2) - [Failing Test](#failing-test-4) - [Bug or Regression](#bug-or-regression-11) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) @@ -217,9 +214,272 @@ - [Added](#added-11) - [Changed](#changed-11) - [Removed](#removed-11) +- [v1.27.0-alpha.2](#v1270-alpha2) + - [Downloads for v1.27.0-alpha.2](#downloads-for-v1270-alpha2) + - [Source Code](#source-code-12) + - [Client Binaries](#client-binaries-12) + - [Server Binaries](#server-binaries-12) + - [Node Binaries](#node-binaries-12) + - [Container Images](#container-images-12) + - [Changelog since v1.27.0-alpha.1](#changelog-since-v1270-alpha1) + - [Changes by Kind](#changes-by-kind-12) + - [API Change](#api-change-7) + - [Feature](#feature-11) + - [Bug or Regression](#bug-or-regression-12) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) + - [Dependencies](#dependencies-12) + - [Added](#added-12) + - [Changed](#changed-12) + - [Removed](#removed-12) +- [v1.27.0-alpha.1](#v1270-alpha1) + - [Downloads for v1.27.0-alpha.1](#downloads-for-v1270-alpha1) + - [Source Code](#source-code-13) + - [Client Binaries](#client-binaries-13) + - [Server Binaries](#server-binaries-13) + - [Node Binaries](#node-binaries-13) + - 
[Container Images](#container-images-13) + - [Changelog since v1.26.0](#changelog-since-v1260-1) + - [Changes by Kind](#changes-by-kind-13) + - [Deprecation](#deprecation-3) + - [API Change](#api-change-8) + - [Feature](#feature-12) + - [Documentation](#documentation-3) + - [Failing Test](#failing-test-5) + - [Bug or Regression](#bug-or-regression-13) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-7) + - [Dependencies](#dependencies-13) + - [Added](#added-13) + - [Changed](#changed-13) + - [Removed](#removed-13) +# v1.27.7 + + +## Downloads for v1.27.7 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes.tar.gz) | b12f023254d40f791355aeec2eb84d521035648cf3e19994eacdc6c7516373f11dad942ae97d4bc8a7f255654aa7c742c1c10f18b4f4830b64e78a0b7bb35083 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-src.tar.gz) | 137db7a6ddb85c7eb0676cc3cb2bfadd726073a34b1edae4e2c3cc15165a43c0f16d163930015de8a5e357e8ff099c0f8d03f036aa245704b10348c7c91483b1 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-darwin-amd64.tar.gz) | a2ce6205bb613454167b1e6f5c6be34516e9624f1cc0eec2b6b2aa0e0b3bfc7d266379f035a7eea08625bf97413ff4cf23c9dc65669529026ad8589a0e4f9a70 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-darwin-arm64.tar.gz) | 28b6df012e1af6a062f5815a0e8c8bd440c824e520c6954a55ea9fba917c328f23069c124bab7f5bfc4b37e3a20542b33cf41d07d715f7a54bd78bcdcabca70a +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-linux-386.tar.gz) | 7fcaa119db37f5a5212fd3a5fd08db37b1ce701d67922d1f65cc757edc02f282566ae4d001e11f7b7dab3e24b27f3745189dd7fc63c90e97e9ce6a070ba8b094 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-linux-amd64.tar.gz) | 87b7ac839cac8d96efa1c6170cf32ed2bbe14e7194971df4b4736699152e294a0aa0018f3d8ae1dcf9905c3c784a7a15c297382450c0431a0daf98f300d3ef16 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-linux-arm.tar.gz) | 1bc0420005fa0e564568aa4bcf5a61e96d4c2c42afee4d34df940c4b89f0639e90771deda40a1b30f536ce8f1bd4d04cf228af98edf48ba0fa6685babe11311a +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-linux-arm64.tar.gz) | d4e96a6be6e15530e866399a5760f9410fe319217f7d91026d93a27e1a2ce9398380adc62f463a347f383ced253e359fba2fb291bd8a644f067ffc4ce8457d6c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-linux-ppc64le.tar.gz) | abcc2c651514f0b2a4cf28934078ea701d3591d318e5eac080e7958f70fa94cb4b83ca9ee0f0130749c29a20c3bf8bea545c7641cfff0b78dc78571cb8e14f22 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-linux-s390x.tar.gz) | 0fb9a5bd534a29b84be6f1a5aae59e2a05531b1ff40019896e4bbbd3bd948a96313f65140764a656e0305e6f48cdb113e6fcf1c8195d4fadfa8bf62dea18db5b +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-windows-386.tar.gz) | 36a78882ec5960a561f928e2bb4ffb1c5dc7e884ee6471441d5de6d8fe0fec6cfd5a1bcc48dd933e490b07d7f837de93eecfb9ae353dcefc5dc4f699f02b5757 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-windows-amd64.tar.gz) | a662732fa75fb2fc9ca733239eb2e5b82e3cba0311e7ed0d89b045a8a099697889f5febff50384d845600f4142936160106f60ff233961a5e143c363a839ee45 
+[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-client-windows-arm64.tar.gz) | 993f365214cd7a66284f4e6612681c2d6bf4844717c4d990ab1baf21a8d03eb94d7ab591c1eb584389fd6985867e14ed61400b74a02935da6b8b0ec34284e8e4 + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-server-linux-amd64.tar.gz) | 8dc92641fe27bf7dcb2688a5c48cc0ff1b91b913d1f2eeac35b5bcbabc8413f768c23955141bdf707040b5ad0de55bbb7e407b4eed3d9d26c1e1c9e3acdb409b +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-server-linux-arm64.tar.gz) | a554b9319897c4cc65d8e4ca32b83103a71cf4512fb187a7f2b85898e4d10618c17b26ff0aadd8e265be009b215f100de4ab0b14a77b3b309b0786f03d479eb5 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-server-linux-ppc64le.tar.gz) | 001fc616801c239bddc02789ec9b9cd765a635d3f2d6cd04086b42eb81200bc9d0904e0a9e5b72756c420e36ce244169532c01cefd60f423eaaa85e236f06a49 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-server-linux-s390x.tar.gz) | a44f54c8fedea4e52e205831594f72f63d5c61cde0b9fa0ad6939eddf83664385edf767591187e166178a9af6b40da40607f26e098cb24bc2e5a88f7105318d3 + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-node-linux-amd64.tar.gz) | ee31a3ad00412e122aacb82070a3257d558cd52e270312af538d9e7d22ad1638b71d9e02dddebf0b853c911284e172d7a16c0927c0e2012f761219850c0950aa +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-node-linux-arm64.tar.gz) | 95567ed1c5b892ee47f147d6ca8faef5e7915dfe34dc17141fa01326b4d0db0a8ff2e6589f681f9df5145b91878054c2b1e5030012d43500e4d525d28d3cb97b +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-node-linux-ppc64le.tar.gz) | 37cef5fb54c7a2ba8f542356ada66183bf281df41c989616694c87d06156aff241c03b7005c288dafad1889a5f989bd583bdf18039df9e31a874a71d4e5d9316 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-node-linux-s390x.tar.gz) | 414a71046b78be9937225396bccd747d17aba14890b77da672272129dd6b7675ae3522237388436ae79026c1837741af45e3c239c50149a4f5c16bd01066e1a9 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.27.7/kubernetes-node-windows-amd64.tar.gz) | df34997bfe6e5f2526adc73680988cf6e8670efd15efb5c60cca0c3dc8e384eb0bbf85c0a5ffa8edb93f6d129dd58531ab00830e511603e4ded87c18fb8a60d3 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. 
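For example, the architecture-specific variants can be pulled directly (a sketch assuming a Docker-compatible client; the image name and tag come from the table below):

```bash
# Pull the multi-arch manifest list; the client resolves the matching architecture automatically.
docker pull registry.k8s.io/kube-apiserver:v1.27.7

# Or pull a single architecture directly by appending the "-$ARCH" suffix to the image name.
ARCH=arm64
docker pull "registry.k8s.io/kube-apiserver-${ARCH}:v1.27.7"
```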
+ +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.27.7](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.27.7](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.27.7](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.27.7](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.27.7](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) + +## Changelog since v1.27.6 + +## Changes by Kind + +### Feature + +- Kubernetes is now built with Go 1.20.10 ([#121152](https://github.com/kubernetes/kubernetes/pull/121152), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Failing Test + +- E2e framework: retrying after intermittent apiserver failures was fixed in WaitForPodsResponding ([#120559](https://github.com/kubernetes/kubernetes/pull/120559), [@pohly](https://github.com/pohly)) [SIG Testing] + +### Bug or Regression + +- Adds an opt-in mitigation for http/2 DOS vulnerabilities for CVE-2023-44487 and CVE-2023-39325 for the API server when the client is unauthenticated. The mitigation may be enabled by setting the `UnauthenticatedHTTP2DOSMitigation` feature gate to `true` (it is disabled by default). 
An API server fronted by an L7 load balancer that already mitigates these http/2 attacks may choose not to enable the kube-apiserver mitigation to avoid disrupting load balancer → kube-apiserver connections if http/2 requests from multiple clients share the same backend connection. An API server on a private network may choose not to enable the kube-apiserver mitigation to prevent performance regressions for unauthenticated clients. Authenticated requests rely on the fix in golang.org/x/net v0.17.0 alone. https://issue.k8s.io/121197 tracks further mitigation of http/2 attacks by authenticated clients. ([#121199](https://github.com/kubernetes/kubernetes/pull/121199), [@enj](https://github.com/enj)) [SIG API Machinery] +- Fix a bug in the cronjob controller where already-created jobs may be missing from the status. ([#120649](https://github.com/kubernetes/kubernetes/pull/120649), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Fixed a 1.27.4 regression where kube-controller-manager can crash when StatefulSet with Parallel policy and PVC labels is scaled up. ([#121185](https://github.com/kubernetes/kubernetes/pull/121185), [@aleksandra-malinowska](https://github.com/aleksandra-malinowska)) [SIG Apps] +- Fixed an issue where all the pods in a namespace could be drained when an empty selector, i.e. "{}", is specified in a Pod Disruption Budget (PDB) ([#121132](https://github.com/kubernetes/kubernetes/pull/121132), [@sairameshv](https://github.com/sairameshv)) [SIG Apps] +- Fixed attaching volumes after detach errors. Now volumes that failed to detach are not treated as attached; Kubernetes will make sure they are fully attached before they can be used by pods. ([#120595](https://github.com/kubernetes/kubernetes/pull/120595), [@jsafrane](https://github.com/jsafrane)) [SIG Apps and Storage] +- Fixes a bug where Services using finalizers may hold onto ClusterIP and/or NodePort allocated resources for longer than expected if the finalizer is removed using the status subresource ([#120655](https://github.com/kubernetes/kubernetes/pull/120655), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Fixes a bug where the OpenAPIV2 config was used instead of V3, and gives a clear error message about the OpenAPIV3 requirement ([#120612](https://github.com/kubernetes/kubernetes/pull/120612), [@alexzielenski](https://github.com/alexzielenski)) [SIG API Machinery] +- Fixes `creationTimestamp: null` causing unnecessary writes to etcd ([#116865](https://github.com/kubernetes/kubernetes/pull/116865), [@alexzielenski](https://github.com/alexzielenski)) [SIG API Machinery and Testing] +- Revised the logic for DaemonSet rolling update to exclude nodes if scheduling constraints are not met. + This eliminates the problem of rolling updates to a DaemonSet getting stuck around tolerations. ([#120786](https://github.com/kubernetes/kubernetes/pull/120786), [@mochizuki875](https://github.com/mochizuki875)) [SIG Apps and Testing] +- Sometimes, the scheduler incorrectly placed a pod in the "unschedulable" queue instead of the "backoff" queue. This happened when some plugin previously declared the pod as "unschedulable" and then a later attempt encountered some other error. Scheduling of that pod then got delayed by up to five minutes, after which periodic flushing moved the pod back into the "active" queue.
([#120334](https://github.com/kubernetes/kubernetes/pull/120334), [@pohly](https://github.com/pohly)) [SIG Scheduling] + +### Other (Cleanup or Flake) + +- Etcd: update to v3.5.9 ([#118079](https://github.com/kubernetes/kubernetes/pull/118079), [@nikhita](https://github.com/nikhita)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] +- Fixes an issue where the vsphere cloud provider will not trust a certificate if: + - The issuer of the certificate is unknown (x509.UnknownAuthorityError) + - The requested name does not match the set of authorized names (x509.HostnameError) + - The error surfaced after attempting a connection contains one of the substrings: "certificate is not trusted" or "certificate signed by unknown authority" ([#120767](https://github.com/kubernetes/kubernetes/pull/120767), [@MadhavJivrajani](https://github.com/MadhavJivrajani)) [SIG Architecture and Cloud Provider] +- Kubernetes is now built with Go 1.20.9 ([#121024](https://github.com/kubernetes/kubernetes/pull/121024), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] +- Set the resolution for the job_controller_job_sync_duration_seconds metric from 4ms to 1min ([#120668](https://github.com/kubernetes/kubernetes/pull/120668), [@mimowo](https://github.com/mimowo)) [SIG Apps and Instrumentation] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/vmware/govmomi: [v0.30.0 → v0.30.6](https://github.com/vmware/govmomi/compare/v0.30.0...v0.30.6) +- golang.org/x/crypto: v0.1.0 → v0.14.0 +- golang.org/x/net: v0.8.0 → v0.17.0 +- golang.org/x/sys: v0.6.0 → v0.13.0 +- golang.org/x/term: v0.6.0 → v0.13.0 +- golang.org/x/text: v0.8.0 → v0.13.0 + +### Removed +_Nothing has changed._ + + + +# v1.27.6 + + +## Downloads for v1.27.6 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes.tar.gz) | cbd2427412bbd229439afcf7d7b1712536515f93657c9971b66a430d9858a54383db33046eade4ce2891f26f264cccee5ad34ca6e03e874af8557c840b7c627d +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-src.tar.gz) | 006c3d901f3bf417ef5472708aaa47fc857bef6c7e4fe9e89693ec5b9040f06c5b44be3b74130b12a5bd17650855cb101263033100e0d46275eafd489924987b + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-darwin-amd64.tar.gz) | b47b6213859d1397b55a0000d1c8daa1f630dd8a5ca553fcb541f4645fb01409f2ae2dd85a8c5274f5ec99cd5082de366e036cae72f8716861f83f8e38652fd2 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-darwin-arm64.tar.gz) | 4e1c6db712de967f6d9dc1c21b2fedf99aae3671242a8559c2bad43dc45e22e399e51bf606d2f6b4a943098fb61cf588324b6c9ba5615d18733d319183ed2c10 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-linux-386.tar.gz) | 992074087f3df0681ead508b8f35b173caac235813fe6db778b2e1367ae64cca9ff6a0d47bfae0c90612ac83361385ba82a45d01fc7b86f94752e7df32d006f7 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-linux-amd64.tar.gz) | 01e57c5d2c92094df17ce079a8944df73a2834362f3e9b051b1e3923b51b9f02bba7f4aab8ffd3183a0f99cfc102bc61e3dde77142e36f428612cced55d33892 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-linux-arm.tar.gz) | 08a51b07f14aaf4f7e9c6a3fa287c70b34195fcf7ca38968ef97cf6f70e9ab7ea17c5724fb94734a8a11ef8254122d8d246a8b777f95c1c753d430d4b64bb0ea 
+[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-linux-arm64.tar.gz) | fb0cbe29113ba495c45843bb98e644570bf6284c2b689327d1a07aa5cd8d336f997a3f80ce0fd6df16c3a706b511922b13610b86632f07506032e51f0161479e +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-linux-ppc64le.tar.gz) | 81670992dcd1944881c54c24ef936c8fbbb7f01e7abd2c253f1894137cb114b60845e5a5181ad826c08074196f8c8bd9077322b43b26b72b0b06cee4c296dd45 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-linux-s390x.tar.gz) | e6e094117f04341bb704fae663eb95efe7273a6752acad1f82ed4dbf830d70eb204b9516dc5cfaea69dc31f979342123eaf7ec64dd88998479d8b06ab9851af5 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-windows-386.tar.gz) | bf869d6c20f1ea94dcfb0240b7d20a9ef5a021eab1f6ab6a840869af6ef420f9fe0cbf38f79a5d9055d27b8dc595cfb6587fad56259bbfb11f5b07a321a6ba9b +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-windows-amd64.tar.gz) | e503aaade44f3b6edfa532399640dc921e38ff5f0b3b6a71c347dd447065fa74d2d9d683de5325febf1de39737e3d1a8ed07781870d072d85f0223635729bcab +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-client-windows-arm64.tar.gz) | bfc41e7401ab7ad300a305603130ae7dbcf200c4370bea886d069d2ff4694fcb6cd6772849015c2259b693d90f85f575f7d3ecca54d31cf3a9b53569abac498e + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-server-linux-amd64.tar.gz) | 27d62d0bc05d2f117427004a19e5d319337169c0219cc2055f519af4c356001dd5a37253708e835d4a88f14823adb555c4e2753347d739fece47e76337210ef8 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-server-linux-arm64.tar.gz) | a5dc93ca776751a54d4d2fc33b4ad2e3f4cd15610e0e38592a08e82bd3eb3e99852e17a94c002665b60ddd5433cbd1d04eb25d0763117d907595ab620ab9f885 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-server-linux-ppc64le.tar.gz) | 3f6aeb89e8455675ba638014a4625ca9a491b557c1b23e59af3c2b917ec4b671960b1180e2318e44d4aa01e9c28bb3394853edd6e41dd4f83d2bff9cb84de676 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-server-linux-s390x.tar.gz) | 517ac5b1ad72abf3043e3841aca7ff5b3b26206f3c7c37401a25fa0c74437dcfb0b6cb053bd64a58b7d062821c82156f66190defe5775d2eab7d9b2912f70b99 + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-node-linux-amd64.tar.gz) | d93e9d2995fb9ff0a850c6217c8d47dc55163cc36a87e4820cd1303ae05f8f0be49fedb1a6f193812a5da49e26b0e9fed4c8f3f8856eda6164dfe11ca339b796 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-node-linux-arm64.tar.gz) | d634fb1eb577b999179212055fdef04a270e74ed3f5b765ad94dadc6349dc091f69d66b99f12a119b576171a21d2cd7bb4ca4e00c006e782b987764e854e80b3 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-node-linux-ppc64le.tar.gz) | d81c05c511c0d1a0e23e9e31e7ca7d5cf2ae854d6e78360cbeb8a7af929796b4f446199c0bad095bf52b31b9541aa8159694e983a757a6a5416b3eb4e75e8404 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-node-linux-s390x.tar.gz) | 22a0e8689d26c6b5fd9778930f430f7633a17e4956bc23de33b7fe22afaac1b24aa4422c59c549a285c9e4ca17f3ee12bc8d221eaaf98627816bad815ff3bd1d +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.27.6/kubernetes-node-windows-amd64.tar.gz) | 
566c0848f94e30ee2ef287a83a4d82f6fe93f582e4a6db9de884fdfe33c941a4731c905206d251d9b3e096175198fd18dd211fee34f151e3c651443ecce09b3d + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. + +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.27.6](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.27.6](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.27.6](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.27.6](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.27.6](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) + +## Changelog since v1.27.5 + +## Changes by Kind + +### API Change + +- Fixed a bug where CEL expressions in CRD validation rules would incorrectly compute a high estimated cost for functions that return strings, lists or maps. + The incorrect cost was evident when the result of a function was used in subsequent operations. 
([#119809](https://github.com/kubernetes/kubernetes/pull/119809), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth and Cloud Provider] +- Mark Job `onPodConditions` as optional in pod failure policy ([#120209](https://github.com/kubernetes/kubernetes/pull/120209), [@mimowo](https://github.com/mimowo)) [SIG API Machinery and Apps] + +### Feature + +- Kubernetes is now built with Go 1.20.8 ([#120494](https://github.com/kubernetes/kubernetes/pull/120494), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Bug or Regression + +- Fix OpenAPI v3 not being cleaned up after deleting APIServices ([#120108](https://github.com/kubernetes/kubernetes/pull/120108), [@tnqn](https://github.com/tnqn)) [SIG API Machinery and Testing] +- Fix a concurrent map access in TopologyCache's `HasPopulatedHints` method. ([#120323](https://github.com/kubernetes/kubernetes/pull/120323), [@Miciah](https://github.com/Miciah)) [SIG Apps and Network] +- Fixed a 1.26 scheduling regression by ensuring that preemption is skipped when a PreFilter plugin returns `UnschedulableAndUnresolvable` ([#119952](https://github.com/kubernetes/kubernetes/pull/119952), [@sanposhiho](https://github.com/sanposhiho)) [SIG Scheduling] +- Fixed a 1.27 scheduling regression where a PostFilter plugin may not function if previous PreFilter plugins return Skip ([#119943](https://github.com/kubernetes/kubernetes/pull/119943), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] +- Fixed a regression in default 1.27 configurations in kube-apiserver: the AggregatedDiscoveryEndpoint feature (beta in 1.27+) now successfully fetches discovery information from aggregated API servers that do not check `Accept` headers when serving the `/apis` endpoint ([#120360](https://github.com/kubernetes/kubernetes/pull/120360), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery] +- Fixes a bug where images pinned by the container runtime can be garbage collected by kubelet.
([#120054](https://github.com/kubernetes/kubernetes/pull/120054), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- Fixes a regression exposed in 1.27 by kubectl switching to openapi v3 by making apiregistration.k8s.io discoverable in openapi/v3 ([#119841](https://github.com/kubernetes/kubernetes/pull/119841), [@atiratree](https://github.com/atiratree)) [SIG API Machinery] +- Fixes a regression exposed in 1.27 by kubectl switching to openapi v3 by resolving a flake in openapi v3 aggregation ([#119839](https://github.com/kubernetes/kubernetes/pull/119839), [@atiratree](https://github.com/atiratree)) [SIG API Machinery] +- Fixes issue https://github.com/kubernetes-sigs/cloud-provider-azure/issues/4230 and removes the additional filtering on `NotReady` nodes by the azure cloud provider code ([#119128](https://github.com/kubernetes/kubernetes/pull/119128), [@alexanderConstantinescu](https://github.com/alexanderConstantinescu)) [SIG Cloud Provider] +- Fixes regression in 1.27.2 causing running pods with devices to be terminated if kubelet is restarted ([#119432](https://github.com/kubernetes/kubernetes/pull/119432), [@ffromani](https://github.com/ffromani)) [SIG Node and Testing] +- Ignore context canceled from validate and mutate webhook ([#120020](https://github.com/kubernetes/kubernetes/pull/120020), [@divyasri537](https://github.com/divyasri537)) [SIG API Machinery] +- Kubeadm: fix nil pointer when etcd member is already removed ([#120011](https://github.com/kubernetes/kubernetes/pull/120011), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + +### Other (Cleanup or Flake) + +- When retrieving event resources, the reportingController and reportingInstance fields in the event will contain values. ([#120067](https://github.com/kubernetes/kubernetes/pull/120067), [@HirazawaUi](https://github.com/HirazawaUi)) [SIG Instrumentation] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/google/cel-go: [v0.12.6 → v0.12.7](https://github.com/google/cel-go/compare/v0.12.6...v0.12.7) + +### Removed +_Nothing has changed._ + + + # v1.27.5 diff --git a/CHANGELOG/CHANGELOG-1.28.md b/CHANGELOG/CHANGELOG-1.28.md index 0b81ae3a5309a..3d16e2ecd974a 100644 --- a/CHANGELOG/CHANGELOG-1.28.md +++ b/CHANGELOG/CHANGELOG-1.28.md @@ -1,173 +1,433 @@ -- [v1.28.1](#v1281) - - [Downloads for v1.28.1](#downloads-for-v1281) +- [v1.28.3](#v1283) + - [Downloads for v1.28.3](#downloads-for-v1283) - [Source Code](#source-code) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - [Container Images](#container-images) - - [Changelog since v1.28.0](#changelog-since-v1280) - - [Important Security Information](#important-security-information) - - [CVE-2023-3955: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3955-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) - - [CVE-2023-3676: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3676-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) + - [Changelog since v1.28.2](#changelog-since-v1282) - [Changes by Kind](#changes-by-kind) + - [Feature](#feature) + - [Failing Test](#failing-test) + - [Bug or Regression](#bug-or-regression) - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies) - [Added](#added) - [Changed](#changed) - [Removed](#removed) -- [v1.28.0](#v1280) - - [Downloads for 
v1.28.0](#downloads-for-v1280) +- [v1.28.2](#v1282) + - [Downloads for v1.28.2](#downloads-for-v1282) - [Source Code](#source-code-1) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - [Container Images](#container-images-1) - - [Changelog since v1.27.0](#changelog-since-v1270) - - [Urgent Upgrade Notes](#urgent-upgrade-notes) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) + - [Changelog since v1.28.1](#changelog-since-v1281) - [Changes by Kind](#changes-by-kind-1) - - [Deprecation](#deprecation) - [API Change](#api-change) - - [Feature](#feature) - - [Documentation](#documentation) - - [Failing Test](#failing-test) - - [Bug or Regression](#bug-or-regression) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) + - [Feature](#feature-1) + - [Bug or Regression](#bug-or-regression-1) - [Dependencies](#dependencies-1) - [Added](#added-1) - [Changed](#changed-1) - [Removed](#removed-1) -- [v1.28.0-rc.1](#v1280-rc1) - - [Downloads for v1.28.0-rc.1](#downloads-for-v1280-rc1) +- [v1.28.1](#v1281) + - [Downloads for v1.28.1](#downloads-for-v1281) - [Source Code](#source-code-2) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - [Container Images](#container-images-2) - - [Changelog since v1.28.0-rc.0](#changelog-since-v1280-rc0) + - [Changelog since v1.28.0](#changelog-since-v1280) + - [Important Security Information](#important-security-information) + - [CVE-2023-3955: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3955-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) + - [CVE-2023-3676: Insufficient input sanitization on Windows nodes leads to privilege escalation](#cve-2023-3676-insufficient-input-sanitization-on-windows-nodes-leads-to-privilege-escalation) - [Changes by Kind](#changes-by-kind-2) - - [API Change](#api-change-1) - - [Feature](#feature-1) - - [Bug or Regression](#bug-or-regression-1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-2) - [Added](#added-2) - [Changed](#changed-2) - [Removed](#removed-2) -- [v1.28.0-rc.0](#v1280-rc0) - - [Downloads for v1.28.0-rc.0](#downloads-for-v1280-rc0) +- [v1.28.0](#v1280) + - [Downloads for v1.28.0](#downloads-for-v1280) - [Source Code](#source-code-3) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - [Container Images](#container-images-3) - - [Changelog since v1.28.0-beta.0](#changelog-since-v1280-beta0) + - [Changelog since v1.27.0](#changelog-since-v1270) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) - [Changes by Kind](#changes-by-kind-3) - - [API Change](#api-change-2) + - [Deprecation](#deprecation) + - [API Change](#api-change-1) - [Feature](#feature-2) + - [Documentation](#documentation) + - [Failing Test](#failing-test-1) + - [Bug or Regression](#bug-or-regression-2) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) - [Dependencies](#dependencies-3) - [Added](#added-3) - [Changed](#changed-3) - [Removed](#removed-3) -- [v1.28.0-beta.0](#v1280-beta0) - - [Downloads for v1.28.0-beta.0](#downloads-for-v1280-beta0) +- [v1.28.0-rc.1](#v1280-rc1) + - [Downloads for v1.28.0-rc.1](#downloads-for-v1280-rc1) - [Source Code](#source-code-4) - [Client 
Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - [Container Images](#container-images-4) - - [Changelog since v1.28.0-alpha.4](#changelog-since-v1280-alpha4) + - [Changelog since v1.28.0-rc.0](#changelog-since-v1280-rc0) - [Changes by Kind](#changes-by-kind-4) - - [Deprecation](#deprecation-1) - - [API Change](#api-change-3) + - [API Change](#api-change-2) - [Feature](#feature-3) - - [Failing Test](#failing-test-1) - - [Bug or Regression](#bug-or-regression-2) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) + - [Bug or Regression](#bug-or-regression-3) - [Dependencies](#dependencies-4) - [Added](#added-4) - [Changed](#changed-4) - [Removed](#removed-4) -- [v1.28.0-alpha.4](#v1280-alpha4) - - [Downloads for v1.28.0-alpha.4](#downloads-for-v1280-alpha4) +- [v1.28.0-rc.0](#v1280-rc0) + - [Downloads for v1.28.0-rc.0](#downloads-for-v1280-rc0) - [Source Code](#source-code-5) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - [Container Images](#container-images-5) - - [Changelog since v1.28.0-alpha.3](#changelog-since-v1280-alpha3) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) + - [Changelog since v1.28.0-beta.0](#changelog-since-v1280-beta0) - [Changes by Kind](#changes-by-kind-5) - - [Deprecation](#deprecation-2) - - [API Change](#api-change-4) + - [API Change](#api-change-3) - [Feature](#feature-4) - - [Bug or Regression](#bug-or-regression-3) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-5) - [Added](#added-5) - [Changed](#changed-5) - [Removed](#removed-5) -- [v1.28.0-alpha.3](#v1280-alpha3) - - [Downloads for v1.28.0-alpha.3](#downloads-for-v1280-alpha3) +- [v1.28.0-beta.0](#v1280-beta0) + - [Downloads for v1.28.0-beta.0](#downloads-for-v1280-beta0) - [Source Code](#source-code-6) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - [Container Images](#container-images-6) - - [Changelog since v1.28.0-alpha.2](#changelog-since-v1280-alpha2) + - [Changelog since v1.28.0-alpha.4](#changelog-since-v1280-alpha4) - [Changes by Kind](#changes-by-kind-6) - - [Deprecation](#deprecation-3) - - [API Change](#api-change-5) + - [Deprecation](#deprecation-1) + - [API Change](#api-change-4) - [Feature](#feature-5) + - [Failing Test](#failing-test-2) - [Bug or Regression](#bug-or-regression-4) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-6) - [Added](#added-6) - [Changed](#changed-6) - [Removed](#removed-6) -- [v1.28.0-alpha.2](#v1280-alpha2) - - [Downloads for v1.28.0-alpha.2](#downloads-for-v1280-alpha2) +- [v1.28.0-alpha.4](#v1280-alpha4) + - [Downloads for v1.28.0-alpha.4](#downloads-for-v1280-alpha4) - [Source Code](#source-code-7) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - [Container Images](#container-images-7) - - [Changelog since v1.28.0-alpha.1](#changelog-since-v1280-alpha1) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-2) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-2) + - [Changelog since v1.28.0-alpha.3](#changelog-since-v1280-alpha3) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) + - [(No, 
really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) - [Changes by Kind](#changes-by-kind-7) + - [Deprecation](#deprecation-2) + - [API Change](#api-change-5) - [Feature](#feature-6) - [Bug or Regression](#bug-or-regression-5) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) - [Dependencies](#dependencies-7) - [Added](#added-7) - [Changed](#changed-7) - [Removed](#removed-7) -- [v1.28.0-alpha.1](#v1280-alpha1) - - [Downloads for v1.28.0-alpha.1](#downloads-for-v1280-alpha1) +- [v1.28.0-alpha.3](#v1280-alpha3) + - [Downloads for v1.28.0-alpha.3](#downloads-for-v1280-alpha3) - [Source Code](#source-code-8) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - [Container Images](#container-images-8) - - [Changelog since v1.27.0](#changelog-since-v1270-1) + - [Changelog since v1.28.0-alpha.2](#changelog-since-v1280-alpha2) - [Changes by Kind](#changes-by-kind-8) - - [Deprecation](#deprecation-4) + - [Deprecation](#deprecation-3) - [API Change](#api-change-6) - [Feature](#feature-7) - - [Documentation](#documentation-1) - - [Failing Test](#failing-test-2) - [Bug or Regression](#bug-or-regression-6) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) - [Dependencies](#dependencies-8) - [Added](#added-8) - [Changed](#changed-8) - [Removed](#removed-8) +- [v1.28.0-alpha.2](#v1280-alpha2) + - [Downloads for v1.28.0-alpha.2](#downloads-for-v1280-alpha2) + - [Source Code](#source-code-9) + - [Client Binaries](#client-binaries-9) + - [Server Binaries](#server-binaries-9) + - [Node Binaries](#node-binaries-9) + - [Container Images](#container-images-9) + - [Changelog since v1.28.0-alpha.1](#changelog-since-v1280-alpha1) + - [Urgent Upgrade Notes](#urgent-upgrade-notes-2) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-2) + - [Changes by Kind](#changes-by-kind-9) + - [Feature](#feature-8) + - [Bug or Regression](#bug-or-regression-7) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-6) + - [Dependencies](#dependencies-9) + - [Added](#added-9) + - [Changed](#changed-9) + - [Removed](#removed-9) +- [v1.28.0-alpha.1](#v1280-alpha1) + - [Downloads for v1.28.0-alpha.1](#downloads-for-v1280-alpha1) + - [Source Code](#source-code-10) + - [Client Binaries](#client-binaries-10) + - [Server Binaries](#server-binaries-10) + - [Node Binaries](#node-binaries-10) + - [Container Images](#container-images-10) + - [Changelog since v1.27.0](#changelog-since-v1270-1) + - [Changes by Kind](#changes-by-kind-10) + - [Deprecation](#deprecation-4) + - [API Change](#api-change-7) + - [Feature](#feature-9) + - [Documentation](#documentation-1) + - [Failing Test](#failing-test-3) + - [Bug or Regression](#bug-or-regression-8) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-7) + - [Dependencies](#dependencies-10) + - [Added](#added-10) + - [Changed](#changed-10) + - [Removed](#removed-10) +# v1.28.3 + + +## Downloads for v1.28.3 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes.tar.gz) | 98fd6d3713e8708e7664adf7e9fcae73b570ec0e45b40aa9e8344eb9301b5b82c103e263347bf6996813ef6c8df302727754b955c20afa1c69f3784c0a2432d5 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-src.tar.gz) | 
1568c2f0464dd4c3c99e636dcd8ff6ec7716ae0c7e2c6bcb0b98cf30006f282bc011a2296a449026886f84ff7d37963f59e4cc5afdf45ec8d392b7d71a738f55 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-darwin-amd64.tar.gz) | a49da64f8408cd91e082ef199daf5f1d84460620a78c8f9a65ee0b1905a02b4f4ade2abe95e342291c4ea341be2dccb53cdd9b7f05ee79c33772c786f36e116f +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-darwin-arm64.tar.gz) | 5565934425a12c8a38e2270839624dcf617346ceca07c2b5f8fda25940c6361b6ec948babb2d02d855edb2fadaa57c12856a8f7fc67a34a606710486b326a4ce +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-linux-386.tar.gz) | 58f0a7342903350e25acca29ffb59851fff47c49e66a4d5f27e73b49baa570596741dc1989a53f0a84361d5dcc1f41a3bb3bf8369ee7c7ac85275056bd17e59b +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-linux-amd64.tar.gz) | 0d5e1d09eb0008a67b1d59aa63e8b6e7e7230ba1ab32cdac8a722188d166f5dc9008b595947c42aff8a410596ece0a4346cd19ac9ab3a2913cee0eaab127b238 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-linux-arm.tar.gz) | eae7aa6f40b94dd3d098f3a5c788e1b9dc3051a055b6bac64a602c9ebfcc70645231269c0322abf94f9fd65348c16c0be78385d324e80e00ef4843b0f82cd49f +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-linux-arm64.tar.gz) | 95192e41d73e4b0585b8fd54e4c79c92c1ed9d37b80dfa8c8d3a1e289b5a7c32d67de4f5fce193e5f0fc82867f7c6f73d75fdc516ca437c236930eff90106088 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-linux-ppc64le.tar.gz) | 3e4289893cb3f8492d99b77d09eddaa3d55ee2cbf6a70c7ff1c9ce38f6b744c62c545a55d01d43d716876c648f628044945e90c361a82cc1a008ae808b29c92b +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-linux-s390x.tar.gz) | 8a2b4a3180752612dd636c26853e9b23fb7a58a8b631f6f29172c3002ebdf7086d7def21a187ced179fda4d08a613390dee1a0dae46e5d136c9cb8813da54049 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-windows-386.tar.gz) | 57c0c4dfffe7e81d5144ee672d200e8204aab635701b2418e6cdb6eb130a65bbe08f7651dfdad7ae047818a33fb37aad2b7d6bfbb2d853c35108f1462c3cfa27 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-windows-amd64.tar.gz) | 3607e1ce781dcd636de8bc7f470257675c6e333bb6a56948016463b2581b2ed4bad1f7b19edf5e0fccaa767d4cc57fdc196f0ce18182001901f0084bd8c5b98b +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-client-windows-arm64.tar.gz) | a1fcacceccf712c752521a505e14aed113c75f40b690293e9f5411c5bfaeab9946cd2f067cdf7c4e5f57407c104feb9f4fa6a74c763653c460b89cae4a0d317e + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-server-linux-amd64.tar.gz) | cca2f7a1aa100c2ccc789536f27015848a45c7261523e605a2dcd0d49a06db85320706725c7f34cb9e90402f6d3349798a6d62c160e6811acb7c5bccd54aaff2 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-server-linux-arm64.tar.gz) | b5b2705a45d0ce2bf7bc3b2a5854796497b5b88a77aabae162fd6fb9e20c1fa71ba620c3183d4098a4f2f9f029406e3c5a36c9d21ac2471e51029fbee984c3db +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-server-linux-ppc64le.tar.gz) | 
9b8637025481cb4774491634b62d03f94e152728710fe2e3b08a5f895d1d2902ec287f73a07ef441e16c5af8f3fc20659eec478f12f816948d2229123378a0f2 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-server-linux-s390x.tar.gz) | 78b2eb80422e23e7124f54103b83438d1ef18f25dd7c0d598f42f2aac5982f49165fee7da5a0d407ac81dda11161bc6a676cb4ce39fdcd5b00347662848f7428 + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-node-linux-amd64.tar.gz) | fa04b8aaaa0c8cf9eec44f5a7b623547ab4821db45c6cd8c877b2eb0b6419c5f5ee5f2181af5bad9d1017811e1ea7b78362e1d0d6ba455e5c0cf899f2ce7d996 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-node-linux-arm64.tar.gz) | f18549315c58a86b4ec96eb04ea3dc10e3c01f9b835f721d8d20de4053e345e2789d7a4c211422a10f0f2b0ec3f12535766f61aabf174f400454434d4534c649 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-node-linux-ppc64le.tar.gz) | 03bbf9a6053a42fef41048470cd4e8956bb0cd6b3c407ce86f5192f2a2c95c97ad9c9823d7e257882bf4cf2489b5225a3f49e7948ff7f1e758d4084faef0c1b8 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-node-linux-s390x.tar.gz) | 2495e2810f763174102884d25e932d2dcd6984adf1bfe6733837c23174b20ee8fe47d6a9961ccc86fe233a161544d153a462e71badf6acc8d4f89513d82bcd37 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.28.3/kubernetes-node-windows-amd64.tar.gz) | 0e29716b0e16bd3aa3e85d85fb6ddc0de895a7ad4b0d6aacc5503c93b12f67139138f92b2b0dbf91001f32a3c9ac31a0f9116cfff3701d3546ac204f58791c0a + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. 
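As a quick check (a sketch assuming a Docker client with the `manifest` subcommand available), the per-architecture entries of one of these manifest lists can be inspected before pulling:

```bash
# Show the platforms published under the kube-apiserver manifest list for this release.
docker manifest inspect registry.k8s.io/kube-apiserver:v1.28.3
```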
+ +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.28.3](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.28.3](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.28.3](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.28.3](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.28.3](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) +[registry.k8s.io/kubectl:v1.28.3](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-s390x) + +## Changelog since v1.28.2 + +## Changes by Kind + +### Feature + +- Kubernetes is now built with Go 1.20.10 ([#121153](https://github.com/kubernetes/kubernetes/pull/121153), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] +- Kubernetes is now built with Go 1.20.9 ([#121025](https://github.com/kubernetes/kubernetes/pull/121025), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Failing 
Test + +- E2e framework: retrying after intermittent apiserver failures was fixed in WaitForPodsResponding ([#120559](https://github.com/kubernetes/kubernetes/pull/120559), [@pohly](https://github.com/pohly)) [SIG Testing] + +### Bug or Regression + +- Adds an opt-in mitigation for http/2 DOS vulnerabilities for CVE-2023-44487 and CVE-2023-39325 for the API server when the client is unauthenticated. The mitigation may be enabled by setting the `UnauthenticatedHTTP2DOSMitigation` feature gate to `true` (it is disabled by default). An API server fronted by an L7 load balancer that already mitigates these http/2 attacks may choose not to enable the kube-apiserver mitigation to avoid disrupting load balancer → kube-apiserver connections if http/2 requests from multiple clients share the same backend connection. An API server on a private network may choose not to enable the kube-apiserver mitigation to prevent performance regressions for unauthenticated clients. Authenticated requests rely on the fix in golang.org/x/net v0.17.0 alone. https://issue.k8s.io/121197 tracks further mitigation of http/2 attacks by authenticated clients. ([#121196](https://github.com/kubernetes/kubernetes/pull/121196), [@enj](https://github.com/enj)) [SIG API Machinery] +- Fix a 1.28.0 regression where adding aggregated APIService objects could cause apiserver to panic and affect the health check ([#121040](https://github.com/kubernetes/kubernetes/pull/121040), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery and Testing] +- Fix a bug in the cronjob controller where already-created jobs may be missing from the status. ([#120649](https://github.com/kubernetes/kubernetes/pull/120649), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Fixed a 1.28.0 regression where kube-controller-manager can crash when StatefulSet with Parallel policy and PVC labels is scaled up. ([#121184](https://github.com/kubernetes/kubernetes/pull/121184), [@aleksandra-malinowska](https://github.com/aleksandra-malinowska)) [SIG Apps] +- Fixed a bug where containers would not start on cgroupv2 systems where swap is disabled. ([#120924](https://github.com/kubernetes/kubernetes/pull/120924), [@klueska](https://github.com/klueska)) [SIG Node] +- Fixed a regression in kube-proxy where it might refuse to start if given + single-stack IPv6 configuration options on a node that has both IPv4 and + IPv6 IPs. ([#121008](https://github.com/kubernetes/kubernetes/pull/121008), [@danwinship](https://github.com/danwinship)) [SIG Network] +- Fixed an issue where all the pods in a namespace could be drained when an empty selector, i.e. "{}", is specified in a Pod Disruption Budget (PDB) ([#121131](https://github.com/kubernetes/kubernetes/pull/121131), [@sairameshv](https://github.com/sairameshv)) [SIG Apps] +- Fixed attaching volumes after detach errors. Now volumes that failed to detach are not treated as attached; Kubernetes will make sure they are fully attached before they can be used by pods.
([#120595](https://github.com/kubernetes/kubernetes/pull/120595), [@jsafrane](https://github.com/jsafrane)) [SIG Apps and Storage] +- Fixed bug to surface events for the following metrics: apiserver_encryption_config_controller_automatic_reload_failures_total, apiserver_encryption_config_controller_automatic_reload_last_timestamp_seconds, apiserver_encryption_config_controller_automatic_reload_success_total ([#120544](https://github.com/kubernetes/kubernetes/pull/120544), [@ritazh](https://github.com/ritazh)) [SIG API Machinery, Auth and Testing] +- Fixes a bug where Services using finalizers may hold onto ClusterIP and/or NodePort allocated resources for longer than expected if the finalizer is removed using the status subresource ([#120654](https://github.com/kubernetes/kubernetes/pull/120654), [@aojea](https://github.com/aojea)) [SIG Testing] +- Revised the logic for DaemonSet rolling update to exclude nodes if scheduling constraints are not met. + This eliminates the problem of rolling updates to a DaemonSet getting stuck around tolerations. ([#120785](https://github.com/kubernetes/kubernetes/pull/120785), [@mochizuki875](https://github.com/mochizuki875)) [SIG Apps and Testing] +- Sometimes, the scheduler incorrectly placed a pod in the "unschedulable" queue instead of the "backoff" queue. This happened when some plugin previously declared the pod as "unschedulable" and then in a later attempt encounters some other error. Scheduling of that pod then got delayed by up to five minutes, after which periodic flushing moved the pod back into the "active" queue. ([#120334](https://github.com/kubernetes/kubernetes/pull/120334), [@pohly](https://github.com/pohly)) [SIG Scheduling] + +### Other (Cleanup or Flake) + +- Fixes an issue where the vsphere cloud provider will not trust a certificate if: + - The issuer of the certificate is unknown (x509.UnknownAuthorityError) + - The requested name does not match the set of authorized names (x509.HostnameError) + - The error surfaced after attempting a connection contains one of the substrings: "certificate is not trusted" or "certificate signed by unknown authority" ([#120768](https://github.com/kubernetes/kubernetes/pull/120768), [@MadhavJivrajani](https://github.com/MadhavJivrajani)) [SIG Architecture and Cloud Provider] +- Set the resolution for the job_controller_job_sync_duration_seconds metric from 4ms to 1min ([#120667](https://github.com/kubernetes/kubernetes/pull/120667), [@mimowo](https://github.com/mimowo)) [SIG Apps and Instrumentation] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/vmware/govmomi: [v0.30.0 → v0.30.6](https://github.com/vmware/govmomi/compare/v0.30.0...v0.30.6) +- golang.org/x/crypto: v0.11.0 → v0.14.0 +- golang.org/x/net: v0.13.0 → v0.17.0 +- golang.org/x/sys: v0.10.0 → v0.13.0 +- golang.org/x/term: v0.10.0 → v0.13.0 +- golang.org/x/text: v0.11.0 → v0.13.0 + +### Removed +_Nothing has changed._ + + + +# v1.28.2 + + +## Downloads for v1.28.2 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes.tar.gz) | f6e13f2632697aab3ce6230d777240dd3d9c23b65eba7ff7d1df5d330e4dd926f8f439b77d823f8d08f44ddcd7eeca476af6d83eaa29cf623e86f2e4f315074a +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-src.tar.gz) | 3c21536962251eb199e4b0f42379cdfa172e826b10a28d8946df23bb8bae5e12d09647448d1f7a9c7146166178dd38398dee308dcc1e604000be908e1e0bbe89 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-darwin-amd64.tar.gz) | b85d0f2da76708a934cb8cecdf08a2d7c146c8f8209f49deab82b01c15842cf3c0631e01977af20230e69d478dfb21b5bf6acf9fc985d9ae27d1126f7a9f1112 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-darwin-arm64.tar.gz) | 0e556a34b3c659c45a9368b8b0d709831bdfa6562adb48dd5c924085806f3e1b76d4ba3b5dd719bc2d126f1bd640ddc94ddbd37515168a4de1a358b4605e031a +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-linux-386.tar.gz) | a091434bc89f762655fb76e301c9287297ab48079118eb045589d6ef246fcda307a1799732178c568ea8c64211b5bdae3ba7a836bdd60d1c61bb7dcb7b7ee324 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-linux-amd64.tar.gz) | ab534cd06d8cc89d1288590cfae98415facaa7db2f481d8f6be0a20574d2990cc55348cf98386c34df7788aa80ff018fc844816a2b605bcb350a82d752738fdc +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-linux-arm.tar.gz) | 31fb2570bd4dff5ad9f6525e33fa80847ee35d2804a1c81af8ce27855a1b4d8267bea3b522ac90bd49bc5c6a9a9fd9388a14899ebd48baac214a98644f357e02 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-linux-arm64.tar.gz) | 2304f6888752dd22e898526df091b66aae85835690e922a8e017d57e077dd1c8fcdfca16fc5aba94e9fb51ad800832305a574aee24c6cfa5d37278c8e28a144e +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-linux-ppc64le.tar.gz) | 5dbb4fefd197b14ccdc3b82d088b0e09987b1a8afbf47e03abb6707c455716658ed95d7e2dce7e5d7e12981febd53eebd0ed6296d282d2ce317c191a2c8116e9 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-linux-s390x.tar.gz) | 455e436244ff306604eb5d8a230a24186799ef5c462b7f278bb2d62e36245639db7a0d89241682dc201351a4c648ba788acdd7ac73a486cd6db426e1079ed87b +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-windows-386.tar.gz) | 96a4cf768fb59ad626a383bddcc9bab433f9d309ba3b06a8c9d927799a5ee6c4645412d22a190255c9a3e7104bf6d914f5976fcdd39876bdf1598c72dabb68a7 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-windows-amd64.tar.gz) | e7075cf3f0103edd22962fdb5a9adb4f80249a4adc8309794fc15f2f3d8e934feee6adc47c1724cc3a1c497d9cc0d4afbe0d66511042670cd2f1da1d82c70ba3 +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-client-windows-arm64.tar.gz) | d230193bc1f73f6834f0a9e919b673b7fcd645343773d9cf05b33ce95d81d5ed6f8efa1620e7099e8291beab47a2f3e058dde25a96fd0873b2de2ab28e7c3b7d + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-server-linux-amd64.tar.gz) | cd54f2f48733806208d3b1585ed307bebf91893e250a1eb3e18355c9a9e6d1f75a70966cb66165be2fbb8566e5368b14f66f63a8929fbceca30aa73bfd491441 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-server-linux-arm64.tar.gz) | e879568fb40ac54b897ac52e39aa3077278f9ea502c4b7d639a247d503be85db623c264cc30300016ac1651db2c93bc82420decb5e904af396f54e40d3aa33ff +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-server-linux-ppc64le.tar.gz) | 212ef04a6d443f239fa6ecb34df5f4fc93f172dab5a5d2931a0554eafdd3901e8504f6e8918712c44f843f3be55adb83c241839d60a9de1d4b6988035f47dc6d +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-server-linux-s390x.tar.gz) | 
b7efe45fbb811ee09e0c8daec0b608546f065c389e2b480c0d5178ea778690f11fc3d880a84baba2331bb30042898a616b5a4f7934456543aaa9b3fc0169f923 + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-node-linux-amd64.tar.gz) | adafa3beb4525d898a602ec87f1d2b35ad89f71f6b89aad81ee0cdff97a2916ef323a21b5007d61bb0453e12d3058ad21861184049eb22c26b86be4dc268e7d9 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-node-linux-arm64.tar.gz) | c9ece93728bc2712004d508c37e692add5cc358437ee0b209ef062b81db137cccf85bf9f4560c6436c0b83193a4ba23fa28e06dd432281b946f235aa30e88842 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-node-linux-ppc64le.tar.gz) | 85918f6235563c10d58ae18e0a1918011492c4f812a753ba185447b6080aae88ccf94c6008519694fb9fca4b2a9cc02dec6ae91f1ca55a489b383d85c8f7fa9c +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-node-linux-s390x.tar.gz) | ea59ee997d3df3d89405be4943dbe8f76cc8066e98173db30f0f83ad06a9c60c73217004f837f14310bf17228fd0c4e2d2917632ddda3cc3e483c353edc38745 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.28.2/kubernetes-node-windows-amd64.tar.gz) | b8125003fcd8fa7e89d7cbc49f634cc01cd8fdf198a3325f22e5bb3743923e03e82c3d4a7545b32b0da82c950b8d887997e502839359027f194de4a0a4774654 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. + +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.28.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.28.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.28.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.28.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), 
[ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.28.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) +[registry.k8s.io/kubectl:v1.28.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-s390x) + +## Changelog since v1.28.1 + +## Changes by Kind + +### API Change + +- Fixed a bug where CEL expressions in CRD validation rules would incorrectly compute a high estimated cost for functions that return strings, lists or maps. + The incorrect cost was evident when the result of a function was used in subsequent operations. ([#119807](https://github.com/kubernetes/kubernetes/pull/119807), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth and Cloud Provider] +- Mark Job onPodConditions as optional in pod failure policy ([#120208](https://github.com/kubernetes/kubernetes/pull/120208), [@mimowo](https://github.com/mimowo)) [SIG API Machinery and Apps] + +### Feature + +- Kubernetes is now built with Go 1.20.8 ([#120495](https://github.com/kubernetes/kubernetes/pull/120495), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] + +### Bug or Regression + +- Fix OpenAPI v3 not being cleaned up after deleting APIServices ([#120108](https://github.com/kubernetes/kubernetes/pull/120108), [@tnqn](https://github.com/tnqn)) [SIG API Machinery and Testing] +- Fix a 1.28 regression in scheduler: a pod with concurrent events could incorrectly get moved to the unschedulable queue where it could get stuck until the next periodic purging after 5 minutes if there was no other event for it. ([#120445](https://github.com/kubernetes/kubernetes/pull/120445), [@pohly](https://github.com/pohly)) [SIG Scheduling] +- Fix a concurrent map access in TopologyCache's `HasPopulatedHints` method.
([#120372](https://github.com/kubernetes/kubernetes/pull/120372), [@Miciah](https://github.com/Miciah)) [SIG Network] +- Fixed a 1.26 scheduling regression by ensuring that preemption is skipped when a PreFilter plugin returns `UnschedulableAndUnresolvable` ([#119951](https://github.com/kubernetes/kubernetes/pull/119951), [@sanposhiho](https://github.com/sanposhiho)) [SIG Scheduling] +- Fixed a 1.27 scheduling regression where the PostFilter plugin may not function if previous PreFilter plugins return Skip ([#119942](https://github.com/kubernetes/kubernetes/pull/119942), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] +- Fixed a 1.28 regression around restarting init containers in the right order relative to normal containers ([#120440](https://github.com/kubernetes/kubernetes/pull/120440), [@gjkim42](https://github.com/gjkim42)) [SIG Node and Testing] +- Fixed a regression in default 1.27 configurations in kube-apiserver: fixed the AggregatedDiscoveryEndpoint feature (beta in 1.27+) to successfully fetch discovery information from aggregated API servers that do not check `Accept` headers when serving the `/apis` endpoint ([#120359](https://github.com/kubernetes/kubernetes/pull/120359), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery] +- Fixes a 1.28 regression handling negative index json patches ([#120329](https://github.com/kubernetes/kubernetes/pull/120329), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] +- Fixes a bug where images pinned by the container runtime can be garbage collected by kubelet. ([#120053](https://github.com/kubernetes/kubernetes/pull/120053), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- Ignore context canceled errors from validate and mutate webhooks ([#120155](https://github.com/kubernetes/kubernetes/pull/120155), [@divyasri537](https://github.com/divyasri537)) [SIG API Machinery] +- Kubeadm: fix nil pointer when etcd member is already removed ([#120010](https://github.com/kubernetes/kubernetes/pull/120010), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/evanphx/json-patch: [v5.6.0+incompatible → v4.12.0+incompatible](https://github.com/evanphx/json-patch/compare/v5.6.0...v4.12.0) +- github.com/google/cel-go: [v0.16.0 → v0.16.1](https://github.com/google/cel-go/compare/v0.16.0...v0.16.1) + +### Removed +_Nothing has changed._ + + + # v1.28.1 diff --git a/CHANGELOG/CHANGELOG-1.29.md b/CHANGELOG/CHANGELOG-1.29.md new file mode 100644 index 0000000000000..b00c46a959ef5 --- /dev/null +++ b/CHANGELOG/CHANGELOG-1.29.md @@ -0,0 +1,438 @@ + + +- [v1.29.0-alpha.2](#v1290-alpha2) + - [Downloads for v1.29.0-alpha.2](#downloads-for-v1290-alpha2) + - [Source Code](#source-code) + - [Client Binaries](#client-binaries) + - [Server Binaries](#server-binaries) + - [Node Binaries](#node-binaries) + - [Container Images](#container-images) + - [Changelog since v1.29.0-alpha.1](#changelog-since-v1290-alpha1) + - [Changes by Kind](#changes-by-kind) + - [Feature](#feature) + - [Failing Test](#failing-test) + - [Bug or Regression](#bug-or-regression) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake) + - [Dependencies](#dependencies) + - [Added](#added) + - [Changed](#changed) + - [Removed](#removed) +- [v1.29.0-alpha.1](#v1290-alpha1) + - [Downloads for v1.29.0-alpha.1](#downloads-for-v1290-alpha1) + - [Source Code](#source-code-1) + - [Client Binaries](#client-binaries-1) + - [Server Binaries](#server-binaries-1) + - 
[Node Binaries](#node-binaries-1) + - [Container Images](#container-images-1) + - [Changelog since v1.28.0](#changelog-since-v1280) + - [Changes by Kind](#changes-by-kind-1) + - [Deprecation](#deprecation) + - [API Change](#api-change) + - [Feature](#feature-1) + - [Documentation](#documentation) + - [Failing Test](#failing-test-1) + - [Bug or Regression](#bug-or-regression-1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) + - [Dependencies](#dependencies-1) + - [Added](#added-1) + - [Changed](#changed-1) + - [Removed](#removed-1) + + + +# v1.29.0-alpha.2 + + +## Downloads for v1.29.0-alpha.2 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes.tar.gz) | 138f47b2c53030e171d368d382c911048ce5d8387450e5e6717f09ac8cf6289b6c878046912130d58d7814509bbc45dbc19d6ee4f24404321ea18b24ebab2a36 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-src.tar.gz) | 73ab06309d6f6cbcb8a417c068367b670a04dcbe90574a7906201dd70b9c322cd052818114b746a4d61b7bce6115ae547eaafc955c41053898a315c968db2f36 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | c9604fbb9e848a4b3dc85ee2836f74b4ccd321e4c72d22b2d4558eb0f0c3833bff35d0c36602c13c5c5c79e9233fda874bfa85433291ab3484cf61c9012ee515 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-darwin-arm64.tar.gz) | fed42ecbfc20b5f63ac48bbb9b73abc4b72aca76ac8bdd51b9ea6af053b1fc6a8e63b5e11f9d14c4814f03b49531da2536f1342cda2da03514c44ccf05c311b0 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 93c61229d7b07a476296b5b800c853c8e984101d5077fc19a195673f7543e7d2eb2599311c1846c91ef1f7ae29c3e05b6f41b873e92a3429563e3d83900050da +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | 4260b49733f6b0967c504e2246b455b2348b487e84f7a019fda8b4a87d43d27a03e7ed55b505764c14f2079c4c3d71c68d77f981b604e13e7210680f45ee66e3 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | 4e837fd2f55cbb5f93cdf60235511a85635485962f00e0378a95a7ff846eb86b7bf053203ab353b294131b2e2663d0e783dae79c18601d4d66f98a6e5152e96e +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | 6f3954d2adc289879984d18c2605110a7d5f0a5f6366233c25adf3a742f8dc1183e8a4d4747de8077af1045a259b150e0e86b27e10d683aa8decdc760ac6279b +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 741b76827ff9e810e490d8698eb7620826a16e978e5c7744a1fa0e65124690cfc9601e7f1c8f50e77f25185ba3176789ddcb7d5caaddde66436c31658bacde1d +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 0c635883e2f9caca03bcf3b42ba0b479f44c8cc2a3d5dd425b0fee278f3e884bef0e897fe51cbf00bb0bc061371805d9f9cbccf839477671f92e078c04728735 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-windows-386.tar.gz) | ebddbb358fd2d817908069eb66744dc62cae56ad470b1e36c6ebd0d2284e79ae5b9a5f8a86fef365f30b34e14093827ad736814241014f597e2ac88788102cf4 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | 
01a451a809cd45e7916a3e982e2b94d372accab9dfe20667e95c10d56f9194b997721c0c219ff7ff97828b6466108eec6e57dcb33e3e3b0c5f770af1514a9f1a +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-client-windows-arm64.tar.gz) | 473ba648ffde41fd5b63374cc1595eb43b873808c6b0cc5e939628937f3f7fb36dba4b7c7c8ef03408d557442094ec22e12c03f40be137f9cc99761b4cc1a1f8 + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | c3f7abcee3fdcf6f311b5de0bfe037318e646641c1ce311950d920252623cca285d1f1cef0e2d936c0f981edc1c725897a42aa9e03b77fe5f76f1090665d967f +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 17614842df6bb528434b8b063b1d1c3efc8e4eff9cbc182f049d811f68e08514026fbb616199a3dee97e62ce2fd1eda0b9778d8e74040e645c482cfe6a18a8b4 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 2f818035ef199a7745e24d2ce86abf6c52e351d7922885e264c5d07db3e0f21048c32db85f3044e01443abd87a45f92df52fda44e8df05000754b03f34132f2f +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | 96a34c768f347f23c46f990a8f6ddf3127b13f7a183453b92eb7bc27ce896767f31b38317a6ae5a11f2d4b459ec9564385f8abe61082a4165928edfee0c9765e + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | 66845cf86e32c19be9d339417a4772b9bcf51b2bf4d1ef5acc2e9eb006bbd19b3c036aa3721b3d8fe08b6fb82284ba25a6ecb5eb7b84f657cc968224d028f22c +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | 98902ee33242f9e78091433115804d54eafde24903a3515f0300f60c0273c7c0494666c221ce418d79e715f8ecf654f0edabc5b69765da26f83a812e963b5afb +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 82f1213b5942c5c1576afadb4b066dfa1427c7709adf6ba636b9a52dfdb1b20f62b1cc0436b265e714fbee08c71d8786295d2439c10cc05bd58b2ab2a87611d4 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | 7cb8cb65195c5dd63329d02907cdbb0f5473066606c108f4516570f449623f93b1ca822d5a00fad063ec8630e956fa53a0ab530a8487bccb01810943847d4942 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | 1222e2d7dbaf7920e1ba927231cc7e275641cf0939be1520632353df6219bbcb3b49515d084e7f2320a2ff59b2de9fee252d8f5e9c48d7509f1174c6cb357b66 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. 
+ +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.29.0-alpha.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.29.0-alpha.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.29.0-alpha.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.29.0-alpha.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.29.0-alpha.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) +[registry.k8s.io/kubectl:v1.29.0-alpha.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-s390x) + +## Changelog since v1.29.0-alpha.1 + +## Changes by Kind + +### Feature + +- Adds `apiserver_watch_list_duration_seconds` metrics. 
The metric measures response latency distribution in seconds for watch list requests, broken down by group, version, resource and scope ([#120490](https://github.com/kubernetes/kubernetes/pull/120490), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Instrumentation] +- Allow-list of metric labels can be configured by supplying a manifest using the --allow-metric-labels-manifest flag ([#118299](https://github.com/kubernetes/kubernetes/pull/118299), [@rexagod](https://github.com/rexagod)) [SIG Architecture and Instrumentation] +- Bump distroless-iptables to 0.3.3 based on Go 1.21.2 ([#121073](https://github.com/kubernetes/kubernetes/pull/121073), [@cpanato](https://github.com/cpanato)) [SIG Testing] +- Implements API for streaming for the etcd store implementation + + When sendInitialEvents ListOption is set together with watch=true, it begins the watch stream with synthetic init events followed by a synthetic "Bookmark" after which the server continues streaming events. ([#119557](https://github.com/kubernetes/kubernetes/pull/119557), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] +- Kubelet, when using an external cloud provider, temporarily initializes the node addresses using the --node-ip flag values if set, until the cloud provider overrides them. ([#121028](https://github.com/kubernetes/kubernetes/pull/121028), [@aojea](https://github.com/aojea)) [SIG Cloud Provider and Node] +- Kubernetes is now built with Go 1.21.2 ([#121021](https://github.com/kubernetes/kubernetes/pull/121021), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] +- Migrated the volumebinding scheduler plugins to use [contextual logging](https://k8s.io/docs/concepts/cluster-administration/system-logs/#contextual-logging). ([#116803](https://github.com/kubernetes/kubernetes/pull/116803), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG Instrumentation, Scheduling and Storage] +- The kube-apiserver exposes four new metrics to report errors in the clusterIP and nodePort allocation logic ([#120843](https://github.com/kubernetes/kubernetes/pull/120843), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network] + +### Failing Test + +- K8s.io/dynamic-resource-allocation: DRA drivers updating to this release are compatible with Kubernetes 1.27 and 1.28. ([#120868](https://github.com/kubernetes/kubernetes/pull/120868), [@pohly](https://github.com/pohly)) [SIG Node] + +### Bug or Regression + +- Cluster-bootstrap: improve the security of the functions responsible for generation and validation of bootstrap tokens ([#120400](https://github.com/kubernetes/kubernetes/pull/120400), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Security] +- Do not fail the volume attach or publish operation at kubelet if the target path directory already exists on the node. ([#119735](https://github.com/kubernetes/kubernetes/pull/119735), [@akankshapanse](https://github.com/akankshapanse)) [SIG Storage] +- Fix a regression introduced in release v1.28.0 where adding aggregated APIServices could cause the apiserver to panic and affect the health check ([#120814](https://github.com/kubernetes/kubernetes/pull/120814), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery and Testing] +- Fixed a bug where containers would not start on cgroupv2 systems where swap is disabled.
([#120784](https://github.com/kubernetes/kubernetes/pull/120784), [@elezar](https://github.com/elezar)) [SIG Node] +- Fixed a regression in kube-proxy where it might refuse to start if given + single-stack IPv6 configuration options on a node that has both IPv4 and + IPv6 IPs. ([#121008](https://github.com/kubernetes/kubernetes/pull/121008), [@danwinship](https://github.com/danwinship)) [SIG Network] +- Fixed attaching volumes after detach errors. Now volumes that failed to detach are not treated as attached; Kubernetes will make sure they are fully attached before they can be used by pods. ([#120595](https://github.com/kubernetes/kubernetes/pull/120595), [@jsafrane](https://github.com/jsafrane)) [SIG Apps and Storage] +- Fixes a regression (CLIENTSET_PKG: unbound variable) when invoking the deprecated generate-groups.sh script ([#120877](https://github.com/kubernetes/kubernetes/pull/120877), [@soltysh](https://github.com/soltysh)) [SIG API Machinery] +- K8s.io/dynamic-resource-allocation/controller: UnsuitableNodes did not handle a mix of allocated and unallocated claims correctly. ([#120338](https://github.com/kubernetes/kubernetes/pull/120338), [@pohly](https://github.com/pohly)) [SIG Node] +- K8s.io/dynamic-resource-allocation: handle a selected node which isn't listed as a potential node ([#120871](https://github.com/kubernetes/kubernetes/pull/120871), [@pohly](https://github.com/pohly)) [SIG Node] +- Kubeadm: fix the bug where kubeadm always does CRI detection when --config is passed, even if it is not required by the subcommand ([#120828](https://github.com/kubernetes/kubernetes/pull/120828), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] + +### Other (Cleanup or Flake) + +- Client-go: the k8s.io/client-go/tools events and record packages have new APIs for specifying a context and logger ([#120729](https://github.com/kubernetes/kubernetes/pull/120729), [@pohly](https://github.com/pohly)) [SIG API Machinery and Instrumentation] +- Deprecated the `--cloud-provider` and `--cloud-config` CLI parameters in kube-apiserver. + These parameters will be removed in a future release.
([#120903](https://github.com/kubernetes/kubernetes/pull/120903), [@dims](https://github.com/dims)) [SIG API Machinery] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/emicklei/go-restful/v3: [v3.9.0 → v3.11.0](https://github.com/emicklei/go-restful/v3/compare/v3.9.0...v3.11.0) +- github.com/onsi/ginkgo/v2: [v2.9.4 → v2.13.0](https://github.com/onsi/ginkgo/v2/compare/v2.9.4...v2.13.0) +- github.com/onsi/gomega: [v1.27.6 → v1.28.0](https://github.com/onsi/gomega/compare/v1.27.6...v1.28.0) +- golang.org/x/crypto: v0.11.0 → v0.12.0 +- golang.org/x/mod: v0.10.0 → v0.12.0 +- golang.org/x/net: v0.13.0 → v0.14.0 +- golang.org/x/sync: v0.2.0 → v0.3.0 +- golang.org/x/sys: v0.10.0 → v0.12.0 +- golang.org/x/term: v0.10.0 → v0.11.0 +- golang.org/x/text: v0.11.0 → v0.12.0 +- golang.org/x/tools: v0.8.0 → v0.12.0 + +### Removed +_Nothing has changed._ + + + +# v1.29.0-alpha.1 + + +## Downloads for v1.29.0-alpha.1 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes.tar.gz) | 107062e8da7c416206f18b4376e9e0c2ca97b37c720a047f2bc6cf8a1bdc2b41e84defd0a29794d9562f3957932c0786a5647450b41d2850a9b328826bb3248d +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-src.tar.gz) | 8182774faa5547f496642fdad7e2617a4d07d75af8ddf85fb8246087ddffab596528ffde29500adc9945d4e263fce766927ed81396a11f88876b3fa76628a371 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | ac9a08cd98af5eb27f8dde895510db536098dd52ee89682e7f103c793cb99cddcd992e3a349d526854caaa27970aa1ef964db4cc27d1009576fb604bf0c1cdf1 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-darwin-arm64.tar.gz) | 28744076618dcd7eca4175726d7f3ac67fe94f08f1b6ca4373b134a6402c0f5203f1146d79a211443c751b2f2825df3507166fc3c5e40a55d545c3e5d2a48e56 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-linux-386.tar.gz) | 0207a2571b6d0e6e55f36af9d2ed27f31eacfb23f2f54dd2eb8fbc38ef5b033edb24fb9a5ece7e7020fd921a9c841fff435512d12421bfa13294cc9c297eb877 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 57fc39ba259ae61b88c23fd136904395abc23c44f4b4db3e2922827ec7e6def92bc77364de3e2f6b54b27bb4b5e42e9cf4d1c0aa6d12c4a5a17788d9f996d9ad +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | 53a54d3fbda46162139a90616d708727c23d3aae0a2618197df5ac443ac3d49980a62034e3f2514f1a1622e4ce5f6e821d2124a61a9e63ce6d29268b33292949 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | ee3ca4626c802168db71ad55c1d8b45c03ec774c146dd6da245e5bb26bf7fd6728a477f1ad0c5094967a0423f94e35e4458c6716f3abe005e8fc55ae354174cf +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 60cd35076dd4afb9005349003031fa9f1802a2a120fbbe842d6fd061a1bca39baabcbb18fb4b6610a5ca626fc64e1d780c7aadb203d674697905489187a415ce +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 68fdd0fc35dfd6fae0d25d7834270c94b16ae860fccc4253e7c347ce165d10cadc190e8b320fd2c4afd508afc6c10f246b8a5f0148ca1b1d56f7b2843cc39d30 
+[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-windows-386.tar.gz) | 0c5d3dbfaaffa81726945510c972cc15895ea87bcd43b798675465fdadaa4d2d9597cb4fc6baee9ee719c919d1f46a9390c15cb0da60250f41eb4fcc3337b337 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 2e519867cbc793ea1c1e45f040de81b49c70b9b42fac072ac5cac36e8de71f0dddd0c64354631bcb2b3af36a0f377333c0cd885c2df36ef8cd7e6c8fd5628aa4 +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-client-windows-arm64.tar.gz) | 1a80cad80c1c9f753a38e6c951b771b0df820455141f40ba44e227f6acc81b59454f8dbff12e83c61bf647eaa1ff98944930969a99c96a087a35921f4e6ac968 + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | c74a3f7bdd16095fb366b4313e50984f2ee7cb99c77ad2bcccea066756ce6e0fc45f4528b79c8cb7e6370430ee2d03fa6bc10ca87a59d8684a59e1ebd3524afd +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | b6844b5769fd5687525dcedca42c7bb036f6acad65d3de3c8cda46dbbe0ac23c289fdb7fbf15f1c37184498d6a1fb018e41e1c97ded4581f045ad2039e3ddec2 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | a15eb2db4821454974920a987bb1e73bc4ee638b845b07f35cab55dcf482c142d3cdaed347bfa0452d5311b3d9152463a3dae1d176b6101ed081ec594e0d526c +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 60e24d8b4902821b436b5adebd6594ef0db79802d64787a1424aa6536873e2d749dfc6ebc2eb81db3240c925500a3e927ee7385188f866c28123736459e19b7b + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | 44832c7b90c88e7ca70737bad8d50ee8ba434ee7a94940f9d45beda9e9aadc7e2c973b65fcb986216229796a5807dae2470dbcf1ade5c075d86011eefe21509b +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | a13862d9bae0ff358377afc60f5222490a8e6bb7197d4a7d568edd4f150348f7a3dc7342129cd2d5c5353d2d43349b97c854df3e8886a8d52aedb95c634e3b5a +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | 57348f82bb4db8c230d8dffdef513ed75d7b267b226a5d15b3deb9783f8ed56fe40f8ce018ab34c28f9f8210b2e41b0f55d185dcdbaf912dd57e2ea78f8d3c53 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 2013eb4746e818cf336e0fee37650df98c19876030397803abce9531730eb0b95e6284f5a2abdd2b97090a67d07fd7a9c74c84fc7b4b83f0bce04a6dc9ad2555 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.29.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 3a4d63e2117cdbebc655e674bb017e246c263e893fc0ca3e8dc0091d6d9f96c9f0756c0fa8b45ba461502ae432f908ea922c21378b82ff3990b271f42eedc138 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. 
+ +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.29.0-alpha.1](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.29.0-alpha.1](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.29.0-alpha.1](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.29.0-alpha.1](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.29.0-alpha.1](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) +[registry.k8s.io/kubectl:v1.29.0-alpha.1](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kubectl-s390x) + +## Changelog since v1.28.0 + +## Changes by Kind + +### Deprecation + +- #### Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.: + + ([#119495](https://github.com/kubernetes/kubernetes/pull/119495), [@bzsuni](https://github.com/bzsuni)) [SIG API Machinery] + +### API Change + +- Added a new `ipMode` field to the `.status` of Services where 
`type` is set to `LoadBalancer`. + The new field is behind the `LoadBalancerIPMode` feature gate. ([#119937](https://github.com/kubernetes/kubernetes/pull/119937), [@RyanAoh](https://github.com/RyanAoh)) [SIG API Machinery, Apps, Cloud Provider, Network and Testing] +- Fixed a bug where CEL expressions in CRD validation rules would incorrectly compute a high estimated cost for functions that return strings, lists or maps. + The incorrect cost was evident when the result of a function was used in subsequent operations. ([#119800](https://github.com/kubernetes/kubernetes/pull/119800), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth and Cloud Provider] +- Go API: the ResourceRequirements struct needs to be replaced with VolumeResourceRequirements for use with volumes. ([#118653](https://github.com/kubernetes/kubernetes/pull/118653), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, Node, Scheduling, Storage and Testing] +- Kube-apiserver: adds the --authentication-config flag for reading AuthenticationConfiguration files. The --authentication-config flag is mutually exclusive with the existing --oidc-* flags. ([#119142](https://github.com/kubernetes/kubernetes/pull/119142), [@aramase](https://github.com/aramase)) [SIG API Machinery, Auth and Testing] +- Kube-scheduler component config (KubeSchedulerConfiguration) kubescheduler.config.k8s.io/v1beta3 is removed in v1.29. Migrate kube-scheduler configuration files to kubescheduler.config.k8s.io/v1. ([#119994](https://github.com/kubernetes/kubernetes/pull/119994), [@SataQiu](https://github.com/SataQiu)) [SIG Scheduling and Testing] +- Mark the onPodConditions field as optional in Job's pod failure policy. ([#120204](https://github.com/kubernetes/kubernetes/pull/120204), [@mimowo](https://github.com/mimowo)) [SIG API Machinery and Apps] +- Retry NodeStageVolume calls if the CSI node driver is not running ([#120330](https://github.com/kubernetes/kubernetes/pull/120330), [@rohitssingh](https://github.com/rohitssingh)) [SIG Apps, Storage and Testing] +- The kube-scheduler `selectorSpread` plugin has been removed; please use the `podTopologySpread` plugin instead. ([#117720](https://github.com/kubernetes/kubernetes/pull/117720), [@kerthcet](https://github.com/kerthcet)) [SIG Scheduling] + +### Feature + +- --sync-frequency will not affect the update interval of volumes that use ConfigMaps or Secrets when the configMapAndSecretChangeDetectionStrategy is set to Cache. The update interval is only affected by the node.alpha.kubernetes.io/ttl node annotation. ([#120255](https://github.com/kubernetes/kubernetes/pull/120255), [@likakuli](https://github.com/likakuli)) [SIG Node] +- Add a new scheduler metric, `pod_scheduling_sli_duration_seconds`, and start the deprecation for `pod_scheduling_duration_seconds`. ([#119049](https://github.com/kubernetes/kubernetes/pull/119049), [@helayoty](https://github.com/helayoty)) [SIG Instrumentation, Scheduling and Testing] +- Added apiserver_envelope_encryption_dek_cache_filled to measure the number of records in the data encryption key (DEK) cache. ([#119878](https://github.com/kubernetes/kubernetes/pull/119878), [@ritazh](https://github.com/ritazh)) [SIG API Machinery and Auth] +- Added kubectl node drain helper callbacks `OnPodDeletionOrEvictionStarted` and `OnPodDeletionOrEvictionFailed`; people extending `kubectl` can use these new callbacks for more granularity. + - Deprecated the `OnPodDeletedOrEvicted` node drain helper callback.
([#117502](https://github.com/kubernetes/kubernetes/pull/117502), [@adilGhaffarDev](https://github.com/adilGhaffarDev)) [SIG CLI] +- Adding apiserver identity to the following metrics: + apiserver_envelope_encryption_key_id_hash_total, apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds, apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds, apiserver_encryption_config_controller_automatic_reload_failures_total, apiserver_encryption_config_controller_automatic_reload_success_total, apiserver_encryption_config_controller_automatic_reload_last_timestamp_seconds + + Fix a bug to surface events for the following metrics: apiserver_encryption_config_controller_automatic_reload_failures_total, apiserver_encryption_config_controller_automatic_reload_last_timestamp_seconds, apiserver_encryption_config_controller_automatic_reload_success_total ([#120438](https://github.com/kubernetes/kubernetes/pull/120438), [@ritazh](https://github.com/ritazh)) [SIG API Machinery, Auth, Instrumentation and Testing] +- Bump distroless-iptables to 0.3.2 based on Go 1.21.1 ([#120527](https://github.com/kubernetes/kubernetes/pull/120527), [@cpanato](https://github.com/cpanato)) [SIG Testing] +- Changed `kubectl help` to display basic details for subcommands from plugins ([#116752](https://github.com/kubernetes/kubernetes/pull/116752), [@xvzf](https://github.com/xvzf)) [SIG CLI] +- Changed the `KMSv2KDF` feature gate to be enabled by default. ([#120433](https://github.com/kubernetes/kubernetes/pull/120433), [@enj](https://github.com/enj)) [SIG API Machinery, Auth and Testing] +- Graduated the following kubelet resource metrics to **general availability**: + - `container_cpu_usage_seconds_total` + - `container_memory_working_set_bytes` + - `container_start_time_seconds` + - `node_cpu_usage_seconds_total` + - `node_memory_working_set_bytes` + - `pod_cpu_usage_seconds_total` + - `pod_memory_working_set_bytes` + - `resource_scrape_error` + + Deprecated (renamed) `scrape_error` in favor of `resource_scrape_error` ([#116897](https://github.com/kubernetes/kubernetes/pull/116897), [@Richabanker](https://github.com/Richabanker)) [SIG Architecture, Instrumentation, Node and Testing] +- Graduated the API List chunking (aka pagination) feature to stable ([#119503](https://github.com/kubernetes/kubernetes/pull/119503), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Cloud Provider and Testing] +- Implements API for streaming for the etcd store implementation + + When sendInitialEvents ListOption is set together with watch=true, it begins the watch stream with synthetic init events followed by a synthetic "Bookmark" after which the server continues streaming events. ([#119557](https://github.com/kubernetes/kubernetes/pull/119557), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] +- Improve memory usage of kube-scheduler by dropping the `.metadata.managedFields` field that kube-scheduler doesn't require. ([#119556](https://github.com/kubernetes/kubernetes/pull/119556), [@linxiulei](https://github.com/linxiulei)) [SIG Scheduling] +- In a scheduler with Permit plugins, when a Pod is rejected during WaitOnPermit, the scheduler records the plugin. + The scheduler will use the record to honor cluster events and queueing hints registered for the plugin, to inform whether to retry the pod.
([#119785](https://github.com/kubernetes/kubernetes/pull/119785), [@sanposhiho](https://github.com/sanposhiho)) [SIG Scheduling and Testing] +- In-tree cloud providers are now switched off by default. Please use the DisableCloudProviders and DisableKubeletCloudCredentialProvider feature flags if you still need this functionality. ([#117503](https://github.com/kubernetes/kubernetes/pull/117503), [@dims](https://github.com/dims)) [SIG API Machinery, Cloud Provider and Testing] +- Introduce a new apiserver metric, apiserver_flowcontrol_current_inqueue_seats. This metric is analogous to `apiserver_flowcontrol_current_inqueue_requests` but tracks total seats, as each request can take more than 1 seat. ([#119385](https://github.com/kubernetes/kubernetes/pull/119385), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] +- Kube-proxy no longer panics on exit when the Node object changes its PodCIDR ([#120375](https://github.com/kubernetes/kubernetes/pull/120375), [@pegasas](https://github.com/pegasas)) [SIG Network] +- Kube-proxy will only install the DROP rules for invalid conntrack states if nf_conntrack_tcp_be_liberal is not set. ([#120412](https://github.com/kubernetes/kubernetes/pull/120412), [@aojea](https://github.com/aojea)) [SIG Network] +- Kubeadm: add validation to verify that the CertificateKey is a valid hex-encoded AES key ([#120064](https://github.com/kubernetes/kubernetes/pull/120064), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: promoted feature gate `EtcdLearnerMode` to beta. Learner mode for joining etcd members is now enabled by default. ([#120228](https://github.com/kubernetes/kubernetes/pull/120228), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubelet exposes latency metrics of different stages of the node startup. ([#118568](https://github.com/kubernetes/kubernetes/pull/118568), [@qiutongs](https://github.com/qiutongs)) [SIG Instrumentation, Node and Scalability] +- Kubernetes is now built with Go 1.21.1 ([#120493](https://github.com/kubernetes/kubernetes/pull/120493), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] +- Kubernetes is now built with go 1.21.0 ([#118996](https://github.com/kubernetes/kubernetes/pull/118996), [@cpanato](https://github.com/cpanato)) [SIG Release and Testing] +- List the pods using the PVC as an ephemeral storage volume in the "Used by:" part of the output of the `kubectl describe pvc` command. ([#120427](https://github.com/kubernetes/kubernetes/pull/120427), [@MaGaroo](https://github.com/MaGaroo)) [SIG CLI] +- Migrated the nodevolumelimits scheduler plugin to use [contextual logging](https://k8s.io/docs/concepts/cluster-administration/system-logs/#contextual-logging). ([#116884](https://github.com/kubernetes/kubernetes/pull/116884), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG Instrumentation, Node, Scheduling, Storage and Testing] +- Promote ServiceNodePortStaticSubrange to stable and lock to default ([#120233](https://github.com/kubernetes/kubernetes/pull/120233), [@xuzhenglun](https://github.com/xuzhenglun)) [SIG Network] +- QueueingHint now includes an error in its return value. If a QueueingHint returns an error, the scheduler logs the error and treats the event as QueueAfterBackoff so that the Pod won't be stuck in the unschedulable pod pool.
([#119290](https://github.com/kubernetes/kubernetes/pull/119290), [@carlory](https://github.com/carlory)) [SIG Node, Scheduling and Testing] +- Remove /livez checks for KMS v1 and v2 to ensure KMS health does not cause a kube-apiserver restart. KMS health checks are still in place as healthz and readiness checks. ([#120583](https://github.com/kubernetes/kubernetes/pull/120583), [@ritazh](https://github.com/ritazh)) [SIG API Machinery, Auth and Testing] +- The CloudDualStackNodeIPs feature is now beta, meaning that when using + an external cloud provider that has been updated to support the feature, + you can pass comma-separated dual-stack `--node-ips` to kubelet and have + the cloud provider take both IPs into account. ([#120275](https://github.com/kubernetes/kubernetes/pull/120275), [@danwinship](https://github.com/danwinship)) [SIG API Machinery, Cloud Provider and Network] +- The Dockerfile for the kubectl image has been updated with the addition of a specific base image and essential utilities (bash and jq). ([#119592](https://github.com/kubernetes/kubernetes/pull/119592), [@rayandas](https://github.com/rayandas)) [SIG CLI, Node, Release and Testing] +- Use of secret-based service account tokens now adds an `authentication.k8s.io/legacy-token-autogenerated-secret` or `authentication.k8s.io/legacy-token-manual-secret` audit annotation containing the name of the secret used. ([#118598](https://github.com/kubernetes/kubernetes/pull/118598), [@yuanchen8911](https://github.com/yuanchen8911)) [SIG Auth, Instrumentation and Testing] +- Volume_zone plugin will consider beta labels as GA labels during the scheduling process. Therefore, if the values of the labels are the same, PVs with beta labels can also be scheduled to nodes with GA labels. ([#118923](https://github.com/kubernetes/kubernetes/pull/118923), [@AxeZhan](https://github.com/AxeZhan)) [SIG Scheduling] + +### Documentation + +- Added descriptions and examples for using kubectl rollout restart without specifying a particular deployment. ([#120118](https://github.com/kubernetes/kubernetes/pull/120118), [@Ithrael](https://github.com/Ithrael)) [SIG CLI] + +### Failing Test + +- DRA: when the scheduler has to deallocate a claim after a node became unsuitable for a pod, it might have needed more attempts than really necessary. ([#120428](https://github.com/kubernetes/kubernetes/pull/120428), [@pohly](https://github.com/pohly)) [SIG Node and Scheduling] +- E2e framework: retrying after intermittent apiserver failures was fixed in WaitForPodsResponding ([#120559](https://github.com/kubernetes/kubernetes/pull/120559), [@pohly](https://github.com/pohly)) [SIG Testing] +- KCM-specific args can be passed with the `/cluster` script, without affecting CCM. New variable name: `KUBE_CONTROLLER_MANAGER_TEST_ARGS`. ([#120524](https://github.com/kubernetes/kubernetes/pull/120524), [@jprzychodzen](https://github.com/jprzychodzen)) [SIG Cloud Provider] +- This contains the modified Windows kubeproxy testcases with a mock implementation ([#120105](https://github.com/kubernetes/kubernetes/pull/120105), [@princepereira](https://github.com/princepereira)) [SIG Network and Windows] + +### Bug or Regression + +- Added a redundant process to remove tracking finalizers from Pods that belong to Jobs.
The process kicks in after the control plane marks a Job as finished ([#119944](https://github.com/kubernetes/kubernetes/pull/119944), [@Sharpz7](https://github.com/Sharpz7)) [SIG Apps] +- Allow specifying ExternalTrafficPolicy for Services with ExternalIPs. ([#119150](https://github.com/kubernetes/kubernetes/pull/119150), [@tnqn](https://github.com/tnqn)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Release and Testing] +- Exclude nodes from a daemonset rolling update if the scheduling constraints are not met. This eliminates the problem of a daemonset rolling update getting stuck due to tolerations. ([#119317](https://github.com/kubernetes/kubernetes/pull/119317), [@mochizuki875](https://github.com/mochizuki875)) [SIG Apps and Testing] +- Fix OpenAPI v3 not being cleaned up after deleting APIServices ([#120108](https://github.com/kubernetes/kubernetes/pull/120108), [@tnqn](https://github.com/tnqn)) [SIG API Machinery and Testing] +- Fix a 1.28 regression in scheduler: a pod with concurrent events could incorrectly get moved to the unschedulable queue where it could get stuck until the next periodic purging after 5 minutes if there was no other event for it. ([#120413](https://github.com/kubernetes/kubernetes/pull/120413), [@pohly](https://github.com/pohly)) [SIG Scheduling] +- Fix a bug in cronjob controller where already created jobs may be missing from the status. ([#120649](https://github.com/kubernetes/kubernetes/pull/120649), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Fix a concurrent map access in TopologyCache's `HasPopulatedHints` method. ([#118189](https://github.com/kubernetes/kubernetes/pull/118189), [@Miciah](https://github.com/Miciah)) [SIG Apps and Network] +- Fix kubectl events not filtering events by GroupVersion for resources specified with a full name. ([#120119](https://github.com/kubernetes/kubernetes/pull/120119), [@Ithrael](https://github.com/Ithrael)) [SIG CLI and Testing] +- Fixed CEL estimated cost of `replace()` to handle a zero-length replacement string correctly. + Previously this would cause the estimated cost to be higher than it should be. ([#120097](https://github.com/kubernetes/kubernetes/pull/120097), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery] +- Fixed a 1.26 scheduling regression by ensuring that preemption is skipped when a PreFilter plugin returns `UnschedulableAndUnresolvable` ([#119778](https://github.com/kubernetes/kubernetes/pull/119778), [@sanposhiho](https://github.com/sanposhiho)) [SIG Scheduling and Testing] +- Fixed a 1.27 scheduling regression where the PostFilter plugin may not function if previous PreFilter plugins return Skip ([#119769](https://github.com/kubernetes/kubernetes/pull/119769), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] +- Fixed a 1.28 regression around restarting init containers in the right order relative to normal containers ([#120281](https://github.com/kubernetes/kubernetes/pull/120281), [@gjkim42](https://github.com/gjkim42)) [SIG Node and Testing] +- Fixed a regression in default 1.27 configurations in kube-apiserver: fixed the AggregatedDiscoveryEndpoint feature (beta in 1.27+) to successfully fetch discovery information from aggregated API servers that do not check `Accept` headers when serving the `/apis` endpoint ([#119870](https://github.com/kubernetes/kubernetes/pull/119870), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery] +- Fixed an issue where a CronJob could fail to clean up Jobs when the ResourceQuota for Jobs had been reached.
([#119776](https://github.com/kubernetes/kubernetes/pull/119776), [@ASverdlov](https://github.com/ASverdlov)) [SIG Apps] +- Fixes a 1.28 regression handling negative index json patches ([#120327](https://github.com/kubernetes/kubernetes/pull/120327), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Architecture, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node and Storage] +- Fixes a bug where Services using finalizers may hold onto ClusterIP and/or NodePort allocated resources for longer than expected if the finalizer is removed using the status subresource ([#120623](https://github.com/kubernetes/kubernetes/pull/120623), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Fixes an issue where StatefulSet might not restart a pod after eviction or node failure. ([#120398](https://github.com/kubernetes/kubernetes/pull/120398), [@aleksandra-malinowska](https://github.com/aleksandra-malinowska)) [SIG Apps] +- Fixes an issue with the garbage collection controller registering duplicate event handlers if discovery requests fail. ([#117992](https://github.com/kubernetes/kubernetes/pull/117992), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] +- Fixes the bug where images pinned by the container runtime can be garbage collected by kubelet ([#119986](https://github.com/kubernetes/kubernetes/pull/119986), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- Fixing an issue with incremental id generation for loadbalancer and endpoint in the Kubeproxy mock test framework. ([#120723](https://github.com/kubernetes/kubernetes/pull/120723), [@princepereira](https://github.com/princepereira)) [SIG Network and Windows] +- If a watch with the `progressNotify` option set is to be created, and the registry hasn't provided a `newFunc`, return an error. ([#120212](https://github.com/kubernetes/kubernetes/pull/120212), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] +- Improved handling of jsonpath expressions for kubectl wait --for. It is now possible to use simple filter expressions which match on a field's content. ([#118748](https://github.com/kubernetes/kubernetes/pull/118748), [@andreaskaris](https://github.com/andreaskaris)) [SIG CLI and Testing] +- Incorporating feedback on PR #119341 ([#120087](https://github.com/kubernetes/kubernetes/pull/120087), [@divyasri537](https://github.com/divyasri537)) [SIG API Machinery] +- Kubeadm: Use universal deserializer to decode static pod. ([#120549](https://github.com/kubernetes/kubernetes/pull/120549), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubeadm: fix nil pointer when etcd member is already removed ([#119753](https://github.com/kubernetes/kubernetes/pull/119753), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubeadm: fix the bug where the `--image-repository` flag is missing for some init phase sub-commands ([#120072](https://github.com/kubernetes/kubernetes/pull/120072), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: improve the logic that checks whether a systemd service exists.
+- Incorporating feedback on PR #119341 ([#120087](https://github.com/kubernetes/kubernetes/pull/120087), [@divyasri537](https://github.com/divyasri537)) [SIG API Machinery] +- Kubeadm: use the universal deserializer to decode static pods. ([#120549](https://github.com/kubernetes/kubernetes/pull/120549), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubeadm: fix nil pointer dereference when an etcd member has already been removed ([#119753](https://github.com/kubernetes/kubernetes/pull/119753), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where the `--image-repository` flag was missing for some init phase sub-commands ([#120072](https://github.com/kubernetes/kubernetes/pull/120072), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: improve the logic that checks whether a systemd service exists. ([#120514](https://github.com/kubernetes/kubernetes/pull/120514), [@fengxsong](https://github.com/fengxsong)) [SIG Cluster Lifecycle] +- Kubeadm: printing the default component configs for `reset` and `join` is no longer supported ([#119346](https://github.com/kubernetes/kubernetes/pull/119346), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle] +- Kubeadm: remove the 'system:masters' organization from the etcd/healthcheck-client certificate. ([#119859](https://github.com/kubernetes/kubernetes/pull/119859), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubectl prune v2: Switch annotation from `contains-group-resources` to `contains-group-kinds`, + because this is what we defined in the KEP and is clearer to end-users. Although the functionality is + in alpha, we will recognize the prior annotation; this migration support will be removed in beta/GA. ([#118942](https://github.com/kubernetes/kubernetes/pull/118942), [@justinsb](https://github.com/justinsb)) [SIG CLI] +- Kubectl will not print events if the `--show-events=false` argument is passed to the describe PVC subcommand. ([#120380](https://github.com/kubernetes/kubernetes/pull/120380), [@MaGaroo](https://github.com/MaGaroo)) [SIG CLI] +- More accurate requeueing in the scheduling queue for Pods rejected due to a transient failure (e.g., a temporary failure of the kube-apiserver). ([#119105](https://github.com/kubernetes/kubernetes/pull/119105), [@sanposhiho](https://github.com/sanposhiho)) [SIG Scheduling and Testing] +- No-op and GC-related updates to cluster trust bundles no longer require attest authorization when the ClusterTrustBundleAttest plugin is enabled. ([#120779](https://github.com/kubernetes/kubernetes/pull/120779), [@enj](https://github.com/enj)) [SIG Auth] +- Reintroduce the resourcequota.NewMonitor constructor for other consumers ([#120777](https://github.com/kubernetes/kubernetes/pull/120777), [@atiratree](https://github.com/atiratree)) [SIG Apps] +- Scheduler: fixed the `apiVersion` field missing from events reported by the taint manager ([#114095](https://github.com/kubernetes/kubernetes/pull/114095), [@aimuz](https://github.com/aimuz)) [SIG Apps, Node and Scheduling] +- Service Controller: update load balancer hosts after a node's ProviderID is updated ([#120492](https://github.com/kubernetes/kubernetes/pull/120492), [@cezarygerard](https://github.com/cezarygerard)) [SIG Cloud Provider and Network] +- Setting the `status.loadBalancer` of a Service whose `spec.type` is not `"LoadBalancer"` was previously allowed, but any update to the `metadata` or `spec` would wipe that field. Setting this field is no longer permitted unless `spec.type` is `"LoadBalancer"`. In the very unlikely event that this has unexpected impact, you can enable the `AllowServiceLBStatusOnNonLB` feature gate, which will restore the previous behavior. If you do need to set this, please file an issue with the Kubernetes project to help contributors understand why you need it. ([#119789](https://github.com/kubernetes/kubernetes/pull/119789), [@thockin](https://github.com/thockin)) [SIG Apps and Testing]
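+
+  A minimal sketch of re-enabling the old behavior, assuming the kube-apiserver flags can be edited directly (for kubeadm clusters that usually means the static pod manifest under /etc/kubernetes/manifests/kube-apiserver.yaml):
+
+  ```sh
+  # Assumption: your control plane lets you append flags to kube-apiserver.
+  # Only do this if you really depend on writing status.loadBalancer on
+  # non-LoadBalancer Services.
+  kube-apiserver --feature-gates=AllowServiceLBStatusOnNonLB=true
+  ```
+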
+- Sometimes, the scheduler incorrectly placed a pod in the "unschedulable" queue instead of the "backoff" queue. This happened when some plugin had previously declared the pod as "unschedulable" and then a later attempt encountered some other error. Scheduling of that pod then got delayed by up to five minutes, after which periodic flushing moved the pod back into the "active" queue. ([#120334](https://github.com/kubernetes/kubernetes/pull/120334), [@pohly](https://github.com/pohly)) [SIG Scheduling] +- The `--bind-address` parameter in kube-proxy is misleading: no port is opened with this address. Instead it is translated internally to "nodeIP". The nodeIPs for both families are now taken from the Node object if `--bind-address` is unspecified or set to the "any" address (0.0.0.0 or ::). It is recommended to leave `--bind-address` unspecified, and in particular to avoid setting it to localhost (127.0.0.1 or ::1) ([#119525](https://github.com/kubernetes/kubernetes/pull/119525), [@uablrek](https://github.com/uablrek)) [SIG Network and Scalability] + +### Other (Cleanup or Flake) + +- Add context to "caches populated" log messages. ([#119796](https://github.com/kubernetes/kubernetes/pull/119796), [@sttts](https://github.com/sttts)) [SIG API Machinery] +- Added downloading of the CNI binary for the corresponding architecture in local-up-cluster.sh ([#120312](https://github.com/kubernetes/kubernetes/pull/120312), [@HirazawaUi](https://github.com/HirazawaUi)) [SIG Network and Node] +- Changes the behavior of kube-proxy by allowing sysctl values to be set lower than the existing ones. ([#120448](https://github.com/kubernetes/kubernetes/pull/120448), [@aroradaman](https://github.com/aroradaman)) [SIG Network] +- Clean up kube-apiserver http logs for impersonated requests. ([#119795](https://github.com/kubernetes/kubernetes/pull/119795), [@sttts](https://github.com/sttts)) [SIG API Machinery] +- Dynamic resource allocation: avoid creating a new gRPC connection for every call of prepare/unprepare resource(s) ([#118619](https://github.com/kubernetes/kubernetes/pull/118619), [@TommyStarK](https://github.com/TommyStarK)) [SIG Node] +- Fixes an issue where the vsphere cloud provider will not trust a certificate if: + - The issuer of the certificate is unknown (x509.UnknownAuthorityError) + - The requested name does not match the set of authorized names (x509.HostnameError) + - The error surfaced after attempting a connection contains one of the substrings: "certificate is not trusted" or "certificate signed by unknown authority" ([#120736](https://github.com/kubernetes/kubernetes/pull/120736), [@MadhavJivrajani](https://github.com/MadhavJivrajani)) [SIG Architecture and Cloud Provider] +- Fixes a bug where the `Adding GroupVersion` log line was constantly repeated without any group version changes ([#119825](https://github.com/kubernetes/kubernetes/pull/119825), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery] +- Generated ResourceClaim names are now more readable because of an additional hyphen before the random suffix (`--` ). ([#120336](https://github.com/kubernetes/kubernetes/pull/120336), [@pohly](https://github.com/pohly)) [SIG Apps and Node] +- Improve memory usage of kube-controller-manager by dropping the `.metadata.managedFields` field that kube-controller-manager doesn't require. ([#118455](https://github.com/kubernetes/kubernetes/pull/118455), [@linxiulei](https://github.com/linxiulei)) [SIG API Machinery and Cloud Provider] +- Kubeadm: remove the 'system:masters' organization from the apiserver-etcd-client certificate ([#120521](https://github.com/kubernetes/kubernetes/pull/120521), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle]
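+
+  One way to confirm this on an upgraded control-plane node (the path assumes the default kubeadm PKI layout; the subject only changes once the certificate has been regenerated or renewed):
+
+  ```sh
+  # Path assumes the default kubeadm PKI layout.
+  # The printed subject should no longer contain "O = system:masters".
+  openssl x509 -in /etc/kubernetes/pki/apiserver-etcd-client.crt -noout -subject
+  ```
+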
+- Kubeadm: updated the warning message printed when swap space is detected. When swap is active on Linux, kubeadm explains that swap is supported for cgroup v2 only and is beta but disabled by default. ([#120198](https://github.com/kubernetes/kubernetes/pull/120198), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Makefile and scripts now respect GOTOOLCHAIN and otherwise ensure ./.go-version is used ([#120279](https://github.com/kubernetes/kubernetes/pull/120279), [@BenTheElder](https://github.com/BenTheElder)) [SIG Release] +- Optimized the NodeUnschedulable Filter to avoid unnecessary calculations ([#119399](https://github.com/kubernetes/kubernetes/pull/119399), [@wackxu](https://github.com/wackxu)) [SIG Scheduling] +- Previously, the pod name and namespace were omitted from the preemption event log message. The preemptor pod UID is now added to preemption event messages for easier debugging and safer transparency. ([#119971](https://github.com/kubernetes/kubernetes/pull/119971), [@kwakubiney](https://github.com/kwakubiney)) [SIG Scheduling] +- Promote to conformance a test that verifies that Services only forward traffic on the specified port and protocol. ([#120069](https://github.com/kubernetes/kubernetes/pull/120069), [@aojea](https://github.com/aojea)) [SIG Architecture, Network and Testing] +- Remove ephemeral container legacy server support for server versions prior to 1.22 ([#119537](https://github.com/kubernetes/kubernetes/pull/119537), [@ardaguclu](https://github.com/ardaguclu)) [SIG CLI] +- Scheduler: handling of pods that are unschedulable because a ResourceClass is missing is a bit more efficient and no longer relies on periodic retries ([#120213](https://github.com/kubernetes/kubernetes/pull/120213), [@pohly](https://github.com/pohly)) [SIG Node, Scheduling and Testing] +- Set the resolution for the job_controller_job_sync_duration_seconds metric from 4ms to 1min ([#120577](https://github.com/kubernetes/kubernetes/pull/120577), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and Instrumentation] +- StatefulSet tests should wait for new replicas when removing .start.ordinal ([#119761](https://github.com/kubernetes/kubernetes/pull/119761), [@soltysh](https://github.com/soltysh)) [SIG Apps and Testing] +- The `horizontalpodautoscaling` and `clusterrole-aggregation` controllers now assume the `autoscaling/v1` and `rbac.authorization.k8s.io/v1` APIs are available. If you disable those APIs and do not want to run those controllers, exclude them by passing `--controllers=-horizontalpodautoscaling` or `--controllers=-clusterrole-aggregation` to `kube-controller-manager`. ([#117977](https://github.com/kubernetes/kubernetes/pull/117977), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Cloud Provider]
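+
+  For example, on a cluster where the `autoscaling/v1` API has been disabled (flag value exactly as in the note above; add it to however kube-controller-manager is normally invoked):
+
+  ```sh
+  # Flag value taken from the release note above.
+  # Skip the HPA controller so kube-controller-manager does not require autoscaling/v1.
+  kube-controller-manager --controllers=-horizontalpodautoscaling
+  ```
+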
+- The metrics controlled by the ComponentSLIs feature-gate and served at /metrics/slis are now GA and unconditionally enabled. The feature-gate will be removed in 1.31. ([#120574](https://github.com/kubernetes/kubernetes/pull/120574), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Architecture, Cloud Provider, Instrumentation, Network, Node and Scheduling] +- Updated CNI plugins to v1.3.0. ([#119969](https://github.com/kubernetes/kubernetes/pull/119969), [@saschagrunert](https://github.com/saschagrunert)) [SIG Cloud Provider, Node and Testing] +- Updated cri-tools to v1.28.0. ([#119933](https://github.com/kubernetes/kubernetes/pull/119933), [@saschagrunert](https://github.com/saschagrunert)) [SIG Cloud Provider] +- Updated distroless-iptables to use registry.k8s.io/build-image/distroless-iptables:v0.3.1 ([#120352](https://github.com/kubernetes/kubernetes/pull/120352), [@saschagrunert](https://github.com/saschagrunert)) [SIG Release and Testing] +- Upgrade coredns to v1.11.1 ([#120116](https://github.com/kubernetes/kubernetes/pull/120116), [@tukwila](https://github.com/tukwila)) [SIG Cloud Provider and Cluster Lifecycle] +- ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding objects are persisted in etcd using the v1beta1 version. Remove alpha objects or disable the alpha ValidatingAdmissionPolicy feature in a 1.27 server before upgrading to a 1.28 server with the beta feature and API enabled. ([#120018](https://github.com/kubernetes/kubernetes/pull/120018), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] +- Kubectl will no longer support the long-deprecated "/swagger-2.0.0.pb-v1" endpoint ([#119410](https://github.com/kubernetes/kubernetes/pull/119410), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery] + +## Dependencies + +### Added +- github.com/distribution/reference: [v0.5.0](https://github.com/distribution/reference/tree/v0.5.0) + +### Changed +- github.com/coredns/corefile-migration: [v1.0.20 → v1.0.21](https://github.com/coredns/corefile-migration/compare/v1.0.20...v1.0.21) +- github.com/docker/distribution: [v2.8.2+incompatible → v2.8.1+incompatible](https://github.com/docker/distribution/compare/v2.8.2...v2.8.1) +- github.com/evanphx/json-patch: [v5.6.0+incompatible → v4.12.0+incompatible](https://github.com/evanphx/json-patch/compare/v5.6.0...v4.12.0) +- github.com/google/cel-go: [v0.16.0 → v0.17.6](https://github.com/google/cel-go/compare/v0.16.0...v0.17.6) +- github.com/gorilla/websocket: [v1.4.2 → v1.5.0](https://github.com/gorilla/websocket/compare/v1.4.2...v1.5.0) +- github.com/opencontainers/runc: [v1.1.7 → v1.1.9](https://github.com/opencontainers/runc/compare/v1.1.7...v1.1.9) +- github.com/opencontainers/selinux: [v1.10.0 → v1.11.0](https://github.com/opencontainers/selinux/compare/v1.10.0...v1.11.0) +- github.com/vmware/govmomi: [v0.30.0 → v0.30.6](https://github.com/vmware/govmomi/compare/v0.30.0...v0.30.6) +- google.golang.org/protobuf: v1.30.0 → v1.31.0 +- k8s.io/gengo: c0856e2 → 9cce18d +- k8s.io/kube-openapi: 2695361 → d090da1 +- k8s.io/utils: d93618c → 3b25d92 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.1.2 → v0.28.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.2.3 → v4.3.0 + +### Removed +_Nothing has changed._ \ No newline at end of file diff --git a/CHANGELOG/OWNERS b/CHANGELOG/OWNERS index 3b2a9e3e28fb0..1f51e1a0c3993 100644 --- a/CHANGELOG/OWNERS +++ b/CHANGELOG/OWNERS @@ -12,14 +12,14 @@ approvers: - harshanarayana # 1.27 Release Notes Lead - ramrodo # 1.26 Release Notes Lead - sanchita-07 # 1.28 Release Notes Lead + - fsmunoz # 1.29 Release Notes Lead reviewers: - release-managers - - AnaMMedina21 # 1.28 Release Notes Shadow - - fsmunoz # 1.28 Release Notes Shadow - - michaelfromyeg # 1.28 Release Notes Shadow - - muddapu # 1.28 Release Notes Shadow - - rashansmith # 1.28 Release Notes Shadow - - sanchita-07 # 1.28 Release Notes Lead + - fsmunoz # 1.29 Release Notes Lead + - fykaa # 1.29 Release Notes Shadow + - mengjiao-liu # 1.29 Release Notes Shadow + - muddapu # 1.29 Release Notes Shadow + - rashansmith # 1.29 Release Notes Shadow labels: -
sig/release - area/release-eng diff --git a/CHANGELOG/README.md b/CHANGELOG/README.md index c0434decf4198..13179a108322e 100644 --- a/CHANGELOG/README.md +++ b/CHANGELOG/README.md @@ -1,5 +1,6 @@ # CHANGELOGs +- [CHANGELOG-1.29.md](./CHANGELOG-1.29.md) - [CHANGELOG-1.28.md](./CHANGELOG-1.28.md) - [CHANGELOG-1.27.md](./CHANGELOG-1.27.md) - [CHANGELOG-1.26.md](./CHANGELOG-1.26.md) diff --git a/LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/LICENSES/vendor/github.com/google/s2a-go/LICENSE similarity index 98% rename from LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE rename to LICENSES/vendor/github.com/google/s2a-go/LICENSE index bdbfa6963be47..5f39be4994dbd 100644 --- a/LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE +++ b/LICENSES/vendor/github.com/google/s2a-go/LICENSE @@ -1,4 +1,5 @@ -= vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry licensed under: = += vendor/github.com/google/s2a-go licensed under: = + Apache License Version 2.0, January 2004 @@ -202,4 +203,4 @@ See the License for the specific language governing permissions and limitations under the License. -= vendor/go.opentelemetry.io/otel/LICENSE 86d3f3a95c324c9479bd8986968f4327 += vendor/github.com/google/s2a-go/LICENSE.md 3b83ef96387f14655fc854ddc3c6bd57 diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 8e80475e861e7..72749023ee27a 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -223,6 +223,13 @@ aliases: # emeretus: # - dashpole # - vishh + sig-node-cri-approvers: + - msau42 + - smarterclayton + - thockin + - saschagrunert + - haircommander + - mikebrow sig-node-reviewers: - Random-Liu - dchen1107 @@ -246,6 +253,7 @@ aliases: - saschagrunert - haircommander - tzneal + - rphillips sig-network-approvers: - andrewsykim - aojea @@ -357,10 +365,6 @@ aliases: - caseydavenport - danwinship - thockin - sig-node-api-approvers: - - msau42 - - smarterclayton - - thockin sig-scheduling-api-approvers: - msau42 - smarterclayton diff --git a/README.md b/README.md index b8781616a1ef9..a171572b5a60d 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Kubernetes (K8s) -[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/569/badge)](https://bestpractices.coreinfrastructure.org/projects/569) [![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes/kubernetes)](https://goreportcard.com/report/github.com/kubernetes/kubernetes) ![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/kubernetes/kubernetes) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/569/badge)](https://bestpractices.coreinfrastructure.org/projects/569) [![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes/kubernetes)](https://goreportcard.com/report/github.com/kubernetes/kubernetes) ![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/kubernetes/kubernetes?sort=semver) diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS index 6df6a4d6a1649..21a9a3a3f6816 100644 --- a/SECURITY_CONTACTS +++ b/SECURITY_CONTACTS @@ -1,17 +1,15 @@ # Defined below are the security contacts for this repo. # -# They are the contact point for the Product Security Committee to reach out +# They are the contact point for the Security Response Committee (SRC) to reach out # to for triaging and handling of incoming issues. 
# # The below names agree to abide by the # [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) # and will be removed and replaced if they violate that agreement. # +# To contact the SRC, please see https://github.com/kubernetes/committee-security-response#contacting-the-src +# # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -cjcullen -joelsmith -liggitt -philips -tallclair +committee-security-response diff --git a/api/api-rules/violation_exceptions.list b/api/api-rules/violation_exceptions.list index 4f7068c401ebb..c666da22f859d 100644 --- a/api/api-rules/violation_exceptions.list +++ b/api/api-rules/violation_exceptions.list @@ -355,8 +355,6 @@ API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RBDPool API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS -API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv4 -API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv6 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource diff --git a/api/discovery/aggregated_v2beta1.json b/api/discovery/aggregated_v2beta1.json index 69f29ef5f9323..dd7c3f2649b21 100644 --- a/api/discovery/aggregated_v2beta1.json +++ b/api/discovery/aggregated_v2beta1.json @@ -853,29 +853,6 @@ { "freshness": "Current", "resources": [ - { - "resource": "clustercidrs", - "responseKind": { - "group": "", - "kind": "ClusterCIDR", - "version": "" - }, - "scope": "Cluster", - "shortNames": [ - "cc" - ], - "singularResource": "clustercidr", - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] - }, { "resource": "ipaddresses", "responseKind": { diff --git a/api/discovery/apis__networking.k8s.io__v1alpha1.json b/api/discovery/apis__networking.k8s.io__v1alpha1.json index 15eeefa973f0b..aac5d63a2184f 100644 --- a/api/discovery/apis__networking.k8s.io__v1alpha1.json +++ b/api/discovery/apis__networking.k8s.io__v1alpha1.json @@ -3,26 +3,6 @@ "groupVersion": "networking.k8s.io/v1alpha1", "kind": "APIResourceList", "resources": [ - { - "kind": "ClusterCIDR", - "name": "clustercidrs", - "namespaced": false, - "shortNames": [ - "cc" - ], - "singularName": "clustercidr", - "storageVersionHash": "iC0u25BTSsc=", - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] - }, { "kind": "IPAddress", "name": "ipaddresses", diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 5f6891dc43c3d..6c1fdb78bb2c4 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -4239,7 +4239,7 @@ "type": "integer" }, "backoffLimitPerIndex": { - "description": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. 
It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "description": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "format": "int32", "type": "integer" }, @@ -4257,7 +4257,7 @@ "type": "boolean" }, "maxFailedIndexes": { - "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "format": "int32", "type": "integer" }, @@ -4329,7 +4329,7 @@ "type": "integer" }, "failedIndexes": { - "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "type": "string" }, "ready": { @@ -4438,7 +4438,7 @@ "description": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", "properties": { "action": { - "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "type": "string" }, "onExitCodes": { @@ -6025,7 +6025,7 @@ "description": "EndpointPort is a tuple that describes a single port.", "properties": { "appProtocol": { - "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "type": "string" }, "name": { @@ -7006,6 +7006,10 @@ "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction", "description": "HTTPGet specifies the http request to perform." }, + "sleep": { + "$ref": "#/definitions/io.k8s.api.core.v1.SleepAction", + "description": "Sleep represents the duration that the container should sleep before being terminated." + }, "tcpSocket": { "$ref": "#/definitions/io.k8s.api.core.v1.TCPSocketAction", "description": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." @@ -8350,7 +8354,23 @@ "properties": { "labelSelector": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", - "description": "A label query over a set of resources, in this case pods." + "description": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. 
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, "namespaceSelector": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", @@ -10260,7 +10280,7 @@ "description": "ServicePort contains information on service's port.", "properties": { "appProtocol": { - "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "type": "string" }, "name": { @@ -10435,6 +10455,20 @@ }, "type": "object" }, + "io.k8s.api.core.v1.SleepAction": { + "description": "SleepAction describes a \"sleep\" action.", + "properties": { + "seconds": { + "description": "Seconds is the number of seconds to sleep.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "io.k8s.api.core.v1.StorageOSPersistentVolumeSource": { "description": "Represents a StorageOS persistent volume resource.", "properties": { @@ -11090,11 +11124,11 @@ "description": "EndpointPort represents a Port used by an EndpointSlice", "properties": { "appProtocol": { - "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "type": "string" }, "name": { - "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "type": "string" }, "port": { @@ -11558,7 +11592,7 @@ "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { - "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). 
This field has a default value of 30.", + "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", "format": "int32", "type": "integer" }, @@ -13095,96 +13129,6 @@ }, "type": "object" }, - "io.k8s.api.networking.v1alpha1.ClusterCIDR": { - "description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - }, - "spec": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec", - "description": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" - } - }, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRList": { - "description": "ClusterCIDRList contains a list of ClusterCIDR.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "items": { - "description": "items is the list of ClusterCIDRs.", - "items": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - }, - "type": "array" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - } - }, - "required": [ - "items" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDRList", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": { - "description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "properties": { - "ipv4": { - "description": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "ipv6": { - "description": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "nodeSelector": { - "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", - "description": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable." - }, - "perNodeHostBits": { - "description": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "format": "int32", - "type": "integer" - } - }, - "required": [ - "perNodeHostBits" - ], - "type": "object" - }, "io.k8s.api.networking.v1alpha1.IPAddress": { "description": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. 
Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "properties": { @@ -13277,10 +13221,6 @@ "resource": { "description": "Resource is the resource of the object being referenced.", "type": "string" - }, - "uid": { - "description": "UID is the uid of the object being referenced.", - "type": "string" } }, "type": "object" @@ -14640,7 +14580,7 @@ "type": "string" }, "podInfoOnMount": { - "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "type": "boolean" }, "requiresRepublish": { @@ -17270,7 +17210,7 @@ "type": "string" }, "groupPriorityMinimum": { - "description": "GroupPriorityMininum is the priority this group should have at least. 
Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", "format": "int32", "type": "integer" }, @@ -17528,8 +17468,8 @@ "type": "integer", "uniqueItems": true }, - "pretty-nN7o5FEq": { - "description": "If 'true', then the output is pretty printed.", + "pretty-tJGM1-ng": { + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "type": "string", @@ -17810,7 +17750,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -17875,7 +17815,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ] }, @@ -17934,7 +17874,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -18008,7 +17948,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -18082,7 +18022,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -18156,7 +18096,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -18247,7 +18187,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -18348,7 +18288,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -18564,7 +18504,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -18753,7 +18693,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -19049,7 +18989,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": 
"#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -19238,7 +19178,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -19534,7 +19474,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -19723,7 +19663,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -20019,7 +19959,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -20208,7 +20148,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -20504,7 +20444,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -20693,7 +20633,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -20883,7 +20823,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -21179,7 +21119,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -21368,7 +21308,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -21636,7 +21576,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -21746,7 +21686,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -21918,7 +21858,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -22140,7 +22080,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/previous-1jxDPu3y" @@ -22790,7 +22730,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -23086,7 +23026,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -23275,7 +23215,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -23571,7 +23511,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -23760,7 +23700,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -23950,7 +23890,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -24140,7 +24080,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" 
}, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -24436,7 +24376,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -24625,7 +24565,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -24815,7 +24755,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -25111,7 +25051,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -25300,7 +25240,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -25596,7 +25536,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -25785,7 +25725,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -25957,7 +25897,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -26173,7 +26113,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -26362,7 +26302,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -27053,7 +26993,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -27302,7 +27242,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -27471,7 +27411,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "put": { @@ -27572,7 +27512,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -27865,7 +27805,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -28051,7 +27991,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -28733,7 +28673,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -28929,7 +28869,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -29100,7 +29040,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -29286,7 +29226,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -29473,7 +29413,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -29669,7 +29609,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": 
"#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -29743,7 +29683,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -29817,7 +29757,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -29891,7 +29831,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -29965,7 +29905,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30039,7 +29979,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30113,7 +30053,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30187,7 +30127,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30261,7 +30201,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30335,7 +30275,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30409,7 +30349,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30483,7 +30423,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30560,7 +30500,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30645,7 +30585,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30722,7 +30662,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30807,7 +30747,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30884,7 +30824,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -30969,7 +30909,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": 
"#/parameters/resourceVersion-5WAnf1kx" @@ -31046,7 +30986,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31131,7 +31071,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31208,7 +31148,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31293,7 +31233,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31370,7 +31310,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31455,7 +31395,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31532,7 +31472,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31617,7 +31557,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31694,7 +31634,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31779,7 +31719,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31856,7 +31796,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -31941,7 +31881,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32018,7 +31958,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32103,7 +32043,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32180,7 +32120,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32265,7 +32205,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32342,7 +32282,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32427,7 
+32367,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32509,7 +32449,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32583,7 +32523,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32665,7 +32605,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32739,7 +32679,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32813,7 +32753,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32895,7 +32835,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -32969,7 +32909,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -33043,7 +32983,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -33117,7 +33057,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -33191,7 +33131,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -33265,7 +33205,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -33339,7 +33279,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -33413,7 +33353,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -33683,7 +33623,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -33869,7 +33809,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -34162,7 +34102,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -34348,7 +34288,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -34544,7 +34484,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ 
-34626,7 +34566,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -34700,7 +34640,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -34782,7 +34722,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -34986,7 +34926,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -35172,7 +35112,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -35359,7 +35299,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -35652,7 +35592,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -35838,7 +35778,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -36034,7 +35974,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -36116,7 +36056,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -36190,7 +36130,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -36272,7 +36212,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -36476,7 +36416,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -36662,7 +36602,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -36849,7 +36789,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -37142,7 +37082,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -37328,7 +37268,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -37524,7 +37464,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -37606,7 +37546,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -37680,7 +37620,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -37762,7 +37702,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": 
"#/parameters/resourceVersion-5WAnf1kx" @@ -37999,7 +37939,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -38185,7 +38125,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -38372,7 +38312,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -38568,7 +38508,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -38650,7 +38590,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -38887,7 +38827,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -39073,7 +39013,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -39260,7 +39200,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -39456,7 +39396,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -39538,7 +39478,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -39678,7 +39618,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -39752,7 +39692,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -39826,7 +39766,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -40000,7 +39940,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -40189,7 +40129,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -40485,7 +40425,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -40674,7 +40614,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -40864,7 +40804,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -41160,7 +41100,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -41349,7 +41289,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -41539,7 +41479,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": 
"#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -41729,7 +41669,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -42025,7 +41965,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -42214,7 +42154,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -42404,7 +42344,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -42594,7 +42534,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -42890,7 +42830,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -43079,7 +43019,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -43269,7 +43209,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -43459,7 +43399,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -43655,7 +43595,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -43729,7 +43669,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -43803,7 +43743,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -43877,7 +43817,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -43951,7 +43891,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44028,7 +43968,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44113,7 +44053,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44190,7 +44130,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44275,7 +44215,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44352,7 +44292,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + 
"$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44437,7 +44377,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44514,7 +44454,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44599,7 +44539,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44676,7 +44616,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44761,7 +44701,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44835,7 +44775,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -44909,7 +44849,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -45014,7 +44954,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -45095,7 +45035,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -45209,7 +45149,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -45323,7 +45263,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -45473,7 +45413,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -45554,7 +45494,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -45635,7 +45575,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -45716,7 +45656,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -45898,7 +45838,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -46072,7 +46012,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -46261,7 +46201,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -46451,7 +46391,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -46647,7 +46587,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": 
"#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -46724,7 +46664,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -46809,7 +46749,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -46916,7 +46856,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -47090,7 +47030,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -47279,7 +47219,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -47469,7 +47409,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -47665,7 +47605,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -47742,7 +47682,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -47827,7 +47767,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -47967,7 +47907,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -48041,7 +47981,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -48215,7 +48155,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -48404,7 +48344,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -48594,7 +48534,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -48890,7 +48830,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -49079,7 +49019,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -49269,7 +49209,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -49465,7 +49405,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -49539,7 +49479,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": 
"#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -49616,7 +49556,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -49701,7 +49641,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -49778,7 +49718,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -49863,7 +49803,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -50100,7 +50040,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -50286,7 +50226,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -50473,7 +50413,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -50660,7 +50600,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -50856,7 +50796,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -50938,7 +50878,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -51142,7 +51082,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -51328,7 +51268,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -51524,7 +51464,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -51606,7 +51546,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -51746,7 +51686,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -51920,7 +51860,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -52109,7 +52049,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -52305,7 +52245,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -52382,7 +52322,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -52467,7 +52407,7 @@ "$ref": 
"#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -52607,7 +52547,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -52781,7 +52721,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -52970,7 +52910,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -53166,7 +53106,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -53243,7 +53183,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -53328,7 +53268,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -53468,7 +53408,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -53642,7 +53582,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -53831,7 +53771,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -54027,7 +53967,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -54104,7 +54044,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -54189,7 +54129,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -54426,7 +54366,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -54612,7 +54552,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -54799,7 +54739,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -55092,7 +55032,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -55278,7 +55218,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -55465,7 +55405,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -55661,7 +55601,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" 
@@ -55743,7 +55683,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -55817,7 +55757,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -55899,7 +55839,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -56103,7 +56043,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -56289,7 +56229,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -56476,7 +56416,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -56769,7 +56709,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -56955,7 +56895,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -57142,7 +57082,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -57338,7 +57278,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -57420,7 +57360,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -57494,7 +57434,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -57576,7 +57516,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -57813,7 +57753,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -57999,7 +57939,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -58186,7 +58126,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -58382,7 +58322,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -58464,7 +58404,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -58701,7 +58641,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -58887,7 +58827,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -59083,7 +59023,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -59257,7 +59197,7 @@ 
"$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -59446,7 +59386,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -59636,7 +59576,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -59932,7 +59872,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -60121,7 +60061,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -60317,7 +60257,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60391,7 +60331,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60473,7 +60413,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60547,7 +60487,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60624,7 +60564,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60709,7 +60649,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60786,7 +60726,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60871,7 +60811,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60945,7 +60885,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -60997,485 +60937,6 @@ ] } }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs": { - "delete": { - "consumes": [ - "*/*" - ], - "description": "delete collection of ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR", - "parameters": [ - { - "$ref": "#/parameters/body-2Y1dVQaQ" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/gracePeriodSeconds--K5HaBOS" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/orphanDependents-uRB25kX5" - }, - { - "$ref": "#/parameters/propagationPolicy-6jk3prlO" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "consumes": [ - "*/*" - ], - "description": "list or watch objects of kind ClusterCIDR", - "operationId": "listNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "$ref": "#/parameters/pretty-nN7o5FEq" - } - ], - "post": { - "consumes": [ - "*/*" - ], - "description": "create a ClusterCIDR", - "operationId": "createNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-Qy4HdaTW" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "202": { - "description": "Accepted", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": { - "delete": { - "consumes": [ - "*/*" - ], - "description": "delete a ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "$ref": "#/parameters/body-2Y1dVQaQ" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/gracePeriodSeconds--K5HaBOS" - }, - { - "$ref": "#/parameters/orphanDependents-uRB25kX5" - }, - { - "$ref": "#/parameters/propagationPolicy-6jk3prlO" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "202": { - "description": "Accepted", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "consumes": [ - "*/*" - ], - "description": "read the specified ClusterCIDR", - "operationId": "readNetworkingV1alpha1ClusterCIDR", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/pretty-nN7o5FEq" - } - ], - "patch": { - "consumes": [ - "application/json-patch+json", - "application/merge-patch+json", - "application/strategic-merge-patch+json", - "application/apply-patch+yaml" - ], - "description": "partially update the specified ClusterCIDR", - "operationId": "patchNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "$ref": "#/parameters/body-78PwaGsr" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-7c6nTn1T" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/force-tOGGb0Yi" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "put": { - "consumes": [ - "*/*" - ], - "description": "replace the specified ClusterCIDR", - "operationId": "replaceNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-Qy4HdaTW" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, "/apis/networking.k8s.io/v1alpha1/ipaddresses": { "delete": { "consumes": [ @@ -61628,7 +61089,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -61814,7 +61275,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -61955,162 +61416,6 @@ } } }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": { - "get": { - "consumes": [ - "*/*" - ], - "description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchNetworkingV1alpha1ClusterCIDRList", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/pretty-nN7o5FEq" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ] - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": { - "get": { - "consumes": [ - "*/*" - ], - "description": "watch changes to an object of kind ClusterCIDR. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchNetworkingV1alpha1ClusterCIDR", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/pretty-nN7o5FEq" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ] - }, "/apis/networking.k8s.io/v1alpha1/watch/ipaddresses": { "get": { "consumes": [ @@ -62166,7 +61471,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -62248,7 +61553,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -62485,7 +61790,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -62671,7 +61976,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -62867,7 +62172,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -62949,7 +62254,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -63189,7 +62494,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -63378,7 +62683,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -63568,7 +62873,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -63764,7 +63069,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -63841,7 +63146,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": 
"#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -63926,7 +63231,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -64000,7 +63305,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -64237,7 +63542,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -64423,7 +63728,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -64716,7 +64021,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -64902,7 +64207,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -65198,7 +64503,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -65387,7 +64692,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -65683,7 +64988,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -65872,7 +65177,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -66068,7 +65373,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66142,7 +65447,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66216,7 +65521,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66298,7 +65603,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66372,7 +65677,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66454,7 +65759,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66531,7 +65836,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66616,7 +65921,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66693,7 +65998,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": 
"#/parameters/resourceVersion-5WAnf1kx" @@ -66778,7 +66083,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66852,7 +66157,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -66926,7 +66231,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -67166,7 +66471,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -67355,7 +66660,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -67545,7 +66850,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -67841,7 +67146,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -68030,7 +67335,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -68220,7 +67525,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -68516,7 +67821,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -68705,7 +68010,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -68901,7 +68206,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -68975,7 +68280,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -69049,7 +68354,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -69220,7 +68525,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -69406,7 +68711,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -69605,7 +68910,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -69690,7 +68995,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -69767,7 +69072,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -69852,7 +69157,7 @@ "$ref": 
"#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -69929,7 +69234,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -70014,7 +69319,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -70088,7 +69393,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -70162,7 +69467,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -70236,7 +69541,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -70310,7 +69615,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -70392,7 +69697,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -70629,7 +69934,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -70815,7 +70120,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -71011,7 +70316,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -71093,7 +70398,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -71330,7 +70635,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -71516,7 +70821,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -71809,7 +71114,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -71995,7 +71300,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -72191,7 +71496,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -72365,7 +71670,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -72554,7 +71859,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -72847,7 +72152,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -73033,7 +72338,7 @@ "uniqueItems": 
true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -73326,7 +72631,7 @@ }, "parameters": [ { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "post": { @@ -73512,7 +72817,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -73699,7 +73004,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" } ], "patch": { @@ -73895,7 +73200,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -73977,7 +73282,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74051,7 +73356,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74133,7 +73438,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74207,7 +73512,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74284,7 +73589,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74369,7 +73674,7 @@ "$ref": "#/parameters/namespace-vgWSWtn3" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74443,7 +73748,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74525,7 +73830,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74599,7 +73904,7 @@ "$ref": "#/parameters/limit-1NfNmdNH" }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" @@ -74681,7 +73986,7 @@ "uniqueItems": true }, { - "$ref": "#/parameters/pretty-nN7o5FEq" + "$ref": "#/parameters/pretty-tJGM1-ng" }, { "$ref": "#/parameters/resourceVersion-5WAnf1kx" diff --git a/api/openapi-spec/v3/api__v1_openapi.json b/api/openapi-spec/v3/api__v1_openapi.json index f5b3a8b198642..f39bb8674dce5 100644 --- a/api/openapi-spec/v3/api__v1_openapi.json +++ b/api/openapi-spec/v3/api__v1_openapi.json @@ -113,7 +113,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "ExpirationTimestamp is the time of expiration of the returned token." 
}, "token": { @@ -1368,7 +1367,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Time at which the container was last (re-)started" } }, @@ -1393,7 +1391,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Time at which the container last terminated" }, "message": { @@ -1415,7 +1412,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Time at which previous execution of the container started" } }, @@ -1443,12 +1439,7 @@ "properties": { "allocatedResources": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", "type": "object" @@ -1667,7 +1658,7 @@ "description": "EndpointPort is a tuple that describes a single port.", "properties": { "appProtocol": { - "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "type": "string" }, "name": { @@ -2151,7 +2142,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" } ], - "default": {}, "description": "Time when this Event was first observed." }, "firstTimestamp": { @@ -2160,7 +2150,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "The time at which the event was first recorded. 
(Time of server receipt is in TypeMeta.)" }, "involvedObject": { @@ -2182,7 +2171,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "The time at which the most recent occurrence of this event was recorded." }, "message": { @@ -2314,7 +2302,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" } ], - "default": {}, "description": "Time of the last occurrence observed" } }, @@ -2622,7 +2609,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } ], - "default": {}, "description": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." }, "scheme": { @@ -2901,6 +2887,14 @@ ], "description": "HTTPGet specifies the http request to perform." }, + "sleep": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.SleepAction" + } + ], + "description": "Sleep represents the duration that the container should sleep before being terminated." + }, "tcpSocket": { "allOf": [ { @@ -2956,60 +2950,35 @@ "properties": { "default": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Default resource requirement limit value by resource name if resource limit is omitted.", "type": "object" }, "defaultRequest": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", "type": "object" }, "max": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Max usage constraints on this kind by resource name.", "type": "object" }, "maxLimitRequestRatio": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", "type": "object" }, "min": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Min usage constraints on this kind by resource name.", "type": "object" @@ -3245,12 +3214,7 @@ "description": "NamespaceCondition contains details about state of namespace.", "properties": { "lastTransitionTime": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "message": { "type": "string" @@ -3461,7 +3425,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - 
"default": {}, "description": "Last time we got an update on a given condition." }, "lastTransitionTime": { @@ -3470,7 +3433,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition transit from one status to another." }, "message": { @@ -3757,24 +3719,14 @@ }, "allocatable": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "type": "object" }, "capacity": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", "type": "object" @@ -4085,7 +4037,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastProbeTime is the time we probed the condition." }, "lastTransitionTime": { @@ -4094,7 +4045,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime is the time the condition transitioned from one status to another." }, "message": { @@ -4246,24 +4196,14 @@ }, "allocatedResources": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "type": "object" }, "capacity": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "capacity represents the actual resources of the underlying volume.", "type": "object" @@ -4416,12 +4356,7 @@ }, "capacity": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", "type": "object" @@ -4749,7 +4684,25 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" } ], - "description": "A label query over a set of resources, in this case pods." + "description": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, "namespaceSelector": { "allOf": [ @@ -4817,7 +4770,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time we probed the condition." }, "lastTransitionTime": { @@ -4826,7 +4778,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition transitioned from one status to another." 
}, "message": { @@ -5262,12 +5213,7 @@ }, "overhead": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "type": "object" @@ -6037,7 +5983,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "The last time the condition transitioned from one status to another." }, "message": { @@ -6219,7 +6164,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" } ], - "default": {}, "description": "Specifies the output format of the exposed resources, defaults to \"1\"" }, "resource": { @@ -6332,12 +6276,7 @@ "properties": { "hard": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", "type": "object" @@ -6366,24 +6305,14 @@ "properties": { "hard": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", "type": "object" }, "used": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Used is the current observed total usage of the resource in the namespace.", "type": "object" @@ -6412,24 +6341,14 @@ }, "limits": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" }, "requests": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" @@ -7147,7 +7066,7 @@ "description": "ServicePort contains information on service's port.", "properties": { "appProtocol": { - "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "type": "string" }, "name": { @@ -7176,7 +7095,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } ], - "default": {}, "description": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service" } }, @@ -7357,6 +7275,21 @@ }, "type": "object" }, + "io.k8s.api.core.v1.SleepAction": { + "description": "SleepAction describes a \"sleep\" action.", + "properties": { + "seconds": { + "default": 0, + "description": "Seconds is the number of seconds to sleep.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "io.k8s.api.core.v1.StorageOSPersistentVolumeSource": { "description": "Represents a StorageOS persistent volume resource.", "properties": { @@ -7450,7 +7383,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } ], - "default": {}, "description": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." } }, @@ -7986,24 +7918,14 @@ "properties": { "limits": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" }, "requests": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" @@ -8253,7 +8175,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." }, "message": { @@ -8767,7 +8688,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -9048,7 +8968,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -9511,7 +9430,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9617,7 +9536,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9723,7 +9642,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9874,7 +9793,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -10025,7 +9944,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -10176,7 +10095,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -10374,7 +10293,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -10422,7 +10341,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Namespace" } } - } + }, + "required": true }, "responses": { "200": { @@ -10540,7 +10460,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -10559,7 +10479,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Binding" } } - } + }, + "required": true }, "responses": { "200": { @@ -10947,7 +10868,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -10995,7 +10916,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ConfigMap" } } - } + }, + "required": true }, "responses": { "200": { @@ -11239,7 +11161,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -11311,7 +11233,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -11407,7 +11330,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ConfigMap" } } - } + }, + "required": true }, "responses": { "200": { @@ -11775,7 +11699,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -11823,7 +11747,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Endpoints" } } - } + }, + "required": true }, "responses": { "200": { @@ -12067,7 +11992,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -12139,7 +12064,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -12235,7 +12161,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Endpoints" } } - } + }, + "required": true }, "responses": { "200": { @@ -12603,7 +12530,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -12651,7 +12578,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Event" } } - } + }, + "required": true }, "responses": { "200": { @@ -12895,7 +12823,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -12967,7 +12895,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -13063,7 +12992,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Event" } } - } + }, + "required": true }, "responses": { "200": { @@ -13431,7 +13361,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -13479,7 +13409,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.LimitRange" } } - } + }, + "required": true }, "responses": { "200": { @@ -13723,7 +13654,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -13795,7 +13726,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -13891,7 +13823,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.LimitRange" } } - } + }, + "required": true }, "responses": { "200": { @@ -14259,7 +14192,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14307,7 +14240,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaim" } } - } + }, + "required": true }, "responses": { "200": { @@ -14551,7 +14485,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14623,7 +14557,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -14719,7 +14654,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaim" } } - } + }, + "required": true }, "responses": { "200": { @@ -14838,7 +14774,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14910,7 +14846,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -15006,7 +14943,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaim" } } - } + }, + "required": true }, "responses": { "200": { @@ -15374,7 +15312,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -15422,7 +15360,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Pod" } } - } + }, + "required": true }, "responses": { "200": { @@ -15666,7 +15605,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -15738,7 +15677,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -15834,7 +15774,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Pod" } } - } + }, + "required": true }, "responses": { "200": { @@ -16067,7 +16008,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -16086,7 +16027,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Binding" } } - } + }, + "required": true }, "responses": { "200": { @@ -16225,7 +16167,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -16297,7 +16239,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -16393,7 +16336,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Pod" } } - } + }, + "required": true }, "responses": { "200": { @@ -16501,7 +16445,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -16520,7 +16464,8 @@ "$ref": "#/components/schemas/io.k8s.api.policy.v1.Eviction" } } - } + }, + "required": true }, "responses": { "200": { @@ -16834,7 +16779,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -17498,7 +17443,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -17570,7 +17515,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -17666,7 +17612,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Pod" } } - } + }, + "required": true }, "responses": { "200": { @@ -18034,7 +17981,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -18082,7 +18029,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.PodTemplate" } } - } + }, + "required": true }, "responses": { "200": { @@ -18326,7 +18274,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -18398,7 +18346,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -18494,7 +18443,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.PodTemplate" } } - } + }, + "required": true }, "responses": { "200": { @@ -18862,7 +18812,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -18910,7 +18860,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ReplicationController" } } - } + }, + "required": true }, "responses": { "200": { @@ -19154,7 +19105,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -19226,7 +19177,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -19322,7 +19274,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ReplicationController" } } - } + }, + "required": true }, "responses": { "200": { @@ -19441,7 +19394,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -19513,7 +19466,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -19609,7 +19563,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v1.Scale" } } - } + }, + "required": true }, "responses": { "200": { @@ -19728,7 +19683,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -19800,7 +19755,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -19896,7 +19852,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ReplicationController" } } - } + }, + "required": true }, "responses": { "200": { @@ -20264,7 +20221,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -20312,7 +20269,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceQuota" } } - } + }, + "required": true }, "responses": { "200": { @@ -20556,7 +20514,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -20628,7 +20586,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -20724,7 +20683,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceQuota" } } - } + }, + "required": true }, "responses": { "200": { @@ -20843,7 +20803,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -20915,7 +20875,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -21011,7 +20972,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceQuota" } } - } + }, + "required": true }, "responses": { "200": { @@ -21379,7 +21341,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -21427,7 +21389,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Secret" } } - } + }, + "required": true }, "responses": { "200": { @@ -21671,7 +21634,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -21743,7 +21706,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -21839,7 +21803,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Secret" } } - } + }, + "required": true }, "responses": { "200": { @@ -22207,7 +22172,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -22255,7 +22220,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ServiceAccount" } } - } + }, + "required": true }, "responses": { "200": { @@ -22499,7 +22465,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -22571,7 +22537,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -22667,7 +22634,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.ServiceAccount" } } - } + }, + "required": true }, "responses": { "200": { @@ -22775,7 +22743,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -22794,7 +22762,8 @@ "$ref": "#/components/schemas/io.k8s.api.authentication.v1.TokenRequest" } } - } + }, + "required": true }, "responses": { "200": { @@ -23182,7 +23151,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -23230,7 +23199,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Service" } } - } + }, + "required": true }, "responses": { "200": { @@ -23474,7 +23444,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -23546,7 +23516,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -23642,7 +23613,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Service" } } - } + }, + "required": true }, "responses": { "200": { @@ -24229,7 +24201,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -24301,7 +24273,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -24397,7 +24370,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Service" } } - } + }, + "required": true }, "responses": { "200": { @@ -24611,7 +24585,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -24683,7 +24657,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -24779,7 +24754,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Namespace" } } - } + }, + "required": true }, "responses": { "200": { @@ -24877,7 +24853,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -24896,7 +24872,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Namespace" } } - } + }, + "required": true }, "responses": { "200": { @@ -25005,7 +24982,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -25077,7 +25054,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -25173,7 +25151,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Namespace" } } - } + }, + "required": true }, "responses": { "200": { @@ -25531,7 +25510,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -25579,7 +25558,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Node" } } - } + }, + "required": true }, "responses": { "200": { @@ -25813,7 +25793,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -25885,7 +25865,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -25981,7 +25962,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Node" } } - } + }, + "required": true }, "responses": { "200": { @@ -26538,7 +26520,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -26610,7 +26592,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -26706,7 +26689,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.Node" } } - } + }, + "required": true }, "responses": { "200": { @@ -26860,7 +26844,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -27215,7 +27199,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -27263,7 +27247,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolume" } } - } + }, + "required": true }, "responses": { "200": { @@ -27497,7 +27482,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -27569,7 +27554,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -27665,7 +27651,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolume" } } - } + }, + "required": true }, "responses": { "200": { @@ -27774,7 +27761,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -27846,7 +27833,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -27942,7 +27930,8 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolume" } } - } + }, + "required": true }, "responses": { "200": { @@ -28096,7 +28085,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -28247,7 +28236,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -28398,7 +28387,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -28549,7 +28538,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -28700,7 +28689,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -28851,7 +28840,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -29002,7 +28991,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -29153,7 +29142,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -29304,7 +29293,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -29455,7 +29444,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -29606,7 +29595,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -29757,7 +29746,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -29918,7 +29907,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -30089,7 +30078,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -30250,7 +30239,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -30421,7 +30410,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -30582,7 +30571,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -30753,7 +30742,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -30914,7 +30903,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -31085,7 +31074,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -31246,7 +31235,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -31417,7 +31406,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -31578,7 +31567,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -31749,7 +31738,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -31910,7 +31899,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -32081,7 +32070,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -32242,7 +32231,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -32413,7 +32402,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -32574,7 +32563,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -32745,7 +32734,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -32906,7 +32895,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -33077,7 +33066,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -33238,7 +33227,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -33409,7 +33398,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -33570,7 +33559,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -33741,7 +33730,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -33902,7 +33891,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -34053,7 +34042,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -34214,7 +34203,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -34365,7 +34354,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -34516,7 +34505,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -34677,7 +34666,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -34828,7 +34817,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -34979,7 +34968,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -35130,7 +35119,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -35281,7 +35270,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -35432,7 +35421,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -35583,7 +35572,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -35734,7 +35723,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json index 13e8e5e5ebc57..5a800e8debb23 100644 --- a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json @@ -1077,7 +1077,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1358,7 +1357,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2013,7 +2011,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2061,7 +2059,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -2295,7 +2294,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2367,7 +2366,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2463,7 +2463,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -2821,7 +2822,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2869,7 +2870,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -3103,7 +3105,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3175,7 +3177,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3271,7 +3274,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -3425,7 +3429,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3586,7 +3590,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3737,7 +3741,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3898,7 +3902,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1alpha1_openapi.json index 8972b36a7a6f7..e22e057c562a8 100644 --- a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1alpha1_openapi.json @@ -725,7 +725,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." }, "message": { @@ -1234,7 +1233,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1515,7 +1513,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2170,7 +2167,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2218,7 +2215,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy" } } - } + }, + "required": true }, "responses": { "200": { @@ -2452,7 +2450,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2524,7 +2522,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2620,7 +2619,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy" } } - } + }, + "required": true }, "responses": { "200": { @@ -2729,7 +2729,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2801,7 +2801,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2897,7 +2898,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy" } } - } + }, + "required": true }, "responses": { "200": { @@ -3255,7 +3257,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3303,7 +3305,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding" } } - } + }, + "required": true }, "responses": { "200": { @@ -3537,7 +3540,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3609,7 +3612,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3705,7 +3709,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding" } } - } + }, + "required": true }, "responses": { "200": { @@ -3859,7 +3864,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4020,7 +4025,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4171,7 +4176,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4332,7 +4337,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1beta1_openapi.json b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1beta1_openapi.json index 8d1d1561341e4..597cba97194c8 100644 --- a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1beta1_openapi.json +++ b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1beta1_openapi.json @@ -727,7 +727,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable." }, "message": { @@ -1236,7 +1235,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1517,7 +1515,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2172,7 +2169,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2220,7 +2217,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy" } } - } + }, + "required": true }, "responses": { "200": { @@ -2454,7 +2452,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2526,7 +2524,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2622,7 +2621,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy" } } - } + }, + "required": true }, "responses": { "200": { @@ -2731,7 +2731,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2803,7 +2803,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2899,7 +2900,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy" } } - } + }, + "required": true }, "responses": { "200": { @@ -3257,7 +3259,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3305,7 +3307,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding" } } - } + }, + "required": true }, "responses": { "200": { @@ -3539,7 +3542,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3611,7 +3614,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3707,7 +3711,8 @@ "$ref": "#/components/schemas/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding" } } - } + }, + "required": true }, "responses": { "200": { @@ -3861,7 +3866,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4022,7 +4027,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4173,7 +4178,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4334,7 +4339,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json index 649cfac7bc2a4..8408158d1a0e9 100644 --- a/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json @@ -122,7 +122,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime last time the condition transitioned from one status to another." }, "message": { @@ -532,12 +531,7 @@ }, "dependencies": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray" }, "type": "object" }, @@ -546,12 +540,7 @@ }, "enum": { "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" }, "type": "array" }, @@ -1363,7 +1352,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1644,7 +1632,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2299,7 +2286,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2347,7 +2334,8 @@ "$ref": "#/components/schemas/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition" } } - } + }, + "required": true }, "responses": { "200": { @@ -2581,7 +2569,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2653,7 +2641,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2749,7 +2738,8 @@ "$ref": "#/components/schemas/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition" } } - } + }, + "required": true }, "responses": { "200": { @@ -2858,7 +2848,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2930,7 +2920,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3026,7 +3017,8 @@ "$ref": "#/components/schemas/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition" } } - } + }, + "required": true }, "responses": { "200": { @@ -3180,7 +3172,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3341,7 +3333,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__apiregistration.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__apiregistration.k8s.io__v1_openapi.json index e8db15e7b6e81..e6d1503bdcba1 100644 --- a/api/openapi-spec/v3/apis__apiregistration.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apiregistration.k8s.io__v1_openapi.json @@ -528,7 +528,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. 
It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -809,7 +808,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1171,7 +1169,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition transitioned from one status to another." }, "message": { @@ -1259,7 +1256,7 @@ }, "groupPriorityMinimum": { "default": 0, - "description": "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", "format": "int32", "type": "integer" }, @@ -1685,7 +1682,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1733,7 +1730,8 @@ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService" } } - } + }, + "required": true }, "responses": { "200": { @@ -1967,7 +1965,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2039,7 +2037,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2135,7 +2134,8 @@ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService" } } - } + }, + "required": true }, "responses": { "200": { @@ -2244,7 +2244,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2316,7 +2316,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2412,7 +2413,8 @@ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService" } } - } + }, + "required": true }, "responses": { "200": { @@ -2566,7 +2568,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2727,7 +2729,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__apps__v1_openapi.json b/api/openapi-spec/v3/apis__apps__v1_openapi.json index 3d553e4e7b739..80a2afa9fcbae 100644 --- a/api/openapi-spec/v3/apis__apps__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apps__v1_openapi.json @@ -14,7 +14,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Data is the serialized representation of the state." }, "kind": { @@ -151,7 +150,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition transitioned from one status to another." }, "message": { @@ -420,7 +418,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition transitioned from one status to another." }, "lastUpdateTime": { @@ -429,7 +426,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "The last time this condition was updated." }, "message": { @@ -693,7 +689,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "The last time the condition transitioned from one status to another." }, "message": { @@ -971,7 +966,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition transitioned from one status to another." }, "message": { @@ -2558,7 +2552,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } ], - "default": {}, "description": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." 
}, "scheme": { @@ -2760,6 +2753,14 @@ ], "description": "HTTPGet specifies the http request to perform." }, + "sleep": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.SleepAction" + } + ], + "description": "Sleep represents the duration that the container should sleep before being terminated." + }, "tcpSocket": { "allOf": [ { @@ -2989,7 +2990,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastProbeTime is the time we probed the condition." }, "lastTransitionTime": { @@ -2998,7 +2998,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime is the time the condition transitioned from one status to another." }, "message": { @@ -3105,24 +3104,14 @@ }, "allocatedResources": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "type": "object" }, "capacity": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "capacity represents the actual resources of the underlying volume.", "type": "object" @@ -3250,7 +3239,25 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" } ], - "description": "A label query over a set of resources, in this case pods." + "description": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, "namespaceSelector": { "allOf": [ @@ -3644,12 +3651,7 @@ }, "overhead": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "type": "object" @@ -4087,7 +4089,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" } ], - "default": {}, "description": "Specifies the output format of the exposed resources, defaults to \"1\"" }, "resource": { @@ -4123,24 +4124,14 @@ }, "limits": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" }, "requests": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" @@ -4438,6 +4429,21 @@ ], "type": "object" }, + "io.k8s.api.core.v1.SleepAction": { + "description": "SleepAction describes a \"sleep\" action.", + "properties": { + "seconds": { + "default": 0, + "description": "Seconds is the number of seconds to sleep.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "io.k8s.api.core.v1.StorageOSVolumeSource": { "description": "Represents a StorageOS persistent volume resource.", "properties": { @@ -4501,7 +4507,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } ], - "default": {}, "description": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." } }, @@ -4991,24 +4996,14 @@ "properties": { "limits": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" }, "requests": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" @@ -5682,7 +5677,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -5963,7 +5957,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." 
}, "type": { @@ -6426,7 +6419,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6577,7 +6570,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6728,7 +6721,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7093,7 +7086,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7141,7 +7134,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.ControllerRevision" } } - } + }, + "required": true }, "responses": { "200": { @@ -7385,7 +7379,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7457,7 +7451,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -7553,7 +7548,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.ControllerRevision" } } - } + }, + "required": true }, "responses": { "200": { @@ -7921,7 +7917,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7969,7 +7965,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.DaemonSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -8213,7 +8210,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8285,7 +8282,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -8381,7 +8379,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.DaemonSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -8500,7 +8499,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8572,7 +8571,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -8668,7 +8668,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.DaemonSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -9036,7 +9037,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9084,7 +9085,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.Deployment" } } - } + }, + "required": true }, "responses": { "200": { @@ -9328,7 +9330,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9400,7 +9402,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -9496,7 +9499,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.Deployment" } } - } + }, + "required": true }, "responses": { "200": { @@ -9615,7 +9619,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9687,7 +9691,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -9783,7 +9788,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v1.Scale" } } - } + }, + "required": true }, "responses": { "200": { @@ -9902,7 +9908,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9974,7 +9980,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -10070,7 +10077,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.Deployment" } } - } + }, + "required": true }, "responses": { "200": { @@ -10438,7 +10446,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -10486,7 +10494,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.ReplicaSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -10730,7 +10739,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -10802,7 +10811,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -10898,7 +10908,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.ReplicaSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -11017,7 +11028,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -11089,7 +11100,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -11185,7 +11197,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v1.Scale" } } - } + }, + "required": true }, "responses": { "200": { @@ -11304,7 +11317,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -11376,7 +11389,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -11472,7 +11486,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.ReplicaSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -11840,7 +11855,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -11888,7 +11903,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.StatefulSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -12132,7 +12148,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -12204,7 +12220,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -12300,7 +12317,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.StatefulSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -12419,7 +12437,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -12491,7 +12509,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -12587,7 +12606,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v1.Scale" } } - } + }, + "required": true }, "responses": { "200": { @@ -12706,7 +12726,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -12778,7 +12798,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -12874,7 +12895,8 @@ "$ref": "#/components/schemas/io.k8s.api.apps.v1.StatefulSet" } } - } + }, + "required": true }, "responses": { "200": { @@ -13028,7 +13050,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -13179,7 +13201,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -13330,7 +13352,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -13481,7 +13503,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -13632,7 +13654,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -13793,7 +13815,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -13964,7 +13986,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14125,7 +14147,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14296,7 +14318,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14457,7 +14479,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14628,7 +14650,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14789,7 +14811,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -14960,7 +14982,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -15121,7 +15143,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -15292,7 +15314,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -15443,7 +15465,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -15594,7 +15616,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__authentication.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__authentication.k8s.io__v1_openapi.json index 1c2b051009c51..51e5eea38550a 100644 --- a/api/openapi-spec/v3/apis__authentication.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__authentication.k8s.io__v1_openapi.json @@ -362,7 +362,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -579,7 +578,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -598,7 +597,8 @@ "$ref": "#/components/schemas/io.k8s.api.authentication.v1.SelfSubjectReview" } } - } + }, + "required": true }, "responses": { "200": { @@ -706,7 +706,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -725,7 +725,8 @@ "$ref": "#/components/schemas/io.k8s.api.authentication.v1.TokenReview" } } - } + }, + "required": true }, "responses": { "200": { diff --git a/api/openapi-spec/v3/apis__authentication.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__authentication.k8s.io__v1alpha1_openapi.json index 563e7b0809656..dad52107601f2 100644 --- a/api/openapi-spec/v3/apis__authentication.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__authentication.k8s.io__v1alpha1_openapi.json @@ -262,7 +262,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -479,7 +478,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -498,7 +497,8 @@ "$ref": "#/components/schemas/io.k8s.api.authentication.v1alpha1.SelfSubjectReview" } } - } + }, + "required": true }, "responses": { "200": { diff --git a/api/openapi-spec/v3/apis__authentication.k8s.io__v1beta1_openapi.json b/api/openapi-spec/v3/apis__authentication.k8s.io__v1beta1_openapi.json index 3a60dd58025ca..56cbc82974fcd 100644 --- a/api/openapi-spec/v3/apis__authentication.k8s.io__v1beta1_openapi.json +++ b/api/openapi-spec/v3/apis__authentication.k8s.io__v1beta1_openapi.json @@ -262,7 +262,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -479,7 +478,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -498,7 +497,8 @@ "$ref": "#/components/schemas/io.k8s.api.authentication.v1beta1.SelfSubjectReview" } } - } + }, + "required": true }, "responses": { "200": { diff --git a/api/openapi-spec/v3/apis__authorization.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__authorization.k8s.io__v1_openapi.json index 665101b6f5081..2035cc8c0483a 100644 --- a/api/openapi-spec/v3/apis__authorization.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__authorization.k8s.io__v1_openapi.json @@ -644,7 +644,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -871,7 +870,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -890,7 +889,8 @@ "$ref": "#/components/schemas/io.k8s.api.authorization.v1.LocalSubjectAccessReview" } } - } + }, + "required": true }, "responses": { "200": { @@ -998,7 +998,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1017,7 +1017,8 @@ "$ref": "#/components/schemas/io.k8s.api.authorization.v1.SelfSubjectAccessReview" } } - } + }, + "required": true }, "responses": { "200": { @@ -1125,7 +1126,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1144,7 +1145,8 @@ "$ref": "#/components/schemas/io.k8s.api.authorization.v1.SelfSubjectRulesReview" } } - } + }, + "required": true }, "responses": { "200": { @@ -1252,7 +1254,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1271,7 +1273,8 @@ "$ref": "#/components/schemas/io.k8s.api.authorization.v1.SubjectAccessReview" } } - } + }, + "required": true }, "responses": { "200": { diff --git a/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json index f1f4649f09933..b6c99917869d0 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json @@ -721,7 +721,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1002,7 +1001,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1453,7 +1451,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1818,7 +1816,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1866,7 +1864,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler" } } - } + }, + "required": true }, "responses": { "200": { @@ -2110,7 +2109,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2182,7 +2181,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2278,7 +2278,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler" } } - } + }, + "required": true }, "responses": { "200": { @@ -2397,7 +2398,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2469,7 +2470,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2565,7 +2567,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler" } } - } + }, + "required": true }, "responses": { "200": { @@ -2719,7 +2722,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2880,7 +2883,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3051,7 +3054,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json index 7691f42f0ae98..9900caec55f7b 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json @@ -276,7 +276,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime is the last time the condition transitioned from one status to another" }, "message": { @@ -1429,7 +1428,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1710,7 +1708,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2161,7 +2158,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2526,7 +2523,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2574,7 +2571,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler" } } - } + }, + "required": true }, "responses": { "200": { @@ -2818,7 +2816,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2890,7 +2888,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2986,7 +2985,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler" } } - } + }, + "required": true }, "responses": { "200": { @@ -3105,7 +3105,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3177,7 +3177,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3273,7 +3274,8 @@ "$ref": "#/components/schemas/io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler" } } - } + }, + "required": true }, "responses": { "200": { @@ -3427,7 +3429,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3588,7 +3590,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3759,7 +3761,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__batch__v1_openapi.json b/api/openapi-spec/v3/apis__batch__v1_openapi.json index f905594e77bc8..58efd872eeb17 100644 --- a/api/openapi-spec/v3/apis__batch__v1_openapi.json +++ b/api/openapi-spec/v3/apis__batch__v1_openapi.json @@ -237,7 +237,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition was checked." }, "lastTransitionTime": { @@ -246,7 +245,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition transit from one status to another." }, "message": { @@ -333,7 +331,7 @@ "type": "integer" }, "backoffLimitPerIndex": { - "description": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. 
It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "description": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "format": "int32", "type": "integer" }, @@ -351,7 +349,7 @@ "type": "boolean" }, "maxFailedIndexes": { - "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "format": "int32", "type": "integer" }, @@ -445,7 +443,7 @@ "type": "integer" }, "failedIndexes": { - "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "type": "string" }, "ready": { @@ -582,7 +580,7 @@ "properties": { "action": { "default": "", - "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "type": "string" }, "onExitCodes": { @@ -1871,7 +1869,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } ], - "default": {}, "description": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." }, "scheme": { @@ -2073,6 +2070,14 @@ ], "description": "HTTPGet specifies the http request to perform." }, + "sleep": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.SleepAction" + } + ], + "description": "Sleep represents the duration that the container should sleep before being terminated." + }, "tcpSocket": { "allOf": [ { @@ -2441,7 +2446,25 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" } ], - "description": "A label query over a set of resources, in this case pods." + "description": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, "namespaceSelector": { "allOf": [ @@ -2835,12 +2858,7 @@ }, "overhead": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "type": "object" @@ -3278,7 +3296,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" } ], - "default": {}, "description": "Specifies the output format of the exposed resources, defaults to \"1\"" }, "resource": { @@ -3314,24 +3331,14 @@ }, "limits": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" }, "requests": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" @@ -3629,6 +3636,21 @@ ], "type": "object" }, + "io.k8s.api.core.v1.SleepAction": { + "description": "SleepAction describes a \"sleep\" action.", + "properties": { + "seconds": { + "default": 0, + "description": "Seconds is the number of seconds to sleep.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "io.k8s.api.core.v1.StorageOSVolumeSource": { "description": "Represents a StorageOS persistent volume resource.", "properties": { @@ -3692,7 +3714,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString" } ], - "default": {}, "description": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." } }, @@ -4182,24 +4203,14 @@ "properties": { "limits": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" }, "requests": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" @@ -4873,7 +4884,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -5154,7 +5164,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -5617,7 +5626,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5768,7 +5777,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6133,7 +6142,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6181,7 +6190,8 @@ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob" } } - } + }, + "required": true }, "responses": { "200": { @@ -6425,7 +6435,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6497,7 +6507,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -6593,7 +6604,8 @@ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob" } } - } + }, + "required": true }, "responses": { "200": { @@ -6712,7 +6724,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6784,7 +6796,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -6880,7 +6893,8 @@ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob" } } - } + }, + "required": true }, "responses": { "200": { @@ -7248,7 +7262,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7296,7 +7310,8 @@ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job" } } - } + }, + "required": true }, "responses": { "200": { @@ -7540,7 +7555,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7612,7 +7627,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -7708,7 +7724,8 @@ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job" } } - } + }, + "required": true }, "responses": { "200": { @@ -7827,7 +7844,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7899,7 +7916,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -7995,7 +8013,8 @@ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job" } } - } + }, + "required": true }, "responses": { "200": { @@ -8149,7 +8168,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8300,7 +8319,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8461,7 +8480,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8632,7 +8651,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8793,7 +8812,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8964,7 +8983,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json index a0f76b0b4be5d..79477cd7aeb11 100644 --- a/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json @@ -60,7 +60,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time." }, "lastUpdateTime": { @@ -69,7 +68,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastUpdateTime is the time of the last update to this condition" }, "message": { @@ -759,7 +757,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. 
Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1040,7 +1037,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1695,7 +1691,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1743,7 +1739,8 @@ "$ref": "#/components/schemas/io.k8s.api.certificates.v1.CertificateSigningRequest" } } - } + }, + "required": true }, "responses": { "200": { @@ -1977,7 +1974,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2049,7 +2046,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2145,7 +2143,8 @@ "$ref": "#/components/schemas/io.k8s.api.certificates.v1.CertificateSigningRequest" } } - } + }, + "required": true }, "responses": { "200": { @@ -2254,7 +2253,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2326,7 +2325,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2422,7 +2422,8 @@ "$ref": "#/components/schemas/io.k8s.api.certificates.v1.CertificateSigningRequest" } } - } + }, + "required": true }, "responses": { "200": { @@ -2531,7 +2532,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2603,7 +2604,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2699,7 +2701,8 @@ "$ref": "#/components/schemas/io.k8s.api.certificates.v1.CertificateSigningRequest" } } - } + }, + "required": true }, "responses": { "200": { @@ -2853,7 +2856,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3014,7 +3017,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__certificates.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__certificates.k8s.io__v1alpha1_openapi.json index 5e337766de64a..e7c7e6a8472e6 100644 --- a/api/openapi-spec/v3/apis__certificates.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__certificates.k8s.io__v1alpha1_openapi.json @@ -633,7 +633,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -914,7 +913,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1569,7 +1567,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1617,7 +1615,8 @@ "$ref": "#/components/schemas/io.k8s.api.certificates.v1alpha1.ClusterTrustBundle" } } - } + }, + "required": true }, "responses": { "200": { @@ -1851,7 +1850,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1923,7 +1922,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2019,7 +2019,8 @@ "$ref": "#/components/schemas/io.k8s.api.certificates.v1alpha1.ClusterTrustBundle" } } - } + }, + "required": true }, "responses": { "200": { @@ -2173,7 +2174,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2334,7 +2335,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json index 76f9ae96e510d..b7bcf9c215b07 100644 --- a/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json @@ -653,7 +653,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -934,7 +933,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1385,7 +1383,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1750,7 +1748,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1798,7 +1796,8 @@ "$ref": "#/components/schemas/io.k8s.api.coordination.v1.Lease" } } - } + }, + "required": true }, "responses": { "200": { @@ -2042,7 +2041,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2114,7 +2113,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2210,7 +2210,8 @@ "$ref": "#/components/schemas/io.k8s.api.coordination.v1.Lease" } } - } + }, + "required": true }, "responses": { "200": { @@ -2364,7 +2365,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2525,7 +2526,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2696,7 +2697,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json index 7613ce41fab38..bc47ef42febd9 100644 --- a/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json @@ -140,11 +140,11 @@ "description": "EndpointPort represents a Port used by an EndpointSlice", "properties": { "appProtocol": { - "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "type": "string" }, "name": { - "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. 
* must start and end with an alphanumeric character. Default is empty string.", "type": "string" }, "port": { @@ -811,7 +811,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1092,7 +1091,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1543,7 +1541,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1908,7 +1906,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1956,7 +1954,8 @@ "$ref": "#/components/schemas/io.k8s.api.discovery.v1.EndpointSlice" } } - } + }, + "required": true }, "responses": { "200": { @@ -2200,7 +2199,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2272,7 +2271,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2368,7 +2368,8 @@ "$ref": "#/components/schemas/io.k8s.api.discovery.v1.EndpointSlice" } } - } + }, + "required": true }, "responses": { "200": { @@ -2522,7 +2523,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2683,7 +2684,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2854,7 +2855,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json index 93cc1a48ed9be..01b2325fa5be2 100644 --- a/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json @@ -72,7 +72,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type." }, "deprecatedLastTimestamp": { @@ -81,7 +80,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type." }, "deprecatedSource": { @@ -99,7 +97,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" } ], - "default": {}, "description": "eventTime is the time when this Event was first observed. It is required." }, "kind": { @@ -233,7 +230,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" } ], - "default": {}, "description": "lastObservedTime is the time when last Event from the series was seen before last heartbeat." } }, @@ -775,7 +771,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1056,7 +1051,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1507,7 +1501,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1872,7 +1866,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1920,7 +1914,8 @@ "$ref": "#/components/schemas/io.k8s.api.events.v1.Event" } } - } + }, + "required": true }, "responses": { "200": { @@ -2164,7 +2159,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2236,7 +2231,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2332,7 +2328,8 @@ "$ref": "#/components/schemas/io.k8s.api.events.v1.Event" } } - } + }, + "required": true }, "responses": { "200": { @@ -2486,7 +2483,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2647,7 +2644,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2818,7 +2815,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json index 9fb4262a968df..9ec882a8f2740 100644 --- a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json +++ b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json @@ -88,7 +88,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another." }, "message": { @@ -271,7 +270,7 @@ "properties": { "assuredConcurrencyShares": { "default": 0, - "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", + "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). 
This field has a default value of 30.", "format": "int32", "type": "integer" }, @@ -430,7 +429,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another." }, "message": { @@ -1256,7 +1254,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1537,7 +1534,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2192,7 +2188,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2240,7 +2236,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta2.FlowSchema" } } - } + }, + "required": true }, "responses": { "200": { @@ -2474,7 +2471,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2546,7 +2543,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2642,7 +2640,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta2.FlowSchema" } } - } + }, + "required": true }, "responses": { "200": { @@ -2751,7 +2750,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2823,7 +2822,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2919,7 +2919,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta2.FlowSchema" } } - } + }, + "required": true }, "responses": { "200": { @@ -3277,7 +3278,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3325,7 +3326,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -3559,7 +3561,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3631,7 +3633,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3727,7 +3730,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -3836,7 +3840,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3908,7 +3912,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4004,7 +4009,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -4158,7 +4164,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4319,7 +4325,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4470,7 +4476,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4631,7 +4637,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta3_openapi.json b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta3_openapi.json index 1a3246420596b..9f34a3f054cf5 100644 --- a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta3_openapi.json +++ b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta3_openapi.json @@ -88,7 +88,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another." 
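The flowcontrol hunks above also touch the `assuredConcurrencyShares` description, which spells out how the server's concurrency limit (SCL) is divided: ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ). A minimal Go sketch of that arithmetic follows; the helper name `acv` and the concrete SCL/share numbers are illustrative only (30 is the documented default share, the rest are made up).

```go
package main

import (
	"fmt"
	"math"
)

// acv computes the assured concurrency value for one priority level,
// following the formula quoted in the assuredConcurrencyShares description:
//   ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
func acv(scl, acs int, allShares []int) int {
	total := 0
	for _, s := range allShares {
		total += s
	}
	return int(math.Ceil(float64(scl) * float64(acs) / float64(total)))
}

func main() {
	// Hypothetical numbers: a server concurrency limit of 600 shared by three
	// priority levels with shares 30 (the documented default), 30 and 40.
	shares := []int{30, 30, 40}
	for _, s := range shares {
		fmt.Printf("ACS=%d -> ACV=%d\n", s, acv(600, s, shares))
	}
	// Prints ACV values of 180, 180 and 240: bigger shares reserve more
	// concurrent requests at the expense of the other priority levels.
}
```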
}, "message": { @@ -432,7 +431,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another." }, "message": { @@ -1260,7 +1258,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1541,7 +1538,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2196,7 +2192,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2244,7 +2240,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta3.FlowSchema" } } - } + }, + "required": true }, "responses": { "200": { @@ -2478,7 +2475,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2550,7 +2547,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2646,7 +2644,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta3.FlowSchema" } } - } + }, + "required": true }, "responses": { "200": { @@ -2755,7 +2754,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2827,7 +2826,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2923,7 +2923,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta3.FlowSchema" } } - } + }, + "required": true }, "responses": { "200": { @@ -3281,7 +3282,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3329,7 +3330,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -3563,7 +3565,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3635,7 +3637,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3731,7 +3734,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -3840,7 +3844,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3912,7 +3916,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4008,7 +4013,8 @@ "$ref": "#/components/schemas/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration" } } - } + }, + "required": true }, "responses": { "200": { @@ -4162,7 +4168,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4323,7 +4329,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4474,7 +4480,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4635,7 +4641,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json index 24629da958fa2..b3f08ef908c0b 100644 --- a/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json @@ -94,7 +94,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "Last time the condition transitioned from one status to another." 
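Two changes repeat across every path in these specs: the `pretty` query parameter's description now notes that it defaults to 'false' unless the user-agent looks like a browser or a command-line HTTP tool such as curl or wget, and request bodies for create/patch/replace operations are marked "required": true. A client that wants indented output regardless of its user-agent can still ask for it explicitly. A minimal sketch, assuming a local `kubectl proxy` on its default 127.0.0.1:8001 (the URL and port are assumptions, not part of this diff):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Ask for pretty-printed JSON explicitly instead of relying on
	// user-agent sniffing; the path is one of the endpoints updated above.
	url := "http://127.0.0.1:8001/apis/flowcontrol.apiserver.k8s.io/v1beta3/flowschemas?pretty=true"
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // indented JSON, because pretty=true was set explicitly
}
```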
}, "message": { @@ -747,7 +746,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1028,7 +1026,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1683,7 +1680,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1731,7 +1728,8 @@ "$ref": "#/components/schemas/io.k8s.api.apiserverinternal.v1alpha1.StorageVersion" } } - } + }, + "required": true }, "responses": { "200": { @@ -1965,7 +1963,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2037,7 +2035,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2133,7 +2132,8 @@ "$ref": "#/components/schemas/io.k8s.api.apiserverinternal.v1alpha1.StorageVersion" } } - } + }, + "required": true }, "responses": { "200": { @@ -2242,7 +2242,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2314,7 +2314,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2410,7 +2411,8 @@ "$ref": "#/components/schemas/io.k8s.api.apiserverinternal.v1alpha1.StorageVersion" } } - } + }, + "required": true }, "responses": { "200": { @@ -2564,7 +2566,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2725,7 +2727,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json index a9e9b56a180ca..51122cf449a47 100644 --- a/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json @@ -1374,7 +1374,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1655,7 +1654,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2322,7 +2320,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2370,7 +2368,8 @@ "$ref": "#/components/schemas/io.k8s.api.networking.v1.IngressClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -2604,7 +2603,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2676,7 +2675,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2772,7 +2772,8 @@ "$ref": "#/components/schemas/io.k8s.api.networking.v1.IngressClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -2926,7 +2927,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3291,7 +3292,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3339,7 +3340,8 @@ "$ref": "#/components/schemas/io.k8s.api.networking.v1.Ingress" } } - } + }, + "required": true }, "responses": { "200": { @@ -3583,7 +3585,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3655,7 +3657,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3751,7 +3754,8 @@ "$ref": "#/components/schemas/io.k8s.api.networking.v1.Ingress" } } - } + }, + "required": true }, "responses": { "200": { @@ -3870,7 +3874,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3942,7 +3946,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4038,7 +4043,8 @@ "$ref": "#/components/schemas/io.k8s.api.networking.v1.Ingress" } } - } + }, + "required": true }, "responses": { "200": { @@ -4406,7 +4412,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4454,7 +4460,8 @@ "$ref": "#/components/schemas/io.k8s.api.networking.v1.NetworkPolicy" } } - } + }, + "required": true }, "responses": { "200": { @@ -4698,7 +4705,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4770,7 +4777,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4866,7 +4874,8 @@ "$ref": "#/components/schemas/io.k8s.api.networking.v1.NetworkPolicy" } } - } + }, + "required": true }, "responses": { "200": { @@ -5020,7 +5029,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5171,7 +5180,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5332,7 +5341,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5483,7 +5492,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5644,7 +5653,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5815,7 +5824,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5976,7 +5985,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6147,7 +6156,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6298,7 +6307,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json index f36af0900365d..358efd5766fa7 100644 --- a/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json @@ -1,204 +1,6 @@ { "components": { "schemas": { - "io.k8s.api.core.v1.NodeSelector": { - "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", - "properties": { - "nodeSelectorTerms": { - "description": "Required. A list of node selector terms. The terms are ORed.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm" - } - ], - "default": {} - }, - "type": "array" - } - }, - "required": [ - "nodeSelectorTerms" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "io.k8s.api.core.v1.NodeSelectorRequirement": { - "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "properties": { - "key": { - "default": "", - "description": "The label key that the selector applies to.", - "type": "string" - }, - "operator": { - "default": "", - "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", - "type": "string" - }, - "values": { - "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch.", - "items": { - "default": "", - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "key", - "operator" - ], - "type": "object" - }, - "io.k8s.api.core.v1.NodeSelectorTerm": { - "description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", - "properties": { - "matchExpressions": { - "description": "A list of node selector requirements by node's labels.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement" - } - ], - "default": {} - }, - "type": "array" - }, - "matchFields": { - "description": "A list of node selector requirements by node's fields.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement" - } - ], - "default": {} - }, - "type": "array" - } - }, - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDR": { - "description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" - } - ], - "default": {}, - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - }, - "spec": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec" - } - ], - "default": {}, - "description": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" - } - }, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRList": { - "description": "ClusterCIDRList contains a list of ClusterCIDR.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "items": { - "description": "items is the list of ClusterCIDRs.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - ], - "default": {} - }, - "type": "array" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" - } - ], - "default": {}, - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - } - }, - "required": [ - "items" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDRList", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": { - "description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "properties": { - "ipv4": { - "default": "", - "description": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "ipv6": { - "default": "", - "description": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "nodeSelector": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelector" - } - ], - "description": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable." - }, - "perNodeHostBits": { - "default": 0, - "description": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "format": "int32", - "type": "integer" - } - }, - "required": [ - "perNodeHostBits" - ], - "type": "object" - }, "io.k8s.api.networking.v1alpha1.IPAddress": { "description": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. 
Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "properties": { @@ -315,10 +117,6 @@ "resource": { "description": "Resource is the resource of the object being referenced.", "type": "string" - }, - "uid": { - "description": "UID is the uid of the object being referenced.", - "type": "string" } }, "type": "object" @@ -850,7 +648,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1131,7 +928,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1486,10 +1282,10 @@ ] } }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs": { + "/apis/networking.k8s.io/v1alpha1/ipaddresses": { "delete": { - "description": "delete collection of ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR", + "description": "delete collection of IPAddress", + "operationId": "deleteNetworkingV1alpha1CollectionIPAddress", "parameters": [ { "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", @@ -1640,13 +1436,13 @@ "x-kubernetes-action": "deletecollection", "x-kubernetes-group-version-kind": { "group": "networking.k8s.io", - "kind": "ClusterCIDR", + "kind": "IPAddress", "version": "v1alpha1" } }, "get": { - "description": "list or watch objects of kind ClusterCIDR", - "operationId": "listNetworkingV1alpha1ClusterCIDR", + "description": "list or watch objects of kind IPAddress", + "operationId": "listNetworkingV1alpha1IPAddress", "parameters": [ { "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", @@ -1744,27 +1540,27 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" } }, "application/json;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" } }, "application/vnd.kubernetes.protobuf;stream=watch": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" } } }, @@ -1780,13 +1576,13 @@ "x-kubernetes-action": "list", "x-kubernetes-group-version-kind": { "group": "networking.k8s.io", - "kind": "ClusterCIDR", + "kind": "IPAddress", "version": "v1alpha1" } }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1796,8 +1592,8 @@ } ], "post": { - "description": "create a ClusterCIDR", - "operationId": "createNetworkingV1alpha1ClusterCIDR", + "description": "create an IPAddress", + "operationId": "createNetworkingV1alpha1IPAddress", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -1831,27 +1627,28 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } - } + }, + "required": true }, "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } }, @@ -1861,17 +1658,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } }, @@ -1881,17 +1678,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } }, @@ -1907,15 +1704,15 @@ "x-kubernetes-action": "post", "x-kubernetes-group-version-kind": { "group": "networking.k8s.io", - "kind": "ClusterCIDR", + "kind": "IPAddress", "version": "v1alpha1" } } }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": { + "/apis/networking.k8s.io/v1alpha1/ipaddresses/{name}": { "delete": { - "description": "delete a ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1ClusterCIDR", + "description": "delete an IPAddress", + "operationId": "deleteNetworkingV1alpha1IPAddress", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -2014,29 +1811,29 @@ "x-kubernetes-action": "delete", "x-kubernetes-group-version-kind": { "group": "networking.k8s.io", - "kind": "ClusterCIDR", + "kind": "IPAddress", "version": "v1alpha1" } }, "get": { - "description": "read the specified ClusterCIDR", - "operationId": "readNetworkingV1alpha1ClusterCIDR", + "description": "read the specified IPAddress", + "operationId": "readNetworkingV1alpha1IPAddress", "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } }, @@ -2052,13 +1849,13 @@ "x-kubernetes-action": "get", "x-kubernetes-group-version-kind": { "group": "networking.k8s.io", - "kind": "ClusterCIDR", + "kind": "IPAddress", "version": "v1alpha1" } }, "parameters": [ { - "description": "name of the ClusterCIDR", + "description": "name of the IPAddress", "in": "path", "name": "name", "required": true, @@ -2068,7 +1865,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2078,8 +1875,8 @@ } ], "patch": { - "description": "partially update the specified ClusterCIDR", - "operationId": "patchNetworkingV1alpha1ClusterCIDR", + "description": "partially update the specified IPAddress", + "operationId": "patchNetworkingV1alpha1IPAddress", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -2140,24 +1937,25 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } }, @@ -2167,17 +1965,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } }, @@ -2193,13 +1991,13 @@ "x-kubernetes-action": "patch", "x-kubernetes-group-version-kind": { "group": "networking.k8s.io", - "kind": "ClusterCIDR", + "kind": "IPAddress", "version": "v1alpha1" } }, "put": { - "description": "replace the specified ClusterCIDR", - "operationId": "replaceNetworkingV1alpha1ClusterCIDR", + "description": "replace the specified IPAddress", + "operationId": "replaceNetworkingV1alpha1IPAddress", "parameters": [ { "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", @@ -2233,27 +2031,28 @@ "content": { "*/*": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } - } + }, + "required": true }, "responses": { "200": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } }, @@ -2263,17 +2062,17 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/vnd.kubernetes.protobuf": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } }, "application/yaml": { "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" } } }, @@ -2289,1131 +2088,11 @@ "x-kubernetes-action": "put", "x-kubernetes-group-version-kind": { "group": "networking.k8s.io", - "kind": "ClusterCIDR", + "kind": "IPAddress", "version": "v1alpha1" } } }, - "/apis/networking.k8s.io/v1alpha1/ipaddresses": { - "delete": { - "description": "delete collection of IPAddress", - "operationId": "deleteNetworkingV1alpha1CollectionIPAddress", - "parameters": [ - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "IPAddress", - "version": "v1alpha1" - } - }, - "get": { - "description": "list or watch objects of kind IPAddress", - "operationId": "listNetworkingV1alpha1IPAddress", - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. 
Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddressList" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "IPAddress", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "post": { - "description": "create an IPAddress", - "operationId": "createNetworkingV1alpha1IPAddress", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - }, - "description": "Created" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "IPAddress", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/ipaddresses/{name}": { - "delete": { - "description": "delete an IPAddress", - "operationId": "deleteNetworkingV1alpha1IPAddress", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "IPAddress", - "version": "v1alpha1" - } - }, - "get": { - "description": "read the specified IPAddress", - "operationId": "readNetworkingV1alpha1IPAddress", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - }, - "description": "OK" - }, 
- "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "IPAddress", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "name of the IPAddress", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "patch": { - "description": "partially update the specified IPAddress", - "operationId": "patchNetworkingV1alpha1IPAddress", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", - "in": "query", - "name": "force", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "application/apply-patch+yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/json-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/strategic-merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "IPAddress", - "version": "v1alpha1" - } - }, - "put": { - "description": "replace the specified IPAddress", - "operationId": "replaceNetworkingV1alpha1IPAddress", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. 
This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.IPAddress" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "IPAddress", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": { - "get": { - "description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchNetworkingV1alpha1ClusterCIDRList", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": { - "get": { - "description": "watch changes to an object of kind ClusterCIDR. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchNetworkingV1alpha1ClusterCIDR", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. 
Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, "/apis/networking.k8s.io/v1alpha1/watch/ipaddresses": { "get": { "description": "watch individual changes to a list of IPAddress. deprecated: use the 'watch' parameter with a list operation instead.", @@ -3510,7 +2189,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3671,7 +2350,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json index ee527019cafd7..d471d8ea390f8 100644 --- a/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json @@ -33,12 +33,7 @@ "properties": { "podFixed": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "podFixed represents the fixed resource overhead associated with running a pod.", "type": "object" @@ -711,7 +706,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -992,7 +986,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1647,7 +1640,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1695,7 +1688,8 @@ "$ref": "#/components/schemas/io.k8s.api.node.v1.RuntimeClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -1929,7 +1923,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2001,7 +1995,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2097,7 +2092,8 @@ "$ref": "#/components/schemas/io.k8s.api.node.v1.RuntimeClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -2251,7 +2247,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2412,7 +2408,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__policy__v1_openapi.json b/api/openapi-spec/v3/apis__policy__v1_openapi.json index 8b304c287f672..92f92d7a71308 100644 --- a/api/openapi-spec/v3/apis__policy__v1_openapi.json +++ b/api/openapi-spec/v3/apis__policy__v1_openapi.json @@ -164,12 +164,7 @@ }, "disruptedPods": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.", "type": "object" @@ -320,7 +315,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." }, "message": { @@ -829,7 +823,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1110,7 +1103,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1787,7 +1779,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1835,7 +1827,8 @@ "$ref": "#/components/schemas/io.k8s.api.policy.v1.PodDisruptionBudget" } } - } + }, + "required": true }, "responses": { "200": { @@ -2079,7 +2072,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2151,7 +2144,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2247,7 +2241,8 @@ "$ref": "#/components/schemas/io.k8s.api.policy.v1.PodDisruptionBudget" } } - } + }, + "required": true }, "responses": { "200": { @@ -2366,7 +2361,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2438,7 +2433,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2534,7 +2530,8 @@ "$ref": "#/components/schemas/io.k8s.api.policy.v1.PodDisruptionBudget" } } - } + }, + "required": true }, "responses": { "200": { @@ -2688,7 +2685,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2849,7 +2846,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3020,7 +3017,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3171,7 +3168,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json index ae81629f20d45..6e348f2248aec 100644 --- a/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json @@ -1086,7 +1086,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1367,7 +1366,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2022,7 +2020,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2070,7 +2068,8 @@ "$ref": "#/components/schemas/io.k8s.api.rbac.v1.ClusterRoleBinding" } } - } + }, + "required": true }, "responses": { "200": { @@ -2304,7 +2303,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2376,7 +2375,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2472,7 +2472,8 @@ "$ref": "#/components/schemas/io.k8s.api.rbac.v1.ClusterRoleBinding" } } - } + }, + "required": true }, "responses": { "200": { @@ -2830,7 +2831,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2878,7 +2879,8 @@ "$ref": "#/components/schemas/io.k8s.api.rbac.v1.ClusterRole" } } - } + }, + "required": true }, "responses": { "200": { @@ -3112,7 +3114,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3184,7 +3186,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3280,7 +3283,8 @@ "$ref": "#/components/schemas/io.k8s.api.rbac.v1.ClusterRole" } } - } + }, + "required": true }, "responses": { "200": { @@ -3648,7 +3652,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3696,7 +3700,8 @@ "$ref": "#/components/schemas/io.k8s.api.rbac.v1.RoleBinding" } } - } + }, + "required": true }, "responses": { "200": { @@ -3940,7 +3945,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4012,7 +4017,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4108,7 +4114,8 @@ "$ref": "#/components/schemas/io.k8s.api.rbac.v1.RoleBinding" } } - } + }, + "required": true }, "responses": { "200": { @@ -4476,7 +4483,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4524,7 +4531,8 @@ "$ref": "#/components/schemas/io.k8s.api.rbac.v1.Role" } } - } + }, + "required": true }, "responses": { "200": { @@ -4768,7 +4776,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4840,7 +4848,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4936,7 +4945,8 @@ "$ref": "#/components/schemas/io.k8s.api.rbac.v1.Role" } } - } + }, + "required": true }, "responses": { "200": { @@ -5090,7 +5100,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5241,7 +5251,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5392,7 +5402,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5553,7 +5563,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5704,7 +5714,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5865,7 +5875,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6026,7 +6036,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6197,7 +6207,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6358,7 +6368,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6529,7 +6539,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6680,7 +6690,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6831,7 +6841,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha2_openapi.json b/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha2_openapi.json index 5c38aa50567d7..a8069c637af15 100644 --- a/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha2_openapi.json +++ b/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha2_openapi.json @@ -1265,7 +1265,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -1546,7 +1545,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -2211,7 +2209,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2259,7 +2257,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext" } } - } + }, + "required": true }, "responses": { "200": { @@ -2503,7 +2502,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2575,7 +2574,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2671,7 +2671,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext" } } - } + }, + "required": true }, "responses": { "200": { @@ -2790,7 +2791,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2862,7 +2863,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2958,7 +2960,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.PodSchedulingContext" } } - } + }, + "required": true }, "responses": { "200": { @@ -3326,7 +3329,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3374,7 +3377,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } - } + }, + "required": true }, "responses": { "200": { @@ -3618,7 +3622,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3690,7 +3694,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3786,7 +3791,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } - } + }, + "required": true }, "responses": { "200": { @@ -3905,7 +3911,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3977,7 +3983,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4073,7 +4080,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaim" } } - } + }, + "required": true }, "responses": { "200": { @@ -4441,7 +4449,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4489,7 +4497,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } - } + }, + "required": true }, "responses": { "200": { @@ -4733,7 +4742,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4805,7 +4814,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4901,7 +4911,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" } } - } + }, + "required": true }, "responses": { "200": { @@ -5055,7 +5066,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5206,7 +5217,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5357,7 +5368,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5712,7 +5723,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5760,7 +5771,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -5994,7 +6006,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6066,7 +6078,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -6162,7 +6175,8 @@ "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha2.ResourceClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -6326,7 +6340,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6497,7 +6511,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6658,7 +6672,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6829,7 +6843,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6990,7 +7004,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7161,7 +7175,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7312,7 +7326,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7463,7 +7477,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7614,7 +7628,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7765,7 +7779,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7926,7 +7940,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json index 14877c182eae4..220c93beb5e09 100644 --- a/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json @@ -624,7 +624,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -905,7 +904,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -1560,7 +1558,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1608,7 +1606,8 @@ "$ref": "#/components/schemas/io.k8s.api.scheduling.v1.PriorityClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -1842,7 +1841,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -1914,7 +1913,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -2010,7 +2010,8 @@ "$ref": "#/components/schemas/io.k8s.api.scheduling.v1.PriorityClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -2164,7 +2165,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -2325,7 +2326,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json index 275fb332b1951..80403e179de4f 100644 --- a/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json @@ -661,12 +661,7 @@ }, "capacity": { "additionalProperties": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" - } - ], - "default": {} + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" }, "description": "capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", "type": "object" @@ -1273,7 +1268,7 @@ "type": "string" }, "podInfoOnMount": { - "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "type": "boolean" }, "requiresRepublish": { @@ -1908,7 +1903,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "time represents the time the error was encountered." } }, @@ -2518,7 +2512,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } ], - "default": {}, "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { @@ -2799,7 +2792,6 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" } ], - "default": {}, "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." }, "type": { @@ -3454,7 +3446,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3502,7 +3494,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.CSIDriver" } } - } + }, + "required": true }, "responses": { "200": { @@ -3736,7 +3729,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -3808,7 +3801,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -3904,7 +3898,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.CSIDriver" } } - } + }, + "required": true }, "responses": { "200": { @@ -4262,7 +4257,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4310,7 +4305,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.CSINode" } } - } + }, + "required": true }, "responses": { "200": { @@ -4544,7 +4540,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -4616,7 +4612,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -4712,7 +4709,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.CSINode" } } - } + }, + "required": true }, "responses": { "200": { @@ -4866,7 +4864,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5231,7 +5229,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5279,7 +5277,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.CSIStorageCapacity" } } - } + }, + "required": true }, "responses": { "200": { @@ -5523,7 +5522,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -5595,7 +5594,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -5691,7 +5691,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.CSIStorageCapacity" } } - } + }, + "required": true }, "responses": { "200": { @@ -6049,7 +6050,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6097,7 +6098,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.StorageClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -6331,7 +6333,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6403,7 +6405,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -6499,7 +6502,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.StorageClass" } } - } + }, + "required": true }, "responses": { "200": { @@ -6857,7 +6861,7 @@ }, "parameters": [ { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -6905,7 +6909,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.VolumeAttachment" } } - } + }, + "required": true }, "responses": { "200": { @@ -7139,7 +7144,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7211,7 +7216,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -7307,7 +7313,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.VolumeAttachment" } } - } + }, + "required": true }, "responses": { "200": { @@ -7416,7 +7423,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7488,7 +7495,8 @@ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" } } - } + }, + "required": true }, "responses": { "200": { @@ -7584,7 +7592,8 @@ "$ref": "#/components/schemas/io.k8s.api.storage.v1.VolumeAttachment" } } - } + }, + "required": true }, "responses": { "200": { @@ -7738,7 +7747,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -7899,7 +7908,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8050,7 +8059,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8211,7 +8220,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8362,7 +8371,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8523,7 +8532,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8694,7 +8703,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -8845,7 +8854,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9006,7 +9015,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9157,7 +9166,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { @@ -9318,7 +9327,7 @@ } }, { - "description": "If 'true', then the output is pretty printed.", + "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", "in": "query", "name": "pretty", "schema": { diff --git a/build/build-image/cross/VERSION b/build/build-image/cross/VERSION index 34a24f9c90ccf..2cb26245c0349 100644 --- a/build/build-image/cross/VERSION +++ b/build/build-image/cross/VERSION @@ -1 +1 @@ -v1.29.0-go1.21.1-bullseye.0 +v1.29.0-go1.21.3-bullseye.0 diff --git a/build/common.sh b/build/common.sh index 2935df356451d..dd493a1feba1c 100755 --- a/build/common.sh +++ b/build/common.sh @@ -96,8 +96,8 @@ readonly KUBE_RSYNC_PORT="${KUBE_RSYNC_PORT:-}" readonly KUBE_CONTAINER_RSYNC_PORT=8730 # These are the default versions (image tags) for their respective base images. -readonly __default_distroless_iptables_version=v0.3.2 -readonly __default_go_runner_version=v2.3.1-go1.21.1-bookworm.0 +readonly __default_distroless_iptables_version=v0.4.1 +readonly __default_go_runner_version=v2.3.1-go1.21.3-bookworm.0 readonly __default_setcap_version=bookworm-v1.0.0 # These are the base images for the Docker-wrapped binaries. 
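For context, the version defaults above follow the usual "${VAR:-default}" override pattern, and GOTOOLCHAIN (the standard Go 1.21+ toolchain selector, forwarded into the build container below) can pin the exact toolchain used for the build. A minimal, self-contained sketch of that pattern; the OVERRIDE_* names are illustrative only, not variables the build system actually defines:

#!/usr/bin/env bash
set -euo pipefail

# Caller-supplied overrides win; otherwise fall back to the baked-in defaults.
go_runner_version="${OVERRIDE_GO_RUNNER_VERSION:-v2.3.1-go1.21.3-bookworm.0}"
distroless_iptables_version="${OVERRIDE_DISTROLESS_IPTABLES_VERSION:-v0.4.1}"

# GOTOOLCHAIN=go1.21.3 forces that exact Go toolchain; "auto" instead lets the
# go command fetch a newer one when go.mod/toolchain directives require it.
export GOTOOLCHAIN="${GOTOOLCHAIN:-go1.21.3}"

echo "go-runner base image tag:           ${go_runner_version}"
echo "distroless-iptables base image tag: ${distroless_iptables_version}"
echo "GOTOOLCHAIN:                        ${GOTOOLCHAIN}"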
@@ -540,6 +540,7 @@ function kube::build::run_build_command_ex() { --env "KUBE_CGO_OVERRIDES=' ${KUBE_CGO_OVERRIDES[*]:-} '" --env "FORCE_HOST_GO=${FORCE_HOST_GO:-}" --env "GO_VERSION=${GO_VERSION:-}" + --env "GOTOOLCHAIN=${GOTOOLCHAIN:-}" --env "GOFLAGS=${GOFLAGS:-}" --env "GOGCFLAGS=${GOGCFLAGS:-}" --env "SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH:-}" @@ -676,7 +677,6 @@ function kube::build::sync_to_container() { # necessary. kube::build::rsync \ --delete \ - --filter='H /.git' \ --filter='- /_tmp/' \ --filter='- /_output/' \ --filter='- /' \ diff --git a/build/dependencies.yaml b/build/dependencies.yaml index 08c2849f36be2..161182ce378b2 100644 --- a/build/dependencies.yaml +++ b/build/dependencies.yaml @@ -97,13 +97,15 @@ dependencies: match: registry.k8s.io/node-problem-detector/node-problem-detector - path: cluster/addons/node-problem-detector/npd.yaml match: registry.k8s.io/node-problem-detector/node-problem-detector + - path: cluster/addons/node-problem-detector/npd.yaml + match: app.kubernetes.io/version # TODO(dims): Ensure newer versions get uploaded to # - https://console.cloud.google.com/storage/browser/gke-release/winnode/node-problem-detector # - https://gcsweb.k8s.io/gcs/kubernetes-release/node-problem-detector/ # and then the following references get fixed. # - # - path: cluster/gce/gci/configure.sh - # match: DEFAULT_NPD_VERSION= + - path: cluster/gce/gci/configure.sh + match: DEFAULT_NPD_VERSION= #- path: cluster/gce/windows/k8s-node-setup.psm1 # match: DEFAULT_NPD_VERSION @@ -116,7 +118,7 @@ dependencies: # Golang - name: "golang: upstream version" - version: 1.21.1 + version: 1.21.3 refPaths: - path: .go-version - path: build/build-image/cross/VERSION @@ -138,7 +140,7 @@ dependencies: match: minimum_go_version=go([0-9]+\.[0-9]+) - name: "registry.k8s.io/kube-cross: dependents" - version: v1.29.0-go1.21.1-bullseye.0 + version: v1.29.0-go1.21.3-bullseye.0 refPaths: - path: build/build-image/cross/VERSION @@ -158,9 +160,25 @@ dependencies: match: BASEIMAGE\?\=registry\.k8s\.io\/build-image\/debian-base-s390x:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) - path: test/conformance/image/Makefile match: BASE_IMAGE_VERSION\?= + - path: test/images/pets/peer-finder/BASEIMAGE + match: registry\.k8s\.io\/build-image\/debian-base-amd64:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) + - path: test/images/resource-consumer/BASEIMAGE + match: registry\.k8s\.io\/build-image\/debian-base-amd64:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) + - path: test/images/pets/zookeeper-installer/BASEIMAGE + match: registry\.k8s\.io\/build-image\/debian-base-amd64:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) + - path: test/images/nonroot/BASEIMAGE + match: registry\.k8s\.io\/build-image\/debian-base-amd64:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) + - path: test/images/regression-issue-74839/BASEIMAGE + match: registry\.k8s\.io\/build-image\/debian-base-amd64:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) 
+ - path: test/images/pets/redis-installer/BASEIMAGE + match: registry\.k8s\.io\/build-image\/debian-base-amd64:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) + - path: cluster/gce/gci/configure-helper.sh + match: registry\.k8s\.io\/build-image\/debian-base:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) + - path: pkg/volume/plugins.go + match: registry\.k8s\.io\/build-image\/debian-base:[a-zA-Z]+\-v((([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?) - name: "registry.k8s.io/distroless-iptables: dependents" - version: v0.3.2 + version: v0.4.1 refPaths: - path: build/common.sh match: __default_distroless_iptables_version= @@ -168,7 +186,7 @@ dependencies: match: configs\[DistrolessIptables\] = Config{list\.BuildImageRegistry, "distroless-iptables", "v([0-9]+)\.([0-9]+)\.([0-9]+)"} - name: "registry.k8s.io/go-runner: dependents" - version: v2.3.1-go1.21.1-bookworm.0 + version: v2.3.1-go1.21.3-bookworm.0 refPaths: - path: build/common.sh match: __default_go_runner_version= diff --git a/build/root/Makefile b/build/root/Makefile index 42d00546c2ed9..3ca3a6ab109d7 100644 --- a/build/root/Makefile +++ b/build/root/Makefile @@ -228,7 +228,7 @@ define TEST_E2E_NODE_HELP_INFO # CONTAINER_RUNTIME_ENDPOINT: remote container endpoint to connect to. # Defaults to "/run/containerd/containerd.sock". # IMAGE_SERVICE_ENDPOINT: remote image endpoint to connect to, to prepull images. -# Used when RUNTIME is set to "remote". +# Defaults to CONTAINER_RUNTIME_ENDPOINT. # IMAGE_CONFIG_FILE: path to a file containing image configuration. # IMAGE_CONFIG_DIR: path to image config files. 
# SYSTEM_SPEC_NAME: The name of the system spec to be used for validating the diff --git a/cluster/addons/addon-manager/kube-addons.sh b/cluster/addons/addon-manager/kube-addons.sh index 843920ea5feb1..f20ae03afcb0f 100755 --- a/cluster/addons/addon-manager/kube-addons.sh +++ b/cluster/addons/addon-manager/kube-addons.sh @@ -133,7 +133,7 @@ if [ -n "${KUBECTL_EXTRA_PRUNE_WHITELIST:-}" ]; then read -ra extra_prune_allowlist <<< "${KUBECTL_EXTRA_PRUNE_WHITELIST}" fi prune_allowlist=( "${KUBECTL_PRUNE_WHITELIST[@]}" "${extra_prune_allowlist[@]}" ) -prune_allowlist_flags=$(generate_prune_allowallowlist_flags "${prune_allowlist[@]}") +prune_allowlist_flags=$(generate_prune_allowlist_flags "${prune_allowlist[@]}") log INFO "== Generated kubectl prune allowlist flags: $prune_allowlist_flags ==" diff --git a/cluster/addons/dns/OWNERS b/cluster/addons/dns/OWNERS index f3c51982f427b..1f0f2d5361b3b 100644 --- a/cluster/addons/dns/OWNERS +++ b/cluster/addons/dns/OWNERS @@ -4,9 +4,9 @@ approvers: - bowei - mrhohn - kl52752 - - dpasiukevich reviewers: - bowei - mrhohn - kl52752 +emeritus_approvers: - dpasiukevich diff --git a/cluster/addons/dns/coredns/coredns.yaml.base b/cluster/addons/dns/coredns/coredns.yaml.base index 87cc327dd416d..dd4570adb6525 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.base +++ b/cluster/addons/dns/coredns/coredns.yaml.base @@ -176,7 +176,7 @@ spec: add: - NET_BIND_SERVICE drop: - - all + - ALL readOnlyRootFilesystem: true dnsPolicy: Default volumes: diff --git a/cluster/addons/dns/coredns/coredns.yaml.in b/cluster/addons/dns/coredns/coredns.yaml.in index 75bc5d75ba3b7..6939faec3f999 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.in +++ b/cluster/addons/dns/coredns/coredns.yaml.in @@ -176,7 +176,7 @@ spec: add: - NET_BIND_SERVICE drop: - - all + - ALL readOnlyRootFilesystem: true dnsPolicy: Default volumes: diff --git a/cluster/addons/dns/coredns/coredns.yaml.sed b/cluster/addons/dns/coredns/coredns.yaml.sed index f98fe13410a0c..a90f2b7674a2a 100644 --- a/cluster/addons/dns/coredns/coredns.yaml.sed +++ b/cluster/addons/dns/coredns/coredns.yaml.sed @@ -176,7 +176,7 @@ spec: add: - NET_BIND_SERVICE drop: - - all + - ALL readOnlyRootFilesystem: true dnsPolicy: Default volumes: diff --git a/cluster/addons/node-problem-detector/npd.yaml b/cluster/addons/node-problem-detector/npd.yaml index 459f28981f6de..7ba02511b188e 100644 --- a/cluster/addons/node-problem-detector/npd.yaml +++ b/cluster/addons/node-problem-detector/npd.yaml @@ -26,24 +26,22 @@ subjects: apiVersion: apps/v1 kind: DaemonSet metadata: - name: npd-v0.8.9 + name: node-problem-detector namespace: kube-system labels: - k8s-app: node-problem-detector - version: v0.8.9 - kubernetes.io/cluster-service: "true" + app.kubernetes.io/name: node-problem-detector + app.kubernetes.io/version: v0.8.13 addonmanager.kubernetes.io/mode: Reconcile spec: selector: matchLabels: - k8s-app: node-problem-detector - version: v0.8.9 + app.kubernetes.io/name: node-problem-detector + app.kubernetes.io/version: v0.8.13 template: metadata: labels: - k8s-app: node-problem-detector - version: v0.8.9 - kubernetes.io/cluster-service: "true" + app.kubernetes.io/name: node-problem-detector + app.kubernetes.io/version: v0.8.13 spec: containers: - name: node-problem-detector @@ -69,6 +67,9 @@ spec: volumeMounts: - name: log mountPath: /var/log + - name: kmsg + mountPath: /dev/kmsg + readOnly: true - name: localtime mountPath: /etc/localtime readOnly: true @@ -76,6 +77,9 @@ spec: - name: log hostPath: path: /var/log/ + - name: kmsg + 
hostPath: + path: /dev/kmsg - name: localtime hostPath: path: /etc/localtime @@ -84,5 +88,7 @@ spec: tolerations: - operator: "Exists" effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" - key: "CriticalAddonsOnly" operator: "Exists" diff --git a/cluster/gce/addons/konnectivity-agent/konnectivity-agent-ds.yaml b/cluster/gce/addons/konnectivity-agent/konnectivity-agent-ds.yaml index 678b557d6eae7..5a5f24fd6e2a9 100644 --- a/cluster/gce/addons/konnectivity-agent/konnectivity-agent-ds.yaml +++ b/cluster/gce/addons/konnectivity-agent/konnectivity-agent-ds.yaml @@ -27,7 +27,7 @@ spec: nodeSelector: kubernetes.io/os: linux containers: - - image: registry.k8s.io/kas-network-proxy/proxy-agent:v0.1.2 + - image: registry.k8s.io/kas-network-proxy/proxy-agent:v0.28.0 name: konnectivity-agent command: ["/proxy-agent"] args: [ diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index e402c0c2bd6ef..b0e62f9488fa6 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -287,12 +287,7 @@ export ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER # none - Not run node problem detector. # daemonset - Run node problem detector as daemonset. # standalone - Run node problem detector as standalone system daemon. -if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then - # Enable standalone mode by default for gci. - ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}" -else - export ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}" -fi +export ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}" NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}" NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}" NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}" @@ -564,4 +559,4 @@ fi # --image-credential-provider-bin-dir=${path-to-auth-provider-binary} # Also, it is required that DisableKubeletCloudCredentialProviders # feature gates are set to true for kubelet to use external credential provider. -export ENABLE_AUTH_PROVIDER_GCP="${ENABLE_AUTH_PROVIDER_GCP:-true}" \ No newline at end of file +export ENABLE_AUTH_PROVIDER_GCP="${ENABLE_AUTH_PROVIDER_GCP:-true}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index ea88f364b0324..dfb52dbc63d6f 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -329,13 +329,7 @@ export ENABLE_DNS_HORIZONTAL_AUTOSCALER=${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER: # none - Not run node problem detector. # daemonset - Run node problem detector as daemonset. # standalone - Run node problem detector as standalone system daemon. -if [[ "${NODE_OS_DISTRIBUTION}" = 'gci' ]]; then - # Enable standalone mode by default for gci. 
- ENABLE_NODE_PROBLEM_DETECTOR=${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone} -else - ENABLE_NODE_PROBLEM_DETECTOR=${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset} -fi -export ENABLE_NODE_PROBLEM_DETECTOR +export ENABLE_NODE_PROBLEM_DETECTOR=${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset} NODE_PROBLEM_DETECTOR_VERSION=${NODE_PROBLEM_DETECTOR_VERSION:-} NODE_PROBLEM_DETECTOR_TAR_HASH=${NODE_PROBLEM_DETECTOR_TAR_HASH:-} NODE_PROBLEM_DETECTOR_RELEASE_PATH=${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-} @@ -613,4 +607,4 @@ fi # --image-credential-provider-bin-dir=${path-to-auth-provider-binary} # Also, it is required that DisableKubeletCloudCredentialProviders and KubeletCredentialProviders # feature gates are set to true for kubelet to use external credential provider. -export ENABLE_AUTH_PROVIDER_GCP="${ENABLE_AUTH_PROVIDER_GCP:-true}" \ No newline at end of file +export ENABLE_AUTH_PROVIDER_GCP="${ENABLE_AUTH_PROVIDER_GCP:-true}" diff --git a/cluster/gce/gci/README.md b/cluster/gce/gci/README.md index 9cec32c2b5568..17c02fb03fb7c 100644 --- a/cluster/gce/gci/README.md +++ b/cluster/gce/gci/README.md @@ -79,7 +79,7 @@ following guidelines are proposed for image choice in E2E testing. 'image' should be used to specify the image. * To integrate continuously with other container - related technologies like runc, containerd, docker and kubernertes, the + related technologies like runc, containerd, docker and kubernetes, the latest LTS or stable images are preferred. 'image_family' should be used to specify the image. diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 4182e8ab38df3..dcf905f1043fe 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1765,6 +1765,7 @@ function prepare-kube-proxy-manifest-variables { if [[ -n "${DETECT_LOCAL_MODE:-}" ]]; then params+=" --detect-local-mode=${DETECT_LOCAL_MODE}" fi + local container_env="" local kube_cache_mutation_detector_env_name="" local kube_cache_mutation_detector_env_value="" @@ -1773,6 +1774,15 @@ function prepare-kube-proxy-manifest-variables { kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR" kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\"" fi + local kube_watchlist_inconsistency_detector_env_name="" + local kube_watchlist_inconsistency_detector_env_value="" + if [[ -n "${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR:-}" ]]; then + if [[ -z "${container_env}" ]]; then + container_env="env:" + fi + kube_watchlist_inconsistency_detector_env_name="- name: KUBE_WATCHLIST_INCONSISTENCY_DETECTOR" + kube_watchlist_inconsistency_detector_env_value="value: \"${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR}\"" + fi sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" "${src_file}" sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" "${src_file}" sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" "${src_file}" @@ -1782,6 +1792,8 @@ function prepare-kube-proxy-manifest-variables { sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}" sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" "${src_file}" sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" "${src_file}" + sed -i -e "s@{{kube_watchlist_inconsistency_detector_env_name}}@${kube_watchlist_inconsistency_detector_env_name}@g" "${src_file}" + sed -i -e 
"s@{{kube_watchlist_inconsistency_detector_env_value}}@${kube_watchlist_inconsistency_detector_env_value}@g" "${src_file}" sed -i -e "s@{{ cpurequest }}@${KUBE_PROXY_CPU_REQUEST:-100m}@g" "${src_file}" sed -i -e "s@{{ memoryrequest }}@${KUBE_PROXY_MEMORY_REQUEST:-50Mi}@g" "${src_file}" sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" "${src_file}" @@ -2213,7 +2225,16 @@ function start-kube-controller-manager { local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag) local container_env="" if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}]," + container_env="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}" + fi + if [[ -n "${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR:-}" ]]; then + if [[ -n "${container_env}" ]]; then + container_env="${container_env}, " + fi + container_env+="{\"name\": \"KUBE_WATCHLIST_INCONSISTENCY_DETECTOR\", \"value\": \"${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR}\"}" + fi + if [[ -n "${container_env}" ]]; then + container_env="\"env\":[${container_env}]," fi local paramstring @@ -2318,7 +2339,16 @@ function start-cloud-controller-manager { paramstring="$(convert-manifest-params "${params[*]}")" local container_env="" if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then - container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}]," + container_env="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}" + fi + if [[ -n "${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR:-}" ]]; then + if [[ -n "${container_env}" ]]; then + container_env="${container_env}, " + fi + container_env+="{\"name\": \"KUBE_WATCHLIST_INCONSISTENCY_DETECTOR\", \"value\": \"${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR}\"}" + fi + if [[ -n "${container_env}" ]]; then + container_env="\"env\":[${container_env}]," fi echo "Applying over-rides for manifest for cloud provider controller-manager" @@ -3111,7 +3141,7 @@ spec: - name: vol containers: - name: pv-recycler - image: registry.k8s.io/debian-base:v2.0.0 + image: registry.k8s.io/build-image/debian-base:bookworm-v1.0.0 command: - /bin/sh args: diff --git a/cluster/gce/gci/configure-kubeapiserver.sh b/cluster/gce/gci/configure-kubeapiserver.sh index db39e47eda89e..c0d570bca10b3 100644 --- a/cluster/gce/gci/configure-kubeapiserver.sh +++ b/cluster/gce/gci/configure-kubeapiserver.sh @@ -337,6 +337,12 @@ function start-kube-apiserver { if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then container_env+="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}" fi + if [[ -n "${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR:-}" ]]; then + if [[ -n "${container_env}" ]]; then + container_env="${container_env}, " + fi + container_env+="{\"name\": \"KUBE_WATCHLIST_INCONSISTENCY_DETECTOR\", \"value\": \"${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR}\"}" + fi if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then if [[ -n "${container_env}" ]]; then container_env="${container_env}, " diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 751362e71dca6..72eec3d6a5296 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -26,10 +26,9 @@ set -o pipefail ### Hardcoded constants DEFAULT_CNI_VERSION='v1.3.0' 
DEFAULT_CNI_HASH='5d0324ca8a3c90c680b6e1fddb245a2255582fa15949ba1f3c6bb7323df9d3af754dae98d6e40ac9ccafb2999c932df2c4288d418949a4915d928eb23c090540' -DEFAULT_NPD_VERSION='v0.8.9' -DEFAULT_NPD_HASH_AMD64='4919c47447c5f3871c1dc3171bbb817a38c8c8d07a6ce55a77d43cadc098e9ad608ceeab121eec00c13c0b6a2cc3488544d61ce84cdade1823f3fd5163a952de' -# TODO (SergeyKanzhelev): fill up for npd 0.8.9+ -DEFAULT_NPD_HASH_ARM64='8ccb42a862efdfc1f25ca9a22f3fd36f9fdff1ac618dd7d39e3b5991505dd610d432364420896ad71f42197a116f28a85dde58b129baa075ebb7312caa57f852' +DEFAULT_NPD_VERSION='v0.8.13' +DEFAULT_NPD_HASH_AMD64='f9d8499741f06e76ec2426c8ccebb4b5102e1e45c183b6a19671e1ec61ff2568c354307959c9910a579014807ae674e8633c8c0ea2d878fafae1b136ec7ff5da' +DEFAULT_NPD_HASH_ARM64='c8f46c8d89a4f17df93d1f62a886de9d22cef29e6971354932cba1fa2837ebcacbd18888c0487034febe0b7f2eea289e3348e0b2f57f2f69a338f6ff8da20ed5' DEFAULT_CRICTL_VERSION='v1.28.0' DEFAULT_CRICTL_AMD64_SHA512='28824e32c48b9fc70318b2935ce49c3fd923c7855299b609eb2e18c65eee5734abf927aa1929cee3568a3f8fb3cb14aea7a1963271db621f23e3c55674428ed9' DEFAULT_CRICTL_ARM64_SHA512='3707b36328c6ebd6ce07cdb31c0680c9cc860d18a568d95da80501ee0dd1666094632f20f7ddebf06718573d4ef21a551c49cf32b646a1c0ac90b3f58c4475fa' diff --git a/cluster/gce/manifests/konnectivity-server.yaml b/cluster/gce/manifests/konnectivity-server.yaml index 745323446de5b..ad88a607a9756 100644 --- a/cluster/gce/manifests/konnectivity-server.yaml +++ b/cluster/gce/manifests/konnectivity-server.yaml @@ -20,7 +20,7 @@ spec: {{ disallow_privilege_escalation}} {{ capabilities }} {{ drop_capabilities }} - image: registry.k8s.io/kas-network-proxy/proxy-server:v0.1.2 + image: registry.k8s.io/kas-network-proxy/proxy-server:v0.28.0 resources: requests: cpu: 25m diff --git a/cluster/gce/manifests/kube-proxy.manifest b/cluster/gce/manifests/kube-proxy.manifest index 1171a2b77bb91..ede540b5a795a 100644 --- a/cluster/gce/manifests/kube-proxy.manifest +++ b/cluster/gce/manifests/kube-proxy.manifest @@ -31,6 +31,8 @@ spec: {{container_env}} {{kube_cache_mutation_detector_env_name}} {{kube_cache_mutation_detector_env_value}} + {{kube_watchlist_inconsistency_detector_env_name}} + {{kube_watchlist_inconsistency_detector_env_value}} securityContext: privileged: true volumeMounts: diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index d18836e0a9da7..7660b240ecce4 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -1173,6 +1173,7 @@ ENABLE_VOLUME_SNAPSHOTS: $(yaml-quote "${ENABLE_VOLUME_SNAPSHOTS:-}") ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote "${ENABLE_APISERVER_ADVANCED_AUDIT:-}") ENABLE_APISERVER_DYNAMIC_AUDIT: $(yaml-quote "${ENABLE_APISERVER_DYNAMIC_AUDIT:-}") ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote "${ENABLE_CACHE_MUTATION_DETECTOR:-false}") +ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR: $(yaml-quote "${ENABLE_KUBE_WATCHLIST_INCONSISTENCY_DETECTOR:-false}") ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote "${ENABLE_PATCH_CONVERSION_DETECTOR:-false}") ADVANCED_AUDIT_POLICY: $(yaml-quote "${ADVANCED_AUDIT_POLICY:-}") ADVANCED_AUDIT_BACKEND: $(yaml-quote "${ADVANCED_AUDIT_BACKEND:-log}") diff --git a/cluster/get-kube.sh b/cluster/get-kube.sh index 556be41fc3911..59aff60a2cee9 100755 --- a/cluster/get-kube.sh +++ b/cluster/get-kube.sh @@ -136,7 +136,7 @@ fi if [[ -d "./kubernetes" ]]; then if [[ -z "${KUBERNETES_SKIP_CONFIRM-}" ]]; then - echo "'kubernetes' directory already exist. Should we skip download step and start to create cluster based on it? [Y]/n" + echo "'kubernetes' directory already exists. 
Should we skip download step and start to create cluster based on it? [Y]/n" read -r confirm if [[ ! "${confirm}" =~ ^[nN]$ ]]; then echo "Skipping download step." diff --git a/cluster/log-dump/log-dump.sh b/cluster/log-dump/log-dump.sh index d326747590ab5..66fac0f74e194 100755 --- a/cluster/log-dump/log-dump.sh +++ b/cluster/log-dump/log-dump.sh @@ -139,7 +139,10 @@ function copy-logs-from-node() { if [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then # get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true - gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true + # FIXME(dims): bug in gcloud prevents multiple source files specified using curly braces, so we just loop through for now + for single_file in "${files[@]}"; do + gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${single_file}" "${dir}" > /dev/null || true + done elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then local ip ip=$(get_ssh_hostname "${node}") diff --git a/cmd/cloud-controller-manager/nodeipamcontroller.go b/cmd/cloud-controller-manager/nodeipamcontroller.go index 19b41e4a20ae9..36c792ab22f45 100644 --- a/cmd/cloud-controller-manager/nodeipamcontroller.go +++ b/cmd/cloud-controller-manager/nodeipamcontroller.go @@ -26,8 +26,6 @@ import ( "net" "strings" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers/networking/v1alpha1" cloudprovider "k8s.io/cloud-provider" "k8s.io/cloud-provider/app" cloudcontrollerconfig "k8s.io/cloud-provider/app/config" @@ -38,7 +36,6 @@ import ( nodeipamcontroller "k8s.io/kubernetes/pkg/controller/nodeipam" nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" - "k8s.io/kubernetes/pkg/features" netutils "k8s.io/utils/net" ) @@ -128,14 +125,9 @@ func startNodeIpamController(ctx context.Context, initContext app.ControllerInit return nil, false, err } - var clusterCIDRInformer v1alpha1.ClusterCIDRInformer - if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { - clusterCIDRInformer = controllerCtx.InformerFactory.Networking().V1alpha1().ClusterCIDRs() - } nodeIpamController, err := nodeipamcontroller.NewNodeIpamController( ctx, controllerCtx.InformerFactory.Core().V1().Nodes(), - clusterCIDRInformer, cloud, controllerCtx.ClientBuilder.ClientOrDie(initContext.ClientName), clusterCIDRs, diff --git a/cmd/kube-apiserver/app/aggregator.go b/cmd/kube-apiserver/app/aggregator.go index 0d4d480c10eca..290d20c95785c 100644 --- a/cmd/kube-apiserver/app/aggregator.go +++ b/cmd/kube-apiserver/app/aggregator.go @@ -92,7 +92,6 @@ func createAggregatorConfig( // we assume that the etcd options have been completed already. avoid messing with anything outside // of changes to StorageConfig as that may lead to unexpected behavior when the options are applied. 
etcdOptions := *commandOptions.Etcd - etcdOptions.StorageConfig.Paging = true etcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion) etcdOptions.StorageConfig.EncodeVersioner = runtime.NewMultiGroupVersioner(v1.SchemeGroupVersion, schema.GroupKind{Group: v1beta1.GroupName}) etcdOptions.SkipHealthEndpoints = true // avoid double wiring of health checks diff --git a/cmd/kube-apiserver/app/options/options_test.go b/cmd/kube-apiserver/app/options/options_test.go index 9dabfa2dd349e..ba5bf525b44a7 100644 --- a/cmd/kube-apiserver/app/options/options_test.go +++ b/cmd/kube-apiserver/app/options/options_test.go @@ -157,7 +157,6 @@ func TestAddFlags(t *testing.T) { CertFile: "/var/run/kubernetes/etcdce.crt", TracerProvider: oteltrace.NewNoopTracerProvider(), }, - Paging: true, Prefix: "/registry", CompactionInterval: storagebackend.DefaultCompactInterval, CountMetricPollPeriod: time.Minute, @@ -327,6 +326,13 @@ func TestAddFlags(t *testing.T) { expected.Authentication.OIDC.UsernameClaim = "sub" expected.Authentication.OIDC.SigningAlgs = []string{"RS256"} + if !s.Authorization.AreLegacyFlagsSet() { + t.Errorf("expected legacy authorization flags to be set") + } + + // setting the method to nil since methods can't be compared with reflect.DeepEqual + s.Authorization.AreLegacyFlagsSet = nil + if !reflect.DeepEqual(expected, s) { t.Errorf("Got different run options than expected.\nDifference detected on:\n%s", cmp.Diff(expected, s, cmpopts.IgnoreUnexported(admission.Plugins{}, kubeoptions.OIDCAuthenticationOptions{}))) } diff --git a/cmd/kube-apiserver/app/options/validation.go b/cmd/kube-apiserver/app/options/validation.go index 53dcdf5e62845..38c8bce6d7614 100644 --- a/cmd/kube-apiserver/app/options/validation.go +++ b/cmd/kube-apiserver/app/options/validation.go @@ -130,6 +130,7 @@ func (s CompletedOptions) Validate() []error { var errs []error errs = append(errs, s.CompletedOptions.Validate()...) + errs = append(errs, s.CloudProvider.Validate()...) errs = append(errs, validateClusterIPFlags(s.Extra)...) errs = append(errs, validateServiceNodePort(s.Extra)...) errs = append(errs, validatePublicIPServiceClusterIPRangeIPFamilies(s.Extra, *s.GenericServerRunOptions)...) 
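Editor's note on the validation.go hunk above: cloud-provider checking now happens through the options' own Validate() method and is simply appended to the aggregate error slice like every other sub-option (the standalone helper is deleted from server.go in the next file). A minimal sketch of that error-aggregation pattern follows; the type and field names are placeholders, not the real kube-apiserver options structs.

```go
package main

import (
	"errors"
	"fmt"
)

// cloudProviderOptions stands in for the real CloudProviderOptions struct.
type cloudProviderOptions struct {
	CloudProvider string
}

// Validate returns zero or more errors rather than failing fast, so the
// caller can surface every configuration problem in one pass.
func (o *cloudProviderOptions) Validate() []error {
	var errs []error
	if o.CloudProvider != "" && o.CloudProvider != "external" {
		errs = append(errs, fmt.Errorf("unknown --cloud-provider: %q", o.CloudProvider))
	}
	return errs
}

// completedOptions stands in for the aggregated CompletedOptions struct.
type completedOptions struct {
	CloudProvider cloudProviderOptions
}

// Validate collects the errors of every sub-options struct into one slice,
// mirroring the `errs = append(errs, s.CloudProvider.Validate()...)` line above.
func (s completedOptions) Validate() []error {
	var errs []error
	errs = append(errs, s.CloudProvider.Validate()...)
	return errs
}

func main() {
	opts := completedOptions{CloudProvider: cloudProviderOptions{CloudProvider: "gce"}}
	if errs := opts.Validate(); len(errs) > 0 {
		fmt.Println(errors.Join(errs...))
	}
}
```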
diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 04deb53bee042..f8eefb7c7dd26 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -46,7 +46,6 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/util/keyutil" - cloudprovider "k8s.io/cloud-provider" cliflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/cli/globalflag" "k8s.io/component-base/logs" @@ -58,7 +57,6 @@ import ( "k8s.io/klog/v2" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -66,9 +64,9 @@ import ( "k8s.io/kubernetes/pkg/controlplane" controlplaneapiserver "k8s.io/kubernetes/pkg/controlplane/apiserver" "k8s.io/kubernetes/pkg/controlplane/reconcilers" + "k8s.io/kubernetes/pkg/features" generatedopenapi "k8s.io/kubernetes/pkg/generated/openapi" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" - kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" "k8s.io/kubernetes/pkg/serviceaccount" ) @@ -294,11 +292,6 @@ func CreateKubeAPIServerConfig(opts options.CompletedOptions) ( config.ExtraConfig.ClusterAuthenticationInfo.RequestHeaderUsernameHeaders = requestHeaderConfig.UsernameHeaders } - err = validateCloudProviderOptions(opts.CloudProvider) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed to validate cloud provider: %w", err) - } - // setup admission admissionConfig := &kubeapiserveradmission.Config{ ExternalInformers: versionedInformers, @@ -363,34 +356,6 @@ func CreateKubeAPIServerConfig(opts options.CompletedOptions) ( return config, serviceResolver, pluginInitializers, nil } -func validateCloudProviderOptions(opts *kubeoptions.CloudProviderOptions) error { - if opts.CloudProvider == "" { - return nil - } - if opts.CloudProvider == "external" { - if !utilfeature.DefaultFeatureGate.Enabled(features.DisableCloudProviders) { - return fmt.Errorf("when using --cloud-provider set to '%s', "+ - "please set DisableCloudProviders feature to true", opts.CloudProvider) - } - if !utilfeature.DefaultFeatureGate.Enabled(features.DisableKubeletCloudCredentialProviders) { - return fmt.Errorf("when using --cloud-provider set to '%s', "+ - "please set DisableKubeletCloudCredentialProviders feature to true", opts.CloudProvider) - } - return nil - } else if cloudprovider.IsDeprecatedInternal(opts.CloudProvider) { - if utilfeature.DefaultFeatureGate.Enabled(features.DisableCloudProviders) { - return fmt.Errorf("when using --cloud-provider set to '%s', "+ - "please set DisableCloudProviders feature to false", opts.CloudProvider) - } - if utilfeature.DefaultFeatureGate.Enabled(features.DisableKubeletCloudCredentialProviders) { - return fmt.Errorf("when using --cloud-provider set to '%s', "+ - "please set DisableKubeletCloudCredentialProviders feature to false", opts.CloudProvider) - } - return nil - } - return fmt.Errorf("unknown --cloud-provider : %s", opts.CloudProvider) -} - var testServiceResolver webhook.ServiceResolver // SetServiceResolverForTests allows the service resolver to be overridden during tests. 
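Editor's note on the deletion above: the removed validateCloudProviderOptions helper required the DisableCloudProviders and DisableKubeletCloudCredentialProviders feature gates to be enabled when --cloud-provider=external (and disabled for deprecated in-tree providers); that responsibility now sits behind the single CloudProvider.Validate() call added in the previous file. A rough, illustrative sketch of such a gate-dependent check, with the gates modeled as plain booleans instead of the real utilfeature.DefaultFeatureGate API:

```go
package main

import "fmt"

// validateExternalCloudProvider mimics the shape of the removed helper for the
// --cloud-provider=external case only; feature gates are passed as booleans
// rather than read from a feature-gate registry.
func validateExternalCloudProvider(provider string, disableCloudProviders, disableKubeletCredProviders bool) error {
	if provider != "external" {
		return nil // other providers are handled elsewhere in the real code
	}
	if !disableCloudProviders {
		return fmt.Errorf("--cloud-provider=%s requires the DisableCloudProviders feature gate to be enabled", provider)
	}
	if !disableKubeletCredProviders {
		return fmt.Errorf("--cloud-provider=%s requires the DisableKubeletCloudCredentialProviders feature gate to be enabled", provider)
	}
	return nil
}

func main() {
	// Example: external provider with one required gate left disabled.
	if err := validateExternalCloudProvider("external", true, false); err != nil {
		fmt.Println(err)
	}
}
```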
diff --git a/cmd/kube-controller-manager/app/batch.go b/cmd/kube-controller-manager/app/batch.go index e5edb4409f25e..ecee824a199eb 100644 --- a/cmd/kube-controller-manager/app/batch.go +++ b/cmd/kube-controller-manager/app/batch.go @@ -29,12 +29,16 @@ import ( ) func startJobController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) { - go job.NewController( + jobController, err := job.NewController( ctx, controllerContext.InformerFactory.Core().V1().Pods(), controllerContext.InformerFactory.Batch().V1().Jobs(), controllerContext.ClientBuilder.ClientOrDie("job-controller"), - ).Run(ctx, int(controllerContext.ComponentConfig.JobController.ConcurrentJobSyncs)) + ) + if err != nil { + return nil, true, fmt.Errorf("creating Job controller: %v", err) + } + go jobController.Run(ctx, int(controllerContext.ComponentConfig.JobController.ConcurrentJobSyncs)) return nil, true, nil } @@ -44,7 +48,7 @@ func startCronJobController(ctx context.Context, controllerContext ControllerCon controllerContext.ClientBuilder.ClientOrDie("cronjob-controller"), ) if err != nil { - return nil, true, fmt.Errorf("error creating CronJob controller V2: %v", err) + return nil, true, fmt.Errorf("creating CronJob controller V2: %v", err) } go cj2c.Run(ctx, int(controllerContext.ComponentConfig.CronJobController.ConcurrentCronJobSyncs)) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index b60dba90e20e7..11742b6adcf32 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -39,7 +39,7 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/mux" utilfeature "k8s.io/apiserver/pkg/util/feature" - cacheddiscovery "k8s.io/client-go/discovery/cached" + cacheddiscovery "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/informers" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/metadata" @@ -215,9 +215,8 @@ func Run(ctx context.Context, c *config.CompletedConfig) error { var unsecuredMux *mux.PathRecorderMux if c.SecureServing != nil { unsecuredMux = genericcontrollermanager.NewBaseHandler(&c.ComponentConfig.Generic.Debugging, healthzHandler) - if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { - slis.SLIMetricsWithReset{}.Install(unsecuredMux) - } + slis.SLIMetricsWithReset{}.Install(unsecuredMux) + handler := genericcontrollermanager.BuildHandlerChain(unsecuredMux, &c.Authorization, &c.Authentication) // TODO: handle stoppedCh and listenerStoppedCh returned by c.SecureServing.Serve if _, _, err := c.SecureServing.Serve(handler, 0, stopCh); err != nil { @@ -482,7 +481,7 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc register(names.LegacyServiceAccountTokenCleanerController, startLegacySATokenCleaner) } if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.ValidatingAdmissionPolicy) { - register("validatingadmissionpolicy-status-controller", startValidatingAdmissionPolicyStatusController) + register(names.ValidatingAdmissionPolicyStatusController, startValidatingAdmissionPolicyStatusController) } return controllers @@ -653,7 +652,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController if err != nil { return nil, false, fmt.Errorf("failed to build token generator: %v", err) } - controller, err := serviceaccountcontroller.NewTokensController( + tokenController, err := 
serviceaccountcontroller.NewTokensController( controllerContext.InformerFactory.Core().V1().ServiceAccounts(), controllerContext.InformerFactory.Core().V1().Secrets(), c.rootClientBuilder.ClientOrDie("tokens-controller"), @@ -665,7 +664,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController if err != nil { return nil, true, fmt.Errorf("error creating Tokens controller: %v", err) } - go controller.Run(ctx, int(controllerContext.ComponentConfig.SAController.ConcurrentSATokenSyncs)) + go tokenController.Run(ctx, int(controllerContext.ComponentConfig.SAController.ConcurrentSATokenSyncs)) // start the first set of informers now so that other controllers can start controllerContext.InformerFactory.Start(ctx.Done()) diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 1712f8026ab96..bbeb7a360ccf8 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -27,9 +27,7 @@ import ( "strings" "time" - "k8s.io/client-go/informers/networking/v1alpha1" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/features" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -150,15 +148,9 @@ func startNodeIpamController(ctx context.Context, controllerContext ControllerCo return nil, false, err } - var clusterCIDRInformer v1alpha1.ClusterCIDRInformer - if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { - clusterCIDRInformer = controllerContext.InformerFactory.Networking().V1alpha1().ClusterCIDRs() - } - nodeIpamController, err := nodeipamcontroller.NewNodeIpamController( ctx, controllerContext.InformerFactory.Core().V1().Nodes(), - clusterCIDRInformer, controllerContext.Cloud, controllerContext.ClientBuilder.ClientOrDie("node-controller"), clusterCIDRs, diff --git a/cmd/kube-controller-manager/app/plugins_providers.go b/cmd/kube-controller-manager/app/plugins_providers.go index 92a7625b2f74c..999b42b623d82 100644 --- a/cmd/kube-controller-manager/app/plugins_providers.go +++ b/cmd/kube-controller-manager/app/plugins_providers.go @@ -29,7 +29,6 @@ import ( "k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/portworx" "k8s.io/kubernetes/pkg/volume/rbd" - "k8s.io/kubernetes/pkg/volume/vsphere_volume" ) type probeFn func() []volume.VolumePlugin @@ -61,7 +60,6 @@ type pluginInfo struct { func appendAttachableLegacyProviderVolumes(logger klog.Logger, allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) { pluginMigrationStatus := make(map[string]pluginInfo) - pluginMigrationStatus[plugins.VSphereInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationvSphere, pluginUnregisterFeature: features.InTreePluginvSphereUnregister, pluginProbeFunction: vsphere_volume.ProbeVolumePlugins} pluginMigrationStatus[plugins.PortworxVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationPortworx, pluginUnregisterFeature: features.InTreePluginPortworxUnregister, pluginProbeFunction: portworx.ProbeVolumePlugins} pluginMigrationStatus[plugins.RBDVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationRBD, pluginUnregisterFeature: features.InTreePluginRBDUnregister, pluginProbeFunction: rbd.ProbeVolumePlugins} var err error diff --git a/cmd/kube-controller-manager/app/validatingadmissionpolicystatus.go b/cmd/kube-controller-manager/app/validatingadmissionpolicystatus.go index 833d24d77f7ed..ca5ceeb2b0e9f 100644 --- 
a/cmd/kube-controller-manager/app/validatingadmissionpolicystatus.go +++ b/cmd/kube-controller-manager/app/validatingadmissionpolicystatus.go @@ -23,6 +23,7 @@ import ( "k8s.io/apiserver/pkg/cel/openapi/resolver" "k8s.io/client-go/kubernetes/scheme" "k8s.io/controller-manager/controller" + "k8s.io/kubernetes/cmd/kube-controller-manager/names" "k8s.io/kubernetes/pkg/controller/validatingadmissionpolicystatus" "k8s.io/kubernetes/pkg/generated/openapi" ) @@ -35,7 +36,7 @@ func startValidatingAdmissionPolicyStatusController(ctx context.Context, control } c, err := validatingadmissionpolicystatus.NewController( controllerContext.InformerFactory.Admissionregistration().V1beta1().ValidatingAdmissionPolicies(), - controllerContext.ClientBuilder.ClientOrDie("validatingadmissionpolicy-status-controller").AdmissionregistrationV1beta1().ValidatingAdmissionPolicies(), + controllerContext.ClientBuilder.ClientOrDie(names.ValidatingAdmissionPolicyStatusController).AdmissionregistrationV1beta1().ValidatingAdmissionPolicies(), typeChecker, ) diff --git a/cmd/kube-proxy/app/conntrack.go b/cmd/kube-proxy/app/conntrack.go index 25a6f10b612e2..35ec5ca46434f 100644 --- a/cmd/kube-proxy/app/conntrack.go +++ b/cmd/kube-proxy/app/conntrack.go @@ -37,8 +37,12 @@ type Conntracker interface { SetMax(max int) error // SetTCPEstablishedTimeout adjusts nf_conntrack_tcp_timeout_established. SetTCPEstablishedTimeout(seconds int) error - // SetTCPCloseWaitTimeout nf_conntrack_tcp_timeout_close_wait. + // SetTCPCloseWaitTimeout adjusts nf_conntrack_tcp_timeout_close_wait. SetTCPCloseWaitTimeout(seconds int) error + // SetUDPTimeout adjusts nf_conntrack_udp_timeout. + SetUDPTimeout(seconds int) error + // SetUDPStreamTimeout adjusts nf_conntrack_udp_timeout_stream. + SetUDPStreamTimeout(seconds int) error } type realConntracker struct{} @@ -92,6 +96,14 @@ func (rct realConntracker) SetTCPCloseWaitTimeout(seconds int) error { return rct.setIntSysCtl("nf_conntrack_tcp_timeout_close_wait", seconds) } +func (rct realConntracker) SetUDPTimeout(seconds int) error { + return rct.setIntSysCtl("nf_conntrack_udp_timeout", seconds) +} + +func (rct realConntracker) SetUDPStreamTimeout(seconds int) error { + return rct.setIntSysCtl("nf_conntrack_udp_timeout_stream", seconds) +} + func (realConntracker) setIntSysCtl(name string, value int) error { entry := "net/netfilter/" + name diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index fee3d2f56db2c..529274c57cf70 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -19,6 +19,7 @@ limitations under the License. package app import ( + "context" goflag "flag" "fmt" "net" @@ -86,7 +87,7 @@ import ( utilflag "k8s.io/kubernetes/pkg/util/flag" "k8s.io/kubernetes/pkg/util/oom" netutils "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func init() { @@ -107,6 +108,8 @@ type Options struct { WriteConfigTo string // CleanupAndExit, when true, makes the proxy server clean up iptables and ipvs rules, then exit. CleanupAndExit bool + // InitAndExit, when true, makes the proxy server makes configurations that need privileged access, then exit. + InitAndExit bool // WindowsService should be set to true if kube-proxy is running as a service on Windows. 
// Its corresponding flag only gets registered in Windows builds WindowsService bool @@ -141,14 +144,25 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.") fs.StringVar(&o.WriteConfigTo, "write-config-to", o.WriteConfigTo, "If set, write the default configuration values to this file and exit.") - fs.StringVar(&o.config.ClientConnection.Kubeconfig, "kubeconfig", o.config.ClientConnection.Kubeconfig, "Path to kubeconfig file with authorization information (the master location can be overridden by the master flag).") - fs.StringVar(&o.config.ClusterCIDR, "cluster-cidr", o.config.ClusterCIDR, "The CIDR range of pods in the cluster. When configured, traffic sent to a Service cluster IP from outside this range will be masqueraded and traffic sent from pods to an external LoadBalancer IP will be directed to the respective cluster IP instead. "+ - "For dual-stack clusters, a comma-separated list is accepted with at least one CIDR per IP family (IPv4 and IPv6). "+ + + fs.BoolVar(&o.CleanupAndExit, "cleanup", o.CleanupAndExit, "If true cleanup iptables and ipvs rules and exit.") + + fs.Var(cliflag.NewMapStringBool(&o.config.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ + "Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")+"\n"+ "This parameter is ignored if a config file is specified by --config.") - fs.StringVar(&o.config.ClientConnection.ContentType, "kube-api-content-type", o.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.") + + fs.StringVar(&o.config.ClientConnection.Kubeconfig, "kubeconfig", o.config.ClientConnection.Kubeconfig, "Path to kubeconfig file with authorization information (the master location can be overridden by the master flag).") fs.StringVar(&o.master, "master", o.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") - fs.StringVar(&o.hostnameOverride, "hostname-override", o.hostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.") - fs.StringVar(&o.config.IPVS.Scheduler, "ipvs-scheduler", o.config.IPVS.Scheduler, "The ipvs scheduler type when proxy mode is ipvs") + fs.StringVar(&o.config.ClientConnection.ContentType, "kube-api-content-type", o.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.") + fs.Int32Var(&o.config.ClientConnection.Burst, "kube-api-burst", o.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver") + fs.Float32Var(&o.config.ClientConnection.QPS, "kube-api-qps", o.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver") + + fs.StringVar(&o.hostnameOverride, "hostname-override", o.hostnameOverride, "If non-empty, will be used as the name of the Node that kube-proxy is running on. If unset, the node name is assumed to be the same as the node's hostname.") + fs.Var(&utilflag.IPVar{Val: &o.config.BindAddress}, "bind-address", "Overrides kube-proxy's idea of what its node's primary IP is. Note that the name is a historical artifact, and kube-proxy does not actually bind any sockets to this IP. 
This parameter is ignored if a config file is specified by --config.") + fs.Var(&utilflag.IPPortVar{Val: &o.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on, defaulting to \"0.0.0.0:10256\" (if --bind-address is unset or IPv4), or \"[::]:10256\" (if --bind-address is IPv6). Set empty to disable. This parameter is ignored if a config file is specified by --config.") + fs.Var(&utilflag.IPPortVar{Val: &o.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on, defaulting to \"127.0.0.1:10249\" (if --bind-address is unset or IPv4), or \"[::1]:10249\" (if --bind-address is IPv6). (Set to \"0.0.0.0:10249\" / \"[::]:10249\" to bind on all interfaces.) Set empty to disable. This parameter is ignored if a config file is specified by --config.") + fs.BoolVar(&o.config.BindAddressHardFail, "bind-address-hard-fail", o.config.BindAddressHardFail, "If true kube-proxy will treat failure to bind to a port as fatal and exit") + fs.BoolVar(&o.config.EnableProfiling, "profiling", o.config.EnableProfiling, "If true enables profiling via web interface on /debug/pprof handler. This parameter is ignored if a config file is specified by --config.") fs.StringVar(&o.config.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.config.ShowHiddenMetricsForVersion, "The previous version for which you want to show hidden metrics. "+ "Only the previous minor version is meaningful, other values will not be allowed. "+ @@ -156,59 +170,57 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { "The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, "+ "rather than being surprised when they are permanently removed in the release after that. "+ "This parameter is ignored if a config file is specified by --config.") + fs.BoolVar(&o.InitAndExit, "init-only", o.InitAndExit, "If true, perform any initialization steps that must be done with full root privileges, and then exit. After doing this, you can run kube-proxy again with only the CAP_NET_ADMIN capability.") + fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: on Linux this can be 'iptables' (default) or 'ipvs'. On Windows the only supported value is 'kernelspace'."+ + "This parameter is ignored if a config file is specified by --config.") - fs.StringSliceVar(&o.config.IPVS.ExcludeCIDRs, "ipvs-exclude-cidrs", o.config.IPVS.ExcludeCIDRs, "A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules.") - fs.StringSliceVar(&o.config.NodePortAddresses, "nodeport-addresses", o.config.NodePortAddresses, - "A string slice of values which specify the addresses to use for NodePorts. Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. This parameter is ignored if a config file is specified by --config.") + fs.Int32Var(o.config.IPTables.MasqueradeBit, "iptables-masquerade-bit", ptr.Deref(o.config.IPTables.MasqueradeBit, 14), "If using the iptables or ipvs proxy mode, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].") + fs.BoolVar(&o.config.IPTables.MasqueradeAll, "masquerade-all", o.config.IPTables.MasqueradeAll, "If using the iptables or ipvs proxy mode, SNAT all traffic sent via Service cluster IPs. 
This may be required with some CNI plugins.") + fs.BoolVar(o.config.IPTables.LocalhostNodePorts, "iptables-localhost-nodeports", ptr.Deref(o.config.IPTables.LocalhostNodePorts, true), "If false, kube-proxy will disable the legacy behavior of allowing NodePort services to be accessed via localhost. (Applies only to iptables mode and IPv4; localhost NodePorts are never allowed with other proxy modes or with IPv6.)") + fs.DurationVar(&o.config.IPTables.SyncPeriod.Duration, "iptables-sync-period", o.config.IPTables.SyncPeriod.Duration, "An interval (e.g. '5s', '1m', '2h22m') indicating how frequently various re-synchronizing and cleanup operations are performed. Must be greater than 0.") + fs.DurationVar(&o.config.IPTables.MinSyncPeriod.Duration, "iptables-min-sync-period", o.config.IPTables.MinSyncPeriod.Duration, "The minimum period between iptables rule resyncs (e.g. '5s', '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will result in an immediate iptables resync.") - fs.BoolVar(&o.CleanupAndExit, "cleanup", o.CleanupAndExit, "If true cleanup iptables and ipvs rules and exit.") + fs.DurationVar(&o.config.IPVS.SyncPeriod.Duration, "ipvs-sync-period", o.config.IPVS.SyncPeriod.Duration, "An interval (e.g. '5s', '1m', '2h22m') indicating how frequently various re-synchronizing and cleanup operations are performed. Must be greater than 0.") + fs.DurationVar(&o.config.IPVS.MinSyncPeriod.Duration, "ipvs-min-sync-period", o.config.IPVS.MinSyncPeriod.Duration, "The minimum period between IPVS rule resyncs (e.g. '5s', '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will result in an immediate IPVS resync.") + fs.StringVar(&o.config.IPVS.Scheduler, "ipvs-scheduler", o.config.IPVS.Scheduler, "The ipvs scheduler type when proxy mode is ipvs") + fs.StringSliceVar(&o.config.IPVS.ExcludeCIDRs, "ipvs-exclude-cidrs", o.config.IPVS.ExcludeCIDRs, "A comma-separated list of CIDRs which the ipvs proxier should not touch when cleaning up IPVS rules.") + fs.BoolVar(&o.config.IPVS.StrictARP, "ipvs-strict-arp", o.config.IPVS.StrictARP, "Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2") + fs.DurationVar(&o.config.IPVS.TCPTimeout.Duration, "ipvs-tcp-timeout", o.config.IPVS.TCPTimeout.Duration, "The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').") + fs.DurationVar(&o.config.IPVS.TCPFinTimeout.Duration, "ipvs-tcpfin-timeout", o.config.IPVS.TCPFinTimeout.Duration, "The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').") + fs.DurationVar(&o.config.IPVS.UDPTimeout.Duration, "ipvs-udp-timeout", o.config.IPVS.UDPTimeout.Duration, "The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').") - fs.Var(&utilflag.IPVar{Val: &o.config.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces). This parameter is ignored if a config file is specified by --config.") - fs.Var(&utilflag.IPPortVar{Val: &o.config.HealthzBindAddress}, "healthz-bind-address", "The IP address with port for the health check server to serve on (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. 
This parameter is ignored if a config file is specified by --config.") - fs.Var(&utilflag.IPPortVar{Val: &o.config.MetricsBindAddress}, "metrics-bind-address", "The IP address with port for the metrics server to serve on (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.") - fs.BoolVar(&o.config.BindAddressHardFail, "bind-address-hard-fail", o.config.BindAddressHardFail, "If true kube-proxy will treat failure to bind to a port as fatal and exit") - fs.Var(utilflag.PortRangeVar{Val: &o.config.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If (unspecified, 0, or 0-0) then ports will be randomly chosen.") - fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: on Linux this can be 'iptables' (default) or 'ipvs'. On Windows the only supported value is 'kernelspace'."+ - "This parameter is ignored if a config file is specified by --config.") - fs.Var(cliflag.NewMapStringBool(&o.config.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ - "Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")+"\n"+ + fs.Var(&o.config.DetectLocalMode, "detect-local-mode", "Mode to use to detect local traffic. This parameter is ignored if a config file is specified by --config.") + fs.StringVar(&o.config.DetectLocal.BridgeInterface, "pod-bridge-interface", o.config.DetectLocal.BridgeInterface, "A bridge interface name. When --detect-local-mode is set to BridgeInterface, kube-proxy will consider traffic to be local if it originates from this bridge.") + fs.StringVar(&o.config.DetectLocal.InterfaceNamePrefix, "pod-interface-name-prefix", o.config.DetectLocal.InterfaceNamePrefix, "An interface name prefix. When --detect-local-mode is set to InterfaceNamePrefix, kube-proxy will consider traffic to be local if it originates from any interface whose name begins with this prefix.") + fs.StringVar(&o.config.ClusterCIDR, "cluster-cidr", o.config.ClusterCIDR, "The CIDR range of the pods in the cluster. (For dual-stack clusters, this can be a comma-separated dual-stack pair of CIDR ranges.). When --detect-local-mode is set to ClusterCIDR, kube-proxy will consider traffic to be local if its source IP is in this range. (Otherwise it is not used.) "+ "This parameter is ignored if a config file is specified by --config.") - fs.Int32Var(&o.healthzPort, "healthz-port", o.healthzPort, "The port to bind the health check server. Use 0 to disable.") - fs.MarkDeprecated("healthz-port", "This flag is deprecated and will be removed in a future release. Please use --healthz-bind-address instead.") - fs.Int32Var(&o.metricsPort, "metrics-port", o.metricsPort, "The port to bind the metrics server. Use 0 to disable.") - fs.MarkDeprecated("metrics-port", "This flag is deprecated and will be removed in a future release. Please use --metrics-bind-address instead.") - fs.Int32Var(o.config.OOMScoreAdj, "oom-score-adj", pointer.Int32Deref(o.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]. 
This parameter is ignored if a config file is specified by --config.") - fs.Int32Var(o.config.IPTables.MasqueradeBit, "iptables-masquerade-bit", pointer.Int32Deref(o.config.IPTables.MasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].") + fs.StringSliceVar(&o.config.NodePortAddresses, "nodeport-addresses", o.config.NodePortAddresses, + "A list of CIDR ranges that contain valid node IPs. If set, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If unset, NodePort connections will be accepted on all local IPs. This parameter is ignored if a config file is specified by --config.") + + fs.Int32Var(o.config.OOMScoreAdj, "oom-score-adj", ptr.Deref(o.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]. This parameter is ignored if a config file is specified by --config.") fs.Int32Var(o.config.Conntrack.MaxPerCore, "conntrack-max-per-core", *o.config.Conntrack.MaxPerCore, "Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).") fs.Int32Var(o.config.Conntrack.Min, "conntrack-min", *o.config.Conntrack.Min, "Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).") - fs.Int32Var(&o.config.ClientConnection.Burst, "kube-api-burst", o.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver") - fs.DurationVar(&o.config.IPTables.SyncPeriod.Duration, "iptables-sync-period", o.config.IPTables.SyncPeriod.Duration, "The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.") - fs.DurationVar(&o.config.IPTables.MinSyncPeriod.Duration, "iptables-min-sync-period", o.config.IPTables.MinSyncPeriod.Duration, "The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').") - fs.DurationVar(&o.config.IPVS.SyncPeriod.Duration, "ipvs-sync-period", o.config.IPVS.SyncPeriod.Duration, "The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.") - fs.DurationVar(&o.config.IPVS.MinSyncPeriod.Duration, "ipvs-min-sync-period", o.config.IPVS.MinSyncPeriod.Duration, "The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').") - fs.DurationVar(&o.config.IPVS.TCPTimeout.Duration, "ipvs-tcp-timeout", o.config.IPVS.TCPTimeout.Duration, "The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').") - fs.DurationVar(&o.config.IPVS.TCPFinTimeout.Duration, "ipvs-tcpfin-timeout", o.config.IPVS.TCPFinTimeout.Duration, "The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').") - fs.DurationVar(&o.config.IPVS.UDPTimeout.Duration, "ipvs-udp-timeout", o.config.IPVS.UDPTimeout.Duration, "The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. 
'5s', '1m', '2h22m').") fs.DurationVar(&o.config.Conntrack.TCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", o.config.Conntrack.TCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)") fs.DurationVar( &o.config.Conntrack.TCPCloseWaitTimeout.Duration, "conntrack-tcp-timeout-close-wait", o.config.Conntrack.TCPCloseWaitTimeout.Duration, "NAT timeout for TCP connections in the CLOSE_WAIT state") + fs.DurationVar(&o.config.Conntrack.UDPTimeout.Duration, "conntrack-udp-timeout", o.config.Conntrack.UDPTimeout.Duration, "Idle timeout for UNREPLIED UDP connections (0 to leave as-is)") + fs.DurationVar(&o.config.Conntrack.UDPStreamTimeout.Duration, "conntrack-udp-timeout-stream", o.config.Conntrack.UDPStreamTimeout.Duration, "Idle timeout for ASSURED UDP connections (0 to leave as-is)") + fs.DurationVar(&o.config.ConfigSyncPeriod.Duration, "config-sync-period", o.config.ConfigSyncPeriod.Duration, "How often configuration from the apiserver is refreshed. Must be greater than 0.") - fs.BoolVar(&o.config.IPVS.StrictARP, "ipvs-strict-arp", o.config.IPVS.StrictARP, "Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2") - fs.BoolVar(&o.config.IPTables.MasqueradeAll, "masquerade-all", o.config.IPTables.MasqueradeAll, "If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed)") - fs.BoolVar(o.config.IPTables.LocalhostNodePorts, "iptables-localhost-nodeports", pointer.BoolDeref(o.config.IPTables.LocalhostNodePorts, true), "If false Kube-proxy will disable the legacy behavior of allowing NodePort services to be accessed via localhost, This only applies to iptables mode and ipv4.") - fs.BoolVar(&o.config.EnableProfiling, "profiling", o.config.EnableProfiling, "If true enables profiling via web interface on /debug/pprof handler. This parameter is ignored if a config file is specified by --config.") + fs.Int32Var(&o.healthzPort, "healthz-port", o.healthzPort, "The port to bind the health check server. Use 0 to disable.") + _ = fs.MarkDeprecated("healthz-port", "This flag is deprecated and will be removed in a future release. Please use --healthz-bind-address instead.") + fs.Int32Var(&o.metricsPort, "metrics-port", o.metricsPort, "The port to bind the metrics server. Use 0 to disable.") + _ = fs.MarkDeprecated("metrics-port", "This flag is deprecated and will be removed in a future release. Please use --metrics-bind-address instead.") + fs.Var(utilflag.PortRangeVar{Val: &o.config.PortRange}, "proxy-port-range", "This was previously used to configure the userspace proxy, but is now unused.") + _ = fs.MarkDeprecated("proxy-port-range", "This flag has no effect and will be removed in a future release.") - fs.Float32Var(&o.config.ClientConnection.QPS, "kube-api-qps", o.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver") - fs.Var(&o.config.DetectLocalMode, "detect-local-mode", "Mode to use to detect local traffic. This parameter is ignored if a config file is specified by --config.") - fs.StringVar(&o.config.DetectLocal.BridgeInterface, "pod-bridge-interface", o.config.DetectLocal.BridgeInterface, "A bridge interface name in the cluster. Kube-proxy considers traffic as local if originating from an interface which matches the value. 
This argument should be set if DetectLocalMode is set to BridgeInterface.") - fs.StringVar(&o.config.DetectLocal.InterfaceNamePrefix, "pod-interface-name-prefix", o.config.DetectLocal.InterfaceNamePrefix, "An interface prefix in the cluster. Kube-proxy considers traffic as local if originating from interfaces that match the given prefix. This argument should be set if DetectLocalMode is set to InterfaceNamePrefix.") logsapi.AddFlags(&o.config.Logging, fs) } @@ -366,10 +378,13 @@ func (o *Options) Run() error { return cleanupAndExit() } - proxyServer, err := newProxyServer(o.config, o.master) + proxyServer, err := newProxyServer(o.config, o.master, o.InitAndExit) if err != nil { return err } + if o.InitAndExit { + return nil + } o.proxyServer = proxyServer return o.runLoop() @@ -568,7 +583,7 @@ type ProxyServer struct { Broadcaster events.EventBroadcaster Recorder events.EventRecorder NodeRef *v1.ObjectReference - HealthzServer healthcheck.ProxierHealthUpdater + HealthzServer *healthcheck.ProxierHealthServer Hostname string PrimaryIPFamily v1.IPFamily NodeIPs map[v1.IPFamily]net.IP @@ -579,7 +594,7 @@ type ProxyServer struct { } // newProxyServer creates a ProxyServer based on the given config -func newProxyServer(config *kubeproxyconfig.KubeProxyConfiguration, master string) (*ProxyServer, error) { +func newProxyServer(config *kubeproxyconfig.KubeProxyConfiguration, master string, initOnly bool) (*ProxyServer, error) { s := &ProxyServer{Config: config} cz, err := configz.New(kubeproxyconfig.GroupName) @@ -602,7 +617,8 @@ func newProxyServer(config *kubeproxyconfig.KubeProxyConfiguration, master strin return nil, err } - s.PrimaryIPFamily, s.NodeIPs = detectNodeIPs(s.Client, s.Hostname, config.BindAddress) + rawNodeIPs := getNodeIPs(s.Client, s.Hostname) + s.PrimaryIPFamily, s.NodeIPs = detectNodeIPs(rawNodeIPs, config.BindAddress) s.Broadcaster = events.NewBroadcaster(&events.EventSinkImpl{Interface: s.Client.EventsV1()}) s.Recorder = s.Broadcaster.NewRecorder(proxyconfigscheme.Scheme, "kube-proxy") @@ -615,7 +631,7 @@ func newProxyServer(config *kubeproxyconfig.KubeProxyConfiguration, master strin } if len(config.HealthzBindAddress) > 0 { - s.HealthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, s.Recorder, s.NodeRef) + s.HealthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration) } err = s.platformSetup() @@ -642,7 +658,7 @@ func newProxyServer(config *kubeproxyconfig.KubeProxyConfiguration, master strin klog.ErrorS(err, "Kube-proxy configuration may be incomplete or incorrect") } - s.Proxier, err = s.createProxier(config, dualStackSupported) + s.Proxier, err = s.createProxier(config, dualStackSupported, initOnly) if err != nil { return nil, err } @@ -677,7 +693,7 @@ func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) { clusterCIDRs := strings.Split(s.Config.ClusterCIDR, ",") if badCIDRs(clusterCIDRs, badFamily) { errors = append(errors, fmt.Errorf("cluster is %s but clusterCIDRs contains only IPv%s addresses", clusterType, badFamily)) - if s.Config.DetectLocalMode == kubeproxyconfig.LocalModeClusterCIDR { + if s.Config.DetectLocalMode == kubeproxyconfig.LocalModeClusterCIDR && !dualStackSupported { // This has always been a fatal error fatal = true } @@ -776,7 +792,7 @@ func createClient(config componentbaseconfig.ClientConnectionConfiguration, mast return client, nil } -func serveHealthz(hz healthcheck.ProxierHealthUpdater, errCh chan error) { 
+func serveHealthz(hz *healthcheck.ProxierHealthServer, errCh chan error) { if hz == nil { return } @@ -805,9 +821,8 @@ func serveMetrics(bindAddress string, proxyMode kubeproxyconfig.ProxyMode, enabl proxyMux := mux.NewPathRecorderMux("kube-proxy") healthz.InstallHandler(proxyMux) - if utilfeature.DefaultFeatureGate.Enabled(metricsfeatures.ComponentSLIs) { - slis.SLIMetricsWithReset{}.Install(proxyMux) - } + slis.SLIMetricsWithReset{}.Install(proxyMux) + proxyMux.HandleFunc("/proxyMode", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") @@ -863,16 +878,17 @@ func (s *ProxyServer) Run() error { // TODO(thockin): make it possible for healthz and metrics to be on the same port. - var errCh chan error + var healthzErrCh, metricsErrCh chan error if s.Config.BindAddressHardFail { - errCh = make(chan error) + healthzErrCh = make(chan error) + metricsErrCh = make(chan error) } // Start up a healthz server if requested - serveHealthz(s.HealthzServer, errCh) + serveHealthz(s.HealthzServer, healthzErrCh) // Start up a metrics server if requested - serveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, errCh) + serveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, metricsErrCh) noProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil) if err != nil { @@ -937,7 +953,13 @@ func (s *ProxyServer) Run() error { go s.Proxier.SyncLoop() - return <-errCh + select { + case err = <-healthzErrCh: + s.Recorder.Eventf(s.NodeRef, nil, api.EventTypeWarning, "FailedToStartProxierHealthcheck", "StartKubeProxy", err.Error()) + case err = <-metricsErrCh: + s.Recorder.Eventf(s.NodeRef, nil, api.EventTypeWarning, "FailedToStartMetricServer", "StartKubeProxy", err.Error()) + } + return err } func (s *ProxyServer) birthCry() { @@ -955,30 +977,73 @@ func (s *ProxyServer) birthCry() { // // The order of precedence is: // 1. if bindAddress is not 0.0.0.0 or ::, then it is used as the primary IP. -// 2. if the Node object can be fetched, then its primary IP is used as the primary IP -// (and its secondary IP, if any, is just ignored). -// 3. otherwise the primary node IP is 127.0.0.1. -// -// In all cases, the secondary IP is the zero IP of the other IP family. -func detectNodeIPs(client clientset.Interface, hostname, bindAddress string) (v1.IPFamily, map[v1.IPFamily]net.IP) { - nodeIP := netutils.ParseIPSloppy(bindAddress) - if nodeIP.IsUnspecified() { - nodeIP = utilnode.GetNodeIP(client, hostname) +// 2. if rawNodeIPs is not empty, then its address(es) is/are used +// 3. 
otherwise the node IPs are 127.0.0.1 and ::1 +func detectNodeIPs(rawNodeIPs []net.IP, bindAddress string) (v1.IPFamily, map[v1.IPFamily]net.IP) { + primaryFamily := v1.IPv4Protocol + nodeIPs := map[v1.IPFamily]net.IP{ + v1.IPv4Protocol: net.IPv4(127, 0, 0, 1), + v1.IPv6Protocol: net.IPv6loopback, + } + + if len(rawNodeIPs) > 0 { + if !netutils.IsIPv4(rawNodeIPs[0]) { + primaryFamily = v1.IPv6Protocol + } + nodeIPs[primaryFamily] = rawNodeIPs[0] + if len(rawNodeIPs) > 1 { + // If more than one address is returned, they are guaranteed to be of different families + family := v1.IPv4Protocol + if !netutils.IsIPv4(rawNodeIPs[1]) { + family = v1.IPv6Protocol + } + nodeIPs[family] = rawNodeIPs[1] + } + } + + // If a bindAddress is passed, override the primary IP + bindIP := netutils.ParseIPSloppy(bindAddress) + if bindIP != nil && !bindIP.IsUnspecified() { + if netutils.IsIPv4(bindIP) { + primaryFamily = v1.IPv4Protocol + } else { + primaryFamily = v1.IPv6Protocol + } + nodeIPs[primaryFamily] = bindIP } - if nodeIP == nil { - klog.InfoS("Can't determine this node's IP, assuming 127.0.0.1; if this is incorrect, please set the --bind-address flag") - nodeIP = netutils.ParseIPSloppy("127.0.0.1") + + if nodeIPs[primaryFamily].IsLoopback() { + klog.InfoS("Can't determine this node's IP, assuming loopback; if this is incorrect, please set the --bind-address flag") + } + return primaryFamily, nodeIPs +} + +// getNodeIPs returns IPs for the node with the provided name. If +// required, it will wait for the node to be created. +func getNodeIPs(client clientset.Interface, name string) []net.IP { + var nodeIPs []net.IP + backoff := wait.Backoff{ + Steps: 6, + Duration: 1 * time.Second, + Factor: 2.0, + Jitter: 0.2, } - if netutils.IsIPv4(nodeIP) { - return v1.IPv4Protocol, map[v1.IPFamily]net.IP{ - v1.IPv4Protocol: nodeIP, - v1.IPv6Protocol: net.IPv6zero, + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + node, err := client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + klog.ErrorS(err, "Failed to retrieve node info") + return false, nil } - } else { - return v1.IPv6Protocol, map[v1.IPFamily]net.IP{ - v1.IPv4Protocol: net.IPv4zero, - v1.IPv6Protocol: nodeIP, + nodeIPs, err = utilnode.GetNodeHostIPs(node) + if err != nil { + klog.ErrorS(err, "Failed to retrieve node IPs") + return false, nil } + return true, nil + }) + if err == nil { + klog.InfoS("Successfully retrieved node IP(s)", "IPs", nodeIPs) } + return nodeIPs } diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go index 517f44e20cf64..ff3fff7e66b28 100644 --- a/cmd/kube-proxy/app/server_others.go +++ b/cmd/kube-proxy/app/server_others.go @@ -125,7 +125,7 @@ func (s *ProxyServer) platformCheckSupported() (ipv4Supported, ipv6Supported, du } // createProxier creates the proxy.Provider -func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration, dualStack bool) (proxy.Provider, error) { +func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration, dualStack, initOnly bool) (proxy.Provider, error) { var proxier proxy.Provider var err error @@ -175,6 +175,7 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio s.Recorder, s.HealthzServer, config.NodePortAddresses, + initOnly, ) } else { // Create a single-stack proxier if and only if the node does not support dual-stack (i.e, no iptables support). 
@@ -201,6 +202,7 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio s.Recorder, s.HealthzServer, config.NodePortAddresses, + initOnly, ) } @@ -247,6 +249,7 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio config.IPVS.Scheduler, config.NodePortAddresses, kernelHandler, + initOnly, ) } else { var localDetector proxyutiliptables.LocalTrafficDetector @@ -279,6 +282,7 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio config.IPVS.Scheduler, config.NodePortAddresses, kernelHandler, + initOnly, ) } if err != nil { @@ -329,6 +333,20 @@ func (s *ProxyServer) setupConntrack() error { } } + if s.Config.Conntrack.UDPTimeout.Duration > 0 { + timeout := int(s.Config.Conntrack.UDPTimeout.Duration / time.Second) + if err := ct.SetUDPTimeout(timeout); err != nil { + return err + } + } + + if s.Config.Conntrack.UDPStreamTimeout.Duration > 0 { + timeout := int(s.Config.Conntrack.UDPStreamTimeout.Duration / time.Second) + if err := ct.SetUDPStreamTimeout(timeout); err != nil { + return err + } + } + return nil } diff --git a/cmd/kube-proxy/app/server_others_test.go b/cmd/kube-proxy/app/server_others_test.go index cbb131e6df33a..959e2972b0189 100644 --- a/cmd/kube-proxy/app/server_others_test.go +++ b/cmd/kube-proxy/app/server_others_test.go @@ -40,7 +40,7 @@ import ( proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config" proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables" netutils "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func Test_platformApplyDefaults(t *testing.T) { @@ -672,8 +672,8 @@ func TestGetConntrackMax(t *testing.T) { for i, tc := range testCases { cfg := proxyconfigapi.KubeProxyConntrackConfiguration{ - Min: pointer.Int32(tc.min), - MaxPerCore: pointer.Int32(tc.maxPerCore), + Min: ptr.To(tc.min), + MaxPerCore: ptr.To(tc.maxPerCore), } x, e := getConntrackMax(cfg) if e != nil { diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index 74f05b182f348..5990181693f4b 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -17,9 +17,11 @@ limitations under the License. 
package app import ( + "context" "errors" "fmt" "io/ioutil" + "net" "path" "testing" "time" @@ -36,7 +38,8 @@ import ( componentbaseconfig "k8s.io/component-base/config" logsapi "k8s.io/component-base/logs/api/v1" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" - "k8s.io/utils/pointer" + netutils "k8s.io/utils/net" + "k8s.io/utils/ptr" ) // TestLoadConfig tests proper operation of loadConfig() @@ -195,8 +198,8 @@ nodePortAddresses: ClusterCIDR: tc.clusterCIDR, ConfigSyncPeriod: metav1.Duration{Duration: 15 * time.Second}, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(2), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](2), + Min: ptr.To[int32](1), TCPCloseWaitTimeout: &metav1.Duration{Duration: 10 * time.Second}, TCPEstablishedTimeout: &metav1.Duration{Duration: 20 * time.Second}, }, @@ -205,8 +208,8 @@ nodePortAddresses: HostnameOverride: "foo", IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, - MasqueradeBit: pointer.Int32(17), - LocalhostNodePorts: pointer.Bool(true), + MasqueradeBit: ptr.To[int32](17), + LocalhostNodePorts: ptr.To(true), MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second}, SyncPeriod: metav1.Duration{Duration: 60 * time.Second}, }, @@ -217,7 +220,7 @@ nodePortAddresses: }, MetricsBindAddress: tc.metricsBindAddress, Mode: kubeproxyconfig.ProxyMode(tc.mode), - OOMScoreAdj: pointer.Int32(17), + OOMScoreAdj: ptr.To[int32](17), PortRange: "2-7", NodePortAddresses: []string{"10.20.30.40/16", "fd00:1::0/64"}, DetectLocalMode: kubeproxyconfig.LocalModeClusterCIDR, @@ -593,11 +596,7 @@ func TestAddressFromDeprecatedFlags(t *testing.T) { } } -func makeNodeWithAddresses(name, internal, external string) *v1.Node { - if name == "" { - return &v1.Node{} - } - +func makeNodeWithAddress(name, primaryIP string) *v1.Node { node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -607,26 +606,76 @@ func makeNodeWithAddresses(name, internal, external string) *v1.Node { }, } - if internal != "" { + if primaryIP != "" { node.Status.Addresses = append(node.Status.Addresses, - v1.NodeAddress{Type: v1.NodeInternalIP, Address: internal}, + v1.NodeAddress{Type: v1.NodeInternalIP, Address: primaryIP}, ) } - if external != "" { - node.Status.Addresses = append(node.Status.Addresses, - v1.NodeAddress{Type: v1.NodeExternalIP, Address: external}, - ) + return node +} + +// Test that getNodeIPs retries on failure +func Test_getNodeIPs(t *testing.T) { + var chans [3]chan error + + client := clientsetfake.NewSimpleClientset( + // node1 initially has no IP address. + makeNodeWithAddress("node1", ""), + + // node2 initially has an invalid IP address. + makeNodeWithAddress("node2", "invalid-ip"), + + // node3 initially does not exist. + ) + + for i := range chans { + chans[i] = make(chan error) + ch := chans[i] + nodeName := fmt.Sprintf("node%d", i+1) + expectIP := fmt.Sprintf("192.168.0.%d", i+1) + go func() { + ips := getNodeIPs(client, nodeName) + if len(ips) == 0 { + ch <- fmt.Errorf("expected IP %s for %s but got nil", expectIP, nodeName) + } else if ips[0].String() != expectIP { + ch <- fmt.Errorf("expected IP %s for %s but got %s", expectIP, nodeName, ips[0].String()) + } else if len(ips) != 1 { + ch <- fmt.Errorf("expected IP %s for %s but got multiple IPs", expectIP, nodeName) + } + close(ch) + }() } - return node + // Give the goroutines time to fetch the bad/non-existent nodes, then fix them. 
+ time.Sleep(1200 * time.Millisecond) + + _, _ = client.CoreV1().Nodes().UpdateStatus(context.TODO(), + makeNodeWithAddress("node1", "192.168.0.1"), + metav1.UpdateOptions{}, + ) + _, _ = client.CoreV1().Nodes().UpdateStatus(context.TODO(), + makeNodeWithAddress("node2", "192.168.0.2"), + metav1.UpdateOptions{}, + ) + _, _ = client.CoreV1().Nodes().Create(context.TODO(), + makeNodeWithAddress("node3", "192.168.0.3"), + metav1.CreateOptions{}, + ) + + // Ensure each getNodeIP completed as expected + for i := range chans { + err := <-chans[i] + if err != nil { + t.Error(err.Error()) + } + } } func Test_detectNodeIPs(t *testing.T) { cases := []struct { name string - nodeInfo *v1.Node - hostname string + rawNodeIPs []net.IP bindAddress string expectedFamily v1.IPFamily expectedIPv4 string @@ -634,120 +683,138 @@ func Test_detectNodeIPs(t *testing.T) { }{ { name: "Bind address IPv4 unicast address and no Node object", - nodeInfo: makeNodeWithAddresses("", "", ""), - hostname: "fakeHost", + rawNodeIPs: nil, bindAddress: "10.0.0.1", expectedFamily: v1.IPv4Protocol, expectedIPv4: "10.0.0.1", - expectedIPv6: "::", + expectedIPv6: "::1", }, { name: "Bind address IPv6 unicast address and no Node object", - nodeInfo: makeNodeWithAddresses("", "", ""), - hostname: "fakeHost", + rawNodeIPs: nil, bindAddress: "fd00:4321::2", expectedFamily: v1.IPv6Protocol, - expectedIPv4: "0.0.0.0", + expectedIPv4: "127.0.0.1", expectedIPv6: "fd00:4321::2", }, { - name: "No Valid IP found", - nodeInfo: makeNodeWithAddresses("", "", ""), - hostname: "fakeHost", + name: "No Valid IP found and no bind address", + rawNodeIPs: nil, bindAddress: "", expectedFamily: v1.IPv4Protocol, expectedIPv4: "127.0.0.1", - expectedIPv6: "::", - }, - // Disabled because the GetNodeIP method has a backoff retry mechanism - // and the test takes more than 30 seconds - // ok k8s.io/kubernetes/cmd/kube-proxy/app 34.136s - // { - // name: "No Valid IP found and unspecified bind address", - // nodeInfo: makeNodeWithAddresses("", "", ""), - // hostname: "fakeHost", - // bindAddress: "0.0.0.0", - // expectedFamily: v1.IPv4Protocol, - // expectedIPv4: "127.0.0.1", - // expectedIPv6: "::", - // }, + expectedIPv6: "::1", + }, + { + name: "No Valid IP found and unspecified bind address", + rawNodeIPs: nil, + bindAddress: "0.0.0.0", + expectedFamily: v1.IPv4Protocol, + expectedIPv4: "127.0.0.1", + expectedIPv6: "::1", + }, { name: "Bind address 0.0.0.0 and node with IPv4 InternalIP set", - nodeInfo: makeNodeWithAddresses("fakeHost", "192.168.1.1", "90.90.90.90"), - hostname: "fakeHost", + rawNodeIPs: []net.IP{netutils.ParseIPSloppy("192.168.1.1")}, bindAddress: "0.0.0.0", expectedFamily: v1.IPv4Protocol, expectedIPv4: "192.168.1.1", - expectedIPv6: "::", + expectedIPv6: "::1", }, { name: "Bind address :: and node with IPv4 InternalIP set", - nodeInfo: makeNodeWithAddresses("fakeHost", "192.168.1.1", "90.90.90.90"), - hostname: "fakeHost", + rawNodeIPs: []net.IP{netutils.ParseIPSloppy("192.168.1.1")}, bindAddress: "::", expectedFamily: v1.IPv4Protocol, expectedIPv4: "192.168.1.1", - expectedIPv6: "::", + expectedIPv6: "::1", }, { name: "Bind address 0.0.0.0 and node with IPv6 InternalIP set", - nodeInfo: makeNodeWithAddresses("fakeHost", "fd00:1234::1", "2001:db8::2"), - hostname: "fakeHost", + rawNodeIPs: []net.IP{netutils.ParseIPSloppy("fd00:1234::1")}, bindAddress: "0.0.0.0", expectedFamily: v1.IPv6Protocol, - expectedIPv4: "0.0.0.0", + expectedIPv4: "127.0.0.1", expectedIPv6: "fd00:1234::1", }, { name: "Bind address :: and node with IPv6 InternalIP 
set", - nodeInfo: makeNodeWithAddresses("fakeHost", "fd00:1234::1", "2001:db8::2"), - hostname: "fakeHost", + rawNodeIPs: []net.IP{netutils.ParseIPSloppy("fd00:1234::1")}, bindAddress: "::", expectedFamily: v1.IPv6Protocol, - expectedIPv4: "0.0.0.0", + expectedIPv4: "127.0.0.1", expectedIPv6: "fd00:1234::1", }, { - name: "Bind address 0.0.0.0 and node with only IPv4 ExternalIP set", - nodeInfo: makeNodeWithAddresses("fakeHost", "", "90.90.90.90"), - hostname: "fakeHost", - bindAddress: "0.0.0.0", - expectedFamily: v1.IPv4Protocol, - expectedIPv4: "90.90.90.90", - expectedIPv6: "::", - }, - { - name: "Bind address :: and node with only IPv4 ExternalIP set", - nodeInfo: makeNodeWithAddresses("fakeHost", "", "90.90.90.90"), - hostname: "fakeHost", + name: "Dual stack, primary IPv4", + rawNodeIPs: []net.IP{ + netutils.ParseIPSloppy("90.90.90.90"), + netutils.ParseIPSloppy("2001:db8::2"), + }, bindAddress: "::", expectedFamily: v1.IPv4Protocol, expectedIPv4: "90.90.90.90", - expectedIPv6: "::", + expectedIPv6: "2001:db8::2", }, { - name: "Bind address 0.0.0.0 and node with only IPv6 ExternalIP set", - nodeInfo: makeNodeWithAddresses("fakeHost", "", "2001:db8::2"), - hostname: "fakeHost", + name: "Dual stack, primary IPv6", + rawNodeIPs: []net.IP{ + netutils.ParseIPSloppy("2001:db8::2"), + netutils.ParseIPSloppy("90.90.90.90"), + }, bindAddress: "0.0.0.0", expectedFamily: v1.IPv6Protocol, - expectedIPv4: "0.0.0.0", + expectedIPv4: "90.90.90.90", expectedIPv6: "2001:db8::2", }, { - name: "Bind address :: and node with only IPv6 ExternalIP set", - nodeInfo: makeNodeWithAddresses("fakeHost", "", "2001:db8::2"), - hostname: "fakeHost", - bindAddress: "::", + name: "Dual stack, override IPv4", + rawNodeIPs: []net.IP{ + netutils.ParseIPSloppy("2001:db8::2"), + netutils.ParseIPSloppy("90.90.90.90"), + }, + bindAddress: "80.80.80.80", + expectedFamily: v1.IPv4Protocol, + expectedIPv4: "80.80.80.80", + expectedIPv6: "2001:db8::2", + }, + { + name: "Dual stack, override IPv6", + rawNodeIPs: []net.IP{ + netutils.ParseIPSloppy("90.90.90.90"), + netutils.ParseIPSloppy("2001:db8::2"), + }, + bindAddress: "2001:db8::555", expectedFamily: v1.IPv6Protocol, - expectedIPv4: "0.0.0.0", + expectedIPv4: "90.90.90.90", + expectedIPv6: "2001:db8::555", + }, + { + name: "Dual stack, override primary family, IPv4", + rawNodeIPs: []net.IP{ + netutils.ParseIPSloppy("2001:db8::2"), + netutils.ParseIPSloppy("90.90.90.90"), + }, + bindAddress: "127.0.0.1", + expectedFamily: v1.IPv4Protocol, + expectedIPv4: "127.0.0.1", expectedIPv6: "2001:db8::2", }, + { + name: "Dual stack, override primary family, IPv6", + rawNodeIPs: []net.IP{ + netutils.ParseIPSloppy("90.90.90.90"), + netutils.ParseIPSloppy("2001:db8::2"), + }, + bindAddress: "::1", + expectedFamily: v1.IPv6Protocol, + expectedIPv4: "90.90.90.90", + expectedIPv6: "::1", + }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - client := clientsetfake.NewSimpleClientset(c.nodeInfo) - primaryFamily, ips := detectNodeIPs(client, c.hostname, c.bindAddress) + primaryFamily, ips := detectNodeIPs(c.rawNodeIPs, c.bindAddress) if primaryFamily != c.expectedFamily { t.Errorf("Expected family %q got %q", c.expectedFamily, primaryFamily) } @@ -763,11 +830,12 @@ func Test_detectNodeIPs(t *testing.T) { func Test_checkIPConfig(t *testing.T) { cases := []struct { - name string - proxy *ProxyServer - ssErr bool - dsErr bool - fatal bool + name string + proxy *ProxyServer + ssErr bool + ssFatal bool + dsErr bool + dsFatal bool }{ { name: "empty config", @@ -820,9 +888,10 @@ 
func Test_checkIPConfig(t *testing.T) { }, PrimaryIPFamily: v1.IPv4Protocol, }, - ssErr: true, - dsErr: true, - fatal: false, + ssErr: true, + ssFatal: false, + dsErr: true, + dsFatal: false, }, { name: "wrong-family clusterCIDR when using ClusterCIDR LocalDetector", @@ -833,9 +902,10 @@ func Test_checkIPConfig(t *testing.T) { }, PrimaryIPFamily: v1.IPv4Protocol, }, - ssErr: true, - dsErr: true, - fatal: true, + ssErr: true, + ssFatal: true, + dsErr: true, + dsFatal: false, }, { @@ -879,9 +949,10 @@ func Test_checkIPConfig(t *testing.T) { }, PrimaryIPFamily: v1.IPv6Protocol, }, - ssErr: true, - dsErr: true, - fatal: false, + ssErr: true, + ssFatal: false, + dsErr: true, + dsFatal: false, }, { @@ -929,9 +1000,10 @@ func Test_checkIPConfig(t *testing.T) { PrimaryIPFamily: v1.IPv4Protocol, podCIDRs: []string{"fd01:2345::/64"}, }, - ssErr: true, - dsErr: true, - fatal: true, + ssErr: true, + ssFatal: true, + dsErr: true, + dsFatal: true, }, { @@ -957,9 +1029,10 @@ func Test_checkIPConfig(t *testing.T) { }, PrimaryIPFamily: v1.IPv4Protocol, }, - ssErr: true, - dsErr: true, - fatal: false, + ssErr: true, + ssFatal: false, + dsErr: true, + dsFatal: false, }, { @@ -1003,9 +1076,9 @@ func Test_checkIPConfig(t *testing.T) { }, PrimaryIPFamily: v1.IPv6Protocol, }, - ssErr: true, - dsErr: false, - fatal: false, + ssErr: true, + ssFatal: false, + dsErr: false, }, { @@ -1031,9 +1104,9 @@ func Test_checkIPConfig(t *testing.T) { }, PrimaryIPFamily: v1.IPv6Protocol, }, - ssErr: true, - dsErr: false, - fatal: false, + ssErr: true, + ssFatal: false, + dsErr: false, }, } @@ -1044,8 +1117,8 @@ func Test_checkIPConfig(t *testing.T) { t.Errorf("unexpected error in single-stack case: %v", err) } else if err == nil && c.ssErr { t.Errorf("unexpected lack of error in single-stack case") - } else if fatal != c.fatal { - t.Errorf("expected fatal=%v, got %v", c.fatal, fatal) + } else if fatal != c.ssFatal { + t.Errorf("expected fatal=%v, got %v", c.ssFatal, fatal) } err, fatal = checkIPConfig(c.proxy, true) @@ -1053,8 +1126,8 @@ func Test_checkIPConfig(t *testing.T) { t.Errorf("unexpected error in dual-stack case: %v", err) } else if err == nil && c.dsErr { t.Errorf("unexpected lack of error in dual-stack case") - } else if fatal != c.fatal { - t.Errorf("expected fatal=%v, got %v", c.fatal, fatal) + } else if fatal != c.dsFatal { + t.Errorf("expected fatal=%v, got %v", c.dsFatal, fatal) } }) } diff --git a/cmd/kube-proxy/app/server_windows.go b/cmd/kube-proxy/app/server_windows.go index 62adf4a41d1f1..90f817c1badd8 100644 --- a/cmd/kube-proxy/app/server_windows.go +++ b/cmd/kube-proxy/app/server_windows.go @@ -30,6 +30,7 @@ import ( // Enable pprof HTTP handlers. _ "net/http/pprof" + v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/proxy" proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/config" "k8s.io/kubernetes/pkg/proxy/winkernel" @@ -48,6 +49,12 @@ func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfigur // platform-specific setup. 
func (s *ProxyServer) platformSetup() error { winkernel.RegisterMetrics() + // Preserve backward-compatibility with the old secondary IP behavior + if s.PrimaryIPFamily == v1.IPv4Protocol { + s.NodeIPs[v1.IPv6Protocol] = net.IPv6zero + } else { + s.NodeIPs[v1.IPv4Protocol] = net.IPv4zero + } return nil } @@ -72,7 +79,10 @@ func (s *ProxyServer) platformCheckSupported() (ipv4Supported, ipv6Supported, du } // createProxier creates the proxy.Provider -func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration, dualStackMode bool) (proxy.Provider, error) { +func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguration, dualStackMode, initOnly bool) (proxy.Provider, error) { + if initOnly { + return nil, fmt.Errorf("--init-only is not implemented on Windows") + } var healthzPort int if len(config.HealthzBindAddress) > 0 { _, port, _ := net.SplitHostPort(config.HealthzBindAddress) @@ -96,6 +106,7 @@ func (s *ProxyServer) createProxier(config *proxyconfigapi.KubeProxyConfiguratio ) } else { proxier, err = winkernel.NewProxier( + s.PrimaryIPFamily, config.IPTables.SyncPeriod.Duration, config.IPTables.MinSyncPeriod.Duration, config.ClusterCIDR, diff --git a/cmd/kube-scheduler/app/options/options_test.go b/cmd/kube-scheduler/app/options/options_test.go index d46d04053e02b..cb721e39b83d7 100644 --- a/cmd/kube-scheduler/app/options/options_test.go +++ b/cmd/kube-scheduler/app/options/options_test.go @@ -41,7 +41,7 @@ import ( configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing" "k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestSchedulerOptions(t *testing.T) { @@ -279,7 +279,7 @@ profiles: defaultPodInitialBackoffSeconds := int64(1) defaultPodMaxBackoffSeconds := int64(10) - defaultPercentageOfNodesToScore := pointer.Int32(0) + defaultPercentageOfNodesToScore := ptr.To[int32](0) testcases := []struct { name string diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go index c48b09a420d6f..4a7e171ae3094 100644 --- a/cmd/kube-scheduler/app/server.go +++ b/cmd/kube-scheduler/app/server.go @@ -294,9 +294,8 @@ func newHealthzAndMetricsHandler(config *kubeschedulerconfig.KubeSchedulerConfig pathRecorderMux := mux.NewPathRecorderMux("kube-scheduler") healthz.InstallHandler(pathRecorderMux, checks...) 
installMetricHandler(pathRecorderMux, informers, isLeader) - if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { - slis.SLIMetricsWithReset{}.Install(pathRecorderMux) - } + slis.SLIMetricsWithReset{}.Install(pathRecorderMux) + if config.EnableProfiling { routes.Profiling{}.Install(pathRecorderMux) if config.EnableContentionProfiling { diff --git a/cmd/kube-scheduler/app/server_test.go b/cmd/kube-scheduler/app/server_test.go index 2293c15fa194d..6f3e15d06301f 100644 --- a/cmd/kube-scheduler/app/server_test.go +++ b/cmd/kube-scheduler/app/server_test.go @@ -450,7 +450,7 @@ func (*foo) Name() string { return "Foo" } -func newFoo(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func newFoo(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &foo{}, nil } diff --git a/cmd/kubeadm/OWNERS b/cmd/kubeadm/OWNERS index 22ae068d14731..81af60d2d17d4 100644 --- a/cmd/kubeadm/OWNERS +++ b/cmd/kubeadm/OWNERS @@ -1,18 +1,17 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: - - fabriziopandini - neolit123 - SataQiu - pacoxu + - chendave reviewers: - - fabriziopandini - neolit123 - SataQiu - pacoxu - - RA489 - chendave emeritus_approvers: + - fabriziopandini - luxas - timothysc - rosti diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index e73495384e925..60e1f44032d47 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -87,10 +87,11 @@ func fuzzClusterConfiguration(obj *kubeadm.ClusterConfiguration, c fuzz.Continue obj.APIServer.TimeoutForControlPlane = &metav1.Duration{ Duration: constants.DefaultControlPlaneTimeout, } - obj.ControllerManager.ExtraEnvs = []corev1.EnvVar{} - obj.APIServer.ExtraEnvs = []corev1.EnvVar{} - obj.Scheduler.ExtraEnvs = []corev1.EnvVar{} - obj.Etcd.Local.ExtraEnvs = []corev1.EnvVar{} + obj.ControllerManager.ExtraEnvs = []kubeadm.EnvVar{} + obj.APIServer.ExtraEnvs = []kubeadm.EnvVar{} + obj.Scheduler.ExtraEnvs = []kubeadm.EnvVar{} + obj.Etcd.Local.ExtraEnvs = []kubeadm.EnvVar{} + obj.EncryptionAlgorithm = kubeadm.EncryptionAlgorithmRSA } func fuzzDNS(obj *kubeadm.DNS, c fuzz.Continue) { diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index e2ac31cbacb5d..617994600a8cf 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -17,8 +17,6 @@ limitations under the License. package kubeadm import ( - "crypto/x509" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -141,6 +139,10 @@ type ClusterConfiguration struct { // The cluster name ClusterName string + + // EncryptionAlgorithm holds the type of asymmetric encryption algorithm used for keys and certificates. + // Can be "RSA" (default algorithm, key size is 2048) or "ECDSA" (uses the P-256 elliptic curve). + EncryptionAlgorithm EncryptionAlgorithmType } // ControlPlaneComponent holds settings common to control plane component of the cluster @@ -157,7 +159,7 @@ type ControlPlaneComponent struct { // ExtraEnvs is an extra set of environment variables to pass to the control plane component. // Environment variables passed using ExtraEnvs will override any existing environment variables, or *_proxy environment variables that kubeadm adds by default. 
// +optional - ExtraEnvs []v1.EnvVar + ExtraEnvs []EnvVar } // APIServer holds settings necessary for API server deployments in the cluster @@ -275,7 +277,7 @@ type LocalEtcd struct { // ExtraEnvs is an extra set of environment variables to pass to the control plane component. // Environment variables passed using ExtraEnvs will override any existing environment variables, or *_proxy environment variables that kubeadm adds by default. // +optional - ExtraEnvs []v1.EnvVar + ExtraEnvs []EnvVar // ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert. ServerCertSANs []string @@ -403,13 +405,18 @@ func (cfg *ClusterConfiguration) GetControlPlaneImageRepository() string { return cfg.ImageRepository } -// PublicKeyAlgorithm returns the type of encryption keys used in the cluster. -func (cfg *ClusterConfiguration) PublicKeyAlgorithm() x509.PublicKeyAlgorithm { - if features.Enabled(cfg.FeatureGates, features.PublicKeysECDSA) { - return x509.ECDSA +// EncryptionAlgorithmType returns the type of encryption keys used in the cluster. +func (cfg *ClusterConfiguration) EncryptionAlgorithmType() EncryptionAlgorithmType { + // If the feature gate is set to true, or false respect it. + // If the feature gate is not set, use the EncryptionAlgorithm field (v1beta4). + // TODO: remove this function when the feature gate is removed. + if enabled, ok := cfg.FeatureGates[features.PublicKeysECDSA]; ok { + if enabled { + return EncryptionAlgorithmECDSA + } + return EncryptionAlgorithmRSA } - - return x509.RSA + return cfg.EncryptionAlgorithm } // HostPathMount contains elements describing volumes that are mounted from the @@ -513,3 +520,18 @@ type Arg struct { Name string Value string } + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + v1.EnvVar +} + +// EncryptionAlgorithmType can define an asymmetric encryption algorithm type. +type EncryptionAlgorithmType string + +const ( + // EncryptionAlgorithmECDSA defines the ECDSA encryption algorithm type. + EncryptionAlgorithmECDSA EncryptionAlgorithmType = "ECDSA" + // EncryptionAlgorithmRSA defines the RSA encryption algorithm type. + EncryptionAlgorithmRSA EncryptionAlgorithmType = "RSA" +) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta3/conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1beta3/conversion.go index 1b19007104e9e..80b45a5553382 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta3/conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta3/conversion.go @@ -19,36 +19,56 @@ package v1beta3 import ( "sort" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) +// Convert_kubeadm_InitConfiguration_To_v1beta3_InitConfiguration converts a private InitConfiguration to public InitConfiguration. func Convert_kubeadm_InitConfiguration_To_v1beta3_InitConfiguration(in *kubeadm.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { return autoConvert_kubeadm_InitConfiguration_To_v1beta3_InitConfiguration(in, out, s) } +// Convert_kubeadm_JoinConfiguration_To_v1beta3_JoinConfiguration converts a private JoinConfiguration to public JoinConfiguration. func Convert_kubeadm_JoinConfiguration_To_v1beta3_JoinConfiguration(in *kubeadm.JoinConfiguration, out *JoinConfiguration, s conversion.Scope) error { return autoConvert_kubeadm_JoinConfiguration_To_v1beta3_JoinConfiguration(in, out, s) } +// Convert_v1beta3_InitConfiguration_To_kubeadm_InitConfiguration converts a public InitConfiguration to private InitConfiguration. 
func Convert_v1beta3_InitConfiguration_To_kubeadm_InitConfiguration(in *InitConfiguration, out *kubeadm.InitConfiguration, s conversion.Scope) error { err := autoConvert_v1beta3_InitConfiguration_To_kubeadm_InitConfiguration(in, out, s) if err != nil { return err } err = Convert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration(&ClusterConfiguration{}, &out.ClusterConfiguration, s) + // Required to pass fuzzer tests. This ClusterConfiguration is empty and is never defaulted. + // If we call Convert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration() it will receive + // a default value, thus here we need to reset it back to "". + out.EncryptionAlgorithm = "" return err } +// Convert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration is required due to missing EncryptionAlgorithm in v1beta3. +func Convert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration(in *kubeadm.ClusterConfiguration, out *ClusterConfiguration, s conversion.Scope) error { + return autoConvert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration(in, out, s) +} + +// Convert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration is required due to missing EncryptionAlgorithm in v1beta3. +func Convert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in *ClusterConfiguration, out *kubeadm.ClusterConfiguration, s conversion.Scope) error { + // Required to pass validation and fuzzer tests. The field is missing in v1beta3, thus we have to + // default it to a sane (default) value in the internal type. + out.EncryptionAlgorithm = kubeadm.EncryptionAlgorithmRSA + return autoConvert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in, out, s) +} + // Convert_v1beta3_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent is required due to the missing ControlPlaneComponent.ExtraEnvs in v1beta3. func Convert_v1beta3_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in *ControlPlaneComponent, out *kubeadm.ControlPlaneComponent, s conversion.Scope) error { - out.ExtraEnvs = []v1.EnvVar{} + out.ExtraEnvs = []kubeadm.EnvVar{} out.ExtraArgs = convertToArgs(in.ExtraArgs) return autoConvert_v1beta3_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in, out, s) } +// Convert_kubeadm_ControlPlaneComponent_To_v1beta3_ControlPlaneComponent converts a private ControlPlaneComponent to public ControlPlaneComponent. func Convert_kubeadm_ControlPlaneComponent_To_v1beta3_ControlPlaneComponent(in *kubeadm.ControlPlaneComponent, out *ControlPlaneComponent, s conversion.Scope) error { out.ExtraArgs = convertFromArgs(in.ExtraArgs) return autoConvert_kubeadm_ControlPlaneComponent_To_v1beta3_ControlPlaneComponent(in, out, s) @@ -56,21 +76,24 @@ func Convert_kubeadm_ControlPlaneComponent_To_v1beta3_ControlPlaneComponent(in * // Convert_v1beta3_LocalEtcd_To_kubeadm_LocalEtcd is required due to the missing LocalEtcd.ExtraEnvs in v1beta3. func Convert_v1beta3_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error { - out.ExtraEnvs = []v1.EnvVar{} + out.ExtraEnvs = []kubeadm.EnvVar{} out.ExtraArgs = convertToArgs(in.ExtraArgs) return autoConvert_v1beta3_LocalEtcd_To_kubeadm_LocalEtcd(in, out, s) } +// Convert_kubeadm_LocalEtcd_To_v1beta3_LocalEtcd converts a private LocalEtcd to public LocalEtcd. 
func Convert_kubeadm_LocalEtcd_To_v1beta3_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error { out.ExtraArgs = convertFromArgs(in.ExtraArgs) return autoConvert_kubeadm_LocalEtcd_To_v1beta3_LocalEtcd(in, out, s) } +// Convert_v1beta3_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions converts a public NodeRegistrationOptions to private NodeRegistrationOptions. func Convert_v1beta3_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error { out.KubeletExtraArgs = convertToArgs(in.KubeletExtraArgs) return autoConvert_v1beta3_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in, out, s) } +// Convert_kubeadm_NodeRegistrationOptions_To_v1beta3_NodeRegistrationOptions converts a private NodeRegistrationOptions to public NodeRegistrationOptions. func Convert_kubeadm_NodeRegistrationOptions_To_v1beta3_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error { out.KubeletExtraArgs = convertFromArgs(in.KubeletExtraArgs) return autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta3_NodeRegistrationOptions(in, out, s) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta3/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1beta3/defaults.go index d007ad0b36ed8..f4af754c2474b 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta3/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta3/defaults.go @@ -135,6 +135,7 @@ func SetDefaults_JoinConfiguration(obj *JoinConfiguration) { SetDefaults_NodeRegistration(&obj.NodeRegistration) } +// SetDefaults_JoinControlPlane assigns default values for a joining control plane node func SetDefaults_JoinControlPlane(obj *JoinControlPlane) { if obj != nil { SetDefaults_APIEndpoint(&obj.LocalAPIEndpoint) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta3/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1beta3/zz_generated.conversion.go index 7eb0fdf209bad..95abd68c4a29b 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta3/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta3/zz_generated.conversion.go @@ -69,16 +69,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ClusterConfiguration)(nil), (*kubeadm.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration(a.(*ClusterConfiguration), b.(*kubeadm.ClusterConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.ClusterConfiguration)(nil), (*ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration(a.(*kubeadm.ClusterConfiguration), b.(*ClusterConfiguration), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*DNS)(nil), (*kubeadm.DNS)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta3_DNS_To_kubeadm_DNS(a.(*DNS), b.(*kubeadm.DNS), scope) }); err != nil { @@ -184,6 +174,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*kubeadm.ClusterConfiguration)(nil), (*ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration(a.(*kubeadm.ClusterConfiguration), 
b.(*ClusterConfiguration), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*kubeadm.ControlPlaneComponent)(nil), (*ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_kubeadm_ControlPlaneComponent_To_v1beta3_ControlPlaneComponent(a.(*kubeadm.ControlPlaneComponent), b.(*ControlPlaneComponent), scope) }); err != nil { @@ -209,6 +204,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*ClusterConfiguration)(nil), (*kubeadm.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration(a.(*ClusterConfiguration), b.(*kubeadm.ClusterConfiguration), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*ControlPlaneComponent)(nil), (*kubeadm.ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta3_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(a.(*ControlPlaneComponent), b.(*kubeadm.ControlPlaneComponent), scope) }); err != nil { @@ -336,11 +336,6 @@ func autoConvert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in return nil } -// Convert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration is an autogenerated conversion function. -func Convert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in *ClusterConfiguration, out *kubeadm.ClusterConfiguration, s conversion.Scope) error { - return autoConvert_v1beta3_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in, out, s) -} - func autoConvert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration(in *kubeadm.ClusterConfiguration, out *ClusterConfiguration, s conversion.Scope) error { // INFO: in.ComponentConfigs opted out of conversion generation if err := Convert_kubeadm_Etcd_To_v1beta3_Etcd(&in.Etcd, &out.Etcd, s); err != nil { @@ -369,14 +364,10 @@ func autoConvert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration(in // INFO: in.CIImageRepository opted out of conversion generation out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) out.ClusterName = in.ClusterName + // WARNING: in.EncryptionAlgorithm requires manual conversion: does not exist in peer-type return nil } -// Convert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration is an autogenerated conversion function. 
-func Convert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration(in *kubeadm.ClusterConfiguration, out *ClusterConfiguration, s conversion.Scope) error { - return autoConvert_kubeadm_ClusterConfiguration_To_v1beta3_ClusterConfiguration(in, out, s) -} - func autoConvert_v1beta3_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in *ControlPlaneComponent, out *kubeadm.ControlPlaneComponent, s conversion.Scope) error { // WARNING: in.ExtraArgs requires manual conversion: inconvertible types (map[string]string vs []k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm.Arg) out.ExtraVolumes = *(*[]kubeadm.HostPathMount)(unsafe.Pointer(&in.ExtraVolumes)) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta4/conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1beta4/conversion.go index 6cdbc582b9ff3..cee9265202c85 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta4/conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta4/conversion.go @@ -22,10 +22,12 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) +// Convert_kubeadm_InitConfiguration_To_v1beta4_InitConfiguration converts a private InitConfiguration to a public InitConfiguration. func Convert_kubeadm_InitConfiguration_To_v1beta4_InitConfiguration(in *kubeadm.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { return autoConvert_kubeadm_InitConfiguration_To_v1beta4_InitConfiguration(in, out, s) } +// Convert_v1beta4_InitConfiguration_To_kubeadm_InitConfiguration converts a public InitConfiguration to a private InitConfiguration. func Convert_v1beta4_InitConfiguration_To_kubeadm_InitConfiguration(in *InitConfiguration, out *kubeadm.InitConfiguration, s conversion.Scope) error { err := autoConvert_v1beta4_InitConfiguration_To_kubeadm_InitConfiguration(in, out, s) if err != nil { diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta4/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1beta4/defaults.go index 314be0b30c741..d6329da6328b2 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta4/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta4/defaults.go @@ -60,6 +60,9 @@ const ( // DefaultImagePullPolicy is the default image pull policy in kubeadm DefaultImagePullPolicy = corev1.PullIfNotPresent + + // DefaultEncryptionAlgorithm is the default encryption algorithm. + DefaultEncryptionAlgorithm = EncryptionAlgorithmRSA ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -99,6 +102,10 @@ func SetDefaults_ClusterConfiguration(obj *ClusterConfiguration) { obj.ClusterName = DefaultClusterName } + if obj.EncryptionAlgorithm == "" { + obj.EncryptionAlgorithm = DefaultEncryptionAlgorithm + } + SetDefaults_Etcd(obj) SetDefaults_APIServer(&obj.APIServer) } @@ -135,6 +142,7 @@ func SetDefaults_JoinConfiguration(obj *JoinConfiguration) { SetDefaults_NodeRegistration(&obj.NodeRegistration) } +// SetDefaults_JoinControlPlane assigns default values for a joining control plane node func SetDefaults_JoinControlPlane(obj *JoinControlPlane) { if obj != nil { SetDefaults_APIEndpoint(&obj.LocalAPIEndpoint) @@ -205,3 +213,15 @@ func SetDefaults_ResetConfiguration(obj *ResetConfiguration) { obj.CertificatesDir = DefaultCertificatesDir } } + +// SetDefaults_EnvVar assigns default values for EnvVar. 
+// +k8s:defaulter-gen=covers +func SetDefaults_EnvVar(obj *EnvVar) { + if obj.ValueFrom != nil { + if obj.ValueFrom.FieldRef != nil { + if obj.ValueFrom.FieldRef.APIVersion == "" { + obj.ValueFrom.FieldRef.APIVersion = "v1" + } + } + } +} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta4/doc.go b/cmd/kubeadm/app/apis/kubeadm/v1beta4/doc.go index 511d946e72c87..abe48a5861235 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta4/doc.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta4/doc.go @@ -32,6 +32,9 @@ limitations under the License. // - Replace the existing string/string extra argument maps with structured extra arguments that support duplicates. // The change applies to `ClusterConfiguration` - `APIServer.ExtraArgs, `ControllerManager.ExtraArgs`, // `Scheduler.ExtraArgs`, `Etcd.Local.ExtraArgs`. Also to `NodeRegistrationOptions.KubeletExtraArgs`. +// - Add `ClusterConfiguration.EncryptionAlgorithm` that can be used to set the asymmetric encryption algorithm +// used for this cluster's keys and certificates. Can be "RSA" (default algorithm, key size is 2048) or +// "ECDSA" (uses the P-256 elliptic curve). // // Migration from old kubeadm config versions // diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta4/types.go b/cmd/kubeadm/app/apis/kubeadm/v1beta4/types.go index 54217a03bcf01..75db9e179ba1a 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta4/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta4/types.go @@ -140,6 +140,11 @@ type ClusterConfiguration struct { // The cluster name // +optional ClusterName string `json:"clusterName,omitempty"` + + // EncryptionAlgorithm holds the type of asymmetric encryption algorithm used for keys and certificates. + // Can be "RSA" (default algorithm, key size is 2048) or "ECDSA" (uses the P-256 elliptic curve). + // +optional + EncryptionAlgorithm EncryptionAlgorithmType `json:"encryptionAlgorithm,omitempty"` } // ControlPlaneComponent holds settings common to control plane component of the cluster @@ -158,7 +163,7 @@ type ControlPlaneComponent struct { // ExtraEnvs is an extra set of environment variables to pass to the control plane component. // Environment variables passed using ExtraEnvs will override any existing environment variables, or *_proxy environment variables that kubeadm adds by default. // +optional - ExtraEnvs []corev1.EnvVar `json:"extraEnvs,omitempty"` + ExtraEnvs []EnvVar `json:"extraEnvs,omitempty"` } // APIServer holds settings necessary for API server deployments in the cluster @@ -174,9 +179,6 @@ type APIServer struct { TimeoutForControlPlane *metav1.Duration `json:"timeoutForControlPlane,omitempty"` } -// DNSAddOnType defines string identifying DNS add-on types -type DNSAddOnType string - // DNS defines the DNS addon that should be used in the cluster type DNS struct { // ImageMeta allows to customize the image used for the DNS component @@ -296,7 +298,7 @@ type LocalEtcd struct { // ExtraEnvs is an extra set of environment variables to pass to the control plane component. // Environment variables passed using ExtraEnvs will override any existing environment variables, or *_proxy environment variables that kubeadm adds by default. // +optional - ExtraEnvs []corev1.EnvVar `json:"extraEnvs,omitempty"` + ExtraEnvs []EnvVar `json:"extraEnvs,omitempty"` // ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert. // +optional @@ -508,3 +510,18 @@ type Arg struct { Name string `json:"name"` Value string `json:"value"` } + +// EnvVar represents an environment variable present in a Container. 
+type EnvVar struct { + corev1.EnvVar `json:",inline"` +} + +// EncryptionAlgorithmType can define an asymmetric encryption algorithm type. +type EncryptionAlgorithmType string + +const ( + // EncryptionAlgorithmECDSA defines the ECDSA encryption algorithm type. + EncryptionAlgorithmECDSA EncryptionAlgorithmType = "ECDSA" + // EncryptionAlgorithmRSA defines the RSA encryption algorithm type. + EncryptionAlgorithmRSA EncryptionAlgorithmType = "RSA" +) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.conversion.go index a203ddf12bbe1..ed01f88d9f652 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.conversion.go @@ -119,6 +119,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*EnvVar)(nil), (*kubeadm.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta4_EnvVar_To_kubeadm_EnvVar(a.(*EnvVar), b.(*kubeadm.EnvVar), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.EnvVar)(nil), (*EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_EnvVar_To_v1beta4_EnvVar(a.(*kubeadm.EnvVar), b.(*EnvVar), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*Etcd)(nil), (*kubeadm.Etcd)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta4_Etcd_To_kubeadm_Etcd(a.(*Etcd), b.(*kubeadm.Etcd), scope) }); err != nil { @@ -375,6 +385,7 @@ func autoConvert_v1beta4_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in out.ImageRepository = in.ImageRepository out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) out.ClusterName = in.ClusterName + out.EncryptionAlgorithm = kubeadm.EncryptionAlgorithmType(in.EncryptionAlgorithm) return nil } @@ -411,6 +422,7 @@ func autoConvert_kubeadm_ClusterConfiguration_To_v1beta4_ClusterConfiguration(in // INFO: in.CIImageRepository opted out of conversion generation out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) out.ClusterName = in.ClusterName + out.EncryptionAlgorithm = EncryptionAlgorithmType(in.EncryptionAlgorithm) return nil } @@ -422,7 +434,7 @@ func Convert_kubeadm_ClusterConfiguration_To_v1beta4_ClusterConfiguration(in *ku func autoConvert_v1beta4_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in *ControlPlaneComponent, out *kubeadm.ControlPlaneComponent, s conversion.Scope) error { out.ExtraArgs = *(*[]kubeadm.Arg)(unsafe.Pointer(&in.ExtraArgs)) out.ExtraVolumes = *(*[]kubeadm.HostPathMount)(unsafe.Pointer(&in.ExtraVolumes)) - out.ExtraEnvs = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.ExtraEnvs)) + out.ExtraEnvs = *(*[]kubeadm.EnvVar)(unsafe.Pointer(&in.ExtraEnvs)) return nil } @@ -434,7 +446,7 @@ func Convert_v1beta4_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in * func autoConvert_kubeadm_ControlPlaneComponent_To_v1beta4_ControlPlaneComponent(in *kubeadm.ControlPlaneComponent, out *ControlPlaneComponent, s conversion.Scope) error { out.ExtraArgs = *(*[]Arg)(unsafe.Pointer(&in.ExtraArgs)) out.ExtraVolumes = *(*[]HostPathMount)(unsafe.Pointer(&in.ExtraVolumes)) - out.ExtraEnvs = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.ExtraEnvs)) + out.ExtraEnvs = *(*[]EnvVar)(unsafe.Pointer(&in.ExtraEnvs)) return nil } @@ -493,6 +505,26 @@ func Convert_kubeadm_Discovery_To_v1beta4_Discovery(in *kubeadm.Discovery, 
out * return autoConvert_kubeadm_Discovery_To_v1beta4_Discovery(in, out, s) } +func autoConvert_v1beta4_EnvVar_To_kubeadm_EnvVar(in *EnvVar, out *kubeadm.EnvVar, s conversion.Scope) error { + out.EnvVar = in.EnvVar + return nil +} + +// Convert_v1beta4_EnvVar_To_kubeadm_EnvVar is an autogenerated conversion function. +func Convert_v1beta4_EnvVar_To_kubeadm_EnvVar(in *EnvVar, out *kubeadm.EnvVar, s conversion.Scope) error { + return autoConvert_v1beta4_EnvVar_To_kubeadm_EnvVar(in, out, s) +} + +func autoConvert_kubeadm_EnvVar_To_v1beta4_EnvVar(in *kubeadm.EnvVar, out *EnvVar, s conversion.Scope) error { + out.EnvVar = in.EnvVar + return nil +} + +// Convert_kubeadm_EnvVar_To_v1beta4_EnvVar is an autogenerated conversion function. +func Convert_kubeadm_EnvVar_To_v1beta4_EnvVar(in *kubeadm.EnvVar, out *EnvVar, s conversion.Scope) error { + return autoConvert_kubeadm_EnvVar_To_v1beta4_EnvVar(in, out, s) +} + func autoConvert_v1beta4_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error { out.Local = (*kubeadm.LocalEtcd)(unsafe.Pointer(in.Local)) out.External = (*kubeadm.ExternalEtcd)(unsafe.Pointer(in.External)) @@ -714,7 +746,7 @@ func autoConvert_v1beta4_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kube } out.DataDir = in.DataDir out.ExtraArgs = *(*[]kubeadm.Arg)(unsafe.Pointer(&in.ExtraArgs)) - out.ExtraEnvs = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.ExtraEnvs)) + out.ExtraEnvs = *(*[]kubeadm.EnvVar)(unsafe.Pointer(&in.ExtraEnvs)) out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) return nil @@ -731,7 +763,7 @@ func autoConvert_kubeadm_LocalEtcd_To_v1beta4_LocalEtcd(in *kubeadm.LocalEtcd, o } out.DataDir = in.DataDir out.ExtraArgs = *(*[]Arg)(unsafe.Pointer(&in.ExtraArgs)) - out.ExtraEnvs = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.ExtraEnvs)) + out.ExtraEnvs = *(*[]EnvVar)(unsafe.Pointer(&in.ExtraEnvs)) out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) return nil diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.deepcopy.go index c030a560dc1f2..872a6e116c2c6 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.deepcopy.go @@ -161,7 +161,7 @@ func (in *ControlPlaneComponent) DeepCopyInto(out *ControlPlaneComponent) { } if in.ExtraEnvs != nil { in, out := &in.ExtraEnvs, &out.ExtraEnvs - *out = make([]corev1.EnvVar, len(*in)) + *out = make([]EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -227,6 +227,23 @@ func (in *Discovery) DeepCopy() *Discovery { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + in.EnvVar.DeepCopyInto(&out.EnvVar) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Etcd) DeepCopyInto(out *Etcd) { *out = *in @@ -436,7 +453,7 @@ func (in *LocalEtcd) DeepCopyInto(out *LocalEtcd) { } if in.ExtraEnvs != nil { in, out := &in.ExtraEnvs, &out.ExtraEnvs - *out = make([]corev1.EnvVar, len(*in)) + *out = make([]EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.defaults.go index b36b0f0f952ae..5ee0db52d7cd0 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1beta4/zz_generated.defaults.go @@ -23,7 +23,6 @@ package v1beta4 import ( runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // RegisterDefaults adds defaulters functions to the given scheme. @@ -42,37 +41,21 @@ func SetObjectDefaults_ClusterConfiguration(in *ClusterConfiguration) { if in.Etcd.Local != nil { for i := range in.Etcd.Local.ExtraEnvs { a := &in.Etcd.Local.ExtraEnvs[i] - if a.ValueFrom != nil { - if a.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef) - } - } + SetDefaults_EnvVar(a) } } SetDefaults_APIServer(&in.APIServer) for i := range in.APIServer.ControlPlaneComponent.ExtraEnvs { a := &in.APIServer.ControlPlaneComponent.ExtraEnvs[i] - if a.ValueFrom != nil { - if a.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef) - } - } + SetDefaults_EnvVar(a) } for i := range in.ControllerManager.ExtraEnvs { a := &in.ControllerManager.ExtraEnvs[i] - if a.ValueFrom != nil { - if a.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef) - } - } + SetDefaults_EnvVar(a) } for i := range in.Scheduler.ExtraEnvs { a := &in.Scheduler.ExtraEnvs[i] - if a.ValueFrom != nil { - if a.ValueFrom.FieldRef != nil { - v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef) - } - } + SetDefaults_EnvVar(a) } } diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index 23c0590d7c3bc..a8e1c625f8301 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -72,6 +72,7 @@ func ValidateClusterConfiguration(c *kubeadm.ClusterConfiguration) field.ErrorLi allErrs = append(allErrs, ValidateHostPort(c.ControlPlaneEndpoint, field.NewPath("controlPlaneEndpoint"))...) allErrs = append(allErrs, ValidateImageRepository(c.ImageRepository, field.NewPath("imageRepository"))...) allErrs = append(allErrs, ValidateEtcd(&c.Etcd, field.NewPath("etcd"))...) + allErrs = append(allErrs, ValidateEncryptionAlgorithm(string(c.EncryptionAlgorithm), field.NewPath("encryptionAlgorithm"))...) allErrs = append(allErrs, componentconfigs.Validate(c)...) return allErrs } @@ -337,6 +338,17 @@ func ValidateEtcd(e *kubeadm.Etcd, fldPath *field.Path) field.ErrorList { return allErrs } +// ValidateEncryptionAlgorithm validates the public key algorithm +func ValidateEncryptionAlgorithm(algo string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if algo != string(kubeadm.EncryptionAlgorithmRSA) && algo != string(kubeadm.EncryptionAlgorithmECDSA) { + msg := fmt.Sprintf("Invalid encryption algorithm. 
Must be %q or %q", + kubeadm.EncryptionAlgorithmRSA, kubeadm.EncryptionAlgorithmECDSA) + allErrs = append(allErrs, field.Invalid(fldPath, algo, msg)) + } + return allErrs +} + // ValidateCertSANs validates alternative names func ValidateCertSANs(altnames []string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index c6ddf38e72bac..32ffe5a23208c 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -513,7 +513,8 @@ func TestValidateInitConfiguration(t *testing.T) { ServiceSubnet: "10.96.0.1/12", DNSDomain: "cluster.local", }, - CertificatesDir: "/some/cert/dir", + CertificatesDir: "/some/cert/dir", + EncryptionAlgorithm: kubeadmapi.EncryptionAlgorithmRSA, }, NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodename, CRISocket: criPath}, }, false}, @@ -528,7 +529,8 @@ func TestValidateInitConfiguration(t *testing.T) { ServiceSubnet: "2001:db8::1/98", DNSDomain: "cluster.local", }, - CertificatesDir: "/some/cert/dir", + CertificatesDir: "/some/cert/dir", + EncryptionAlgorithm: kubeadmapi.EncryptionAlgorithmRSA, }, NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodename, CRISocket: criPath}, }, false}, @@ -543,7 +545,8 @@ func TestValidateInitConfiguration(t *testing.T) { ServiceSubnet: "10.96.0.1/12", DNSDomain: "cluster.local", }, - CertificatesDir: "/some/other/cert/dir", + CertificatesDir: "/some/other/cert/dir", + EncryptionAlgorithm: kubeadmapi.EncryptionAlgorithmRSA, }, }, false}, {"valid InitConfiguration with incorrect IPv4 pod subnet", @@ -558,7 +561,8 @@ func TestValidateInitConfiguration(t *testing.T) { DNSDomain: "cluster.local", PodSubnet: "10.0.1.15", }, - CertificatesDir: "/some/other/cert/dir", + CertificatesDir: "/some/other/cert/dir", + EncryptionAlgorithm: kubeadmapi.EncryptionAlgorithmRSA, }, NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodename, CRISocket: criPath}, }, false}, @@ -580,7 +584,8 @@ func TestValidateInitConfiguration(t *testing.T) { DNSDomain: "cluster.local", PodSubnet: "10.0.1.15/16", }, - CertificatesDir: "/some/other/cert/dir", + CertificatesDir: "/some/other/cert/dir", + EncryptionAlgorithm: kubeadmapi.EncryptionAlgorithmRSA, }, NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodename, CRISocket: criPath}, }, true}, @@ -601,7 +606,8 @@ func TestValidateInitConfiguration(t *testing.T) { ServiceSubnet: "2001:db8::1/112", DNSDomain: "cluster.local", }, - CertificatesDir: "/some/other/cert/dir", + CertificatesDir: "/some/other/cert/dir", + EncryptionAlgorithm: kubeadmapi.EncryptionAlgorithmECDSA, }, NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodename, CRISocket: criPath}, }, true}, @@ -1187,6 +1193,26 @@ func TestValidateEtcd(t *testing.T) { } } +func TestValidateEncryptionAlgorithm(t *testing.T) { + var tests = []struct { + name string + algo string + expectedErrors bool + }{ + {name: "valid RSA", algo: string(kubeadmapi.EncryptionAlgorithmRSA), expectedErrors: false}, + {name: "valid ECDSA", algo: string(kubeadmapi.EncryptionAlgorithmECDSA), expectedErrors: false}, + {name: "invalid algorithm", algo: "foo", expectedErrors: true}, + {name: "empty algorithm returns an error", algo: "", expectedErrors: true}, + } + for _, tc := range tests { + actual := ValidateEncryptionAlgorithm(tc.algo, field.NewPath("encryptionAlgorithm")) + actualErrors := len(actual) > 0 
+ if actualErrors != tc.expectedErrors { + t.Errorf("error: validate public key algorithm: %q\n\texpected: %t\n\t actual: %t", tc.algo, tc.expectedErrors, actualErrors) + } + } +} + func TestGetClusterNodeMask(t *testing.T) { tests := []struct { name string diff --git a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go index 1e7d371323f6c..f0265b9a3e7e6 100644 --- a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go @@ -190,7 +190,7 @@ func (in *ControlPlaneComponent) DeepCopyInto(out *ControlPlaneComponent) { } if in.ExtraEnvs != nil { in, out := &in.ExtraEnvs, &out.ExtraEnvs - *out = make([]corev1.EnvVar, len(*in)) + *out = make([]EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -256,6 +256,23 @@ func (in *Discovery) DeepCopy() *Discovery { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + in.EnvVar.DeepCopyInto(&out.EnvVar) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Etcd) DeepCopyInto(out *Etcd) { *out = *in @@ -466,7 +483,7 @@ func (in *LocalEtcd) DeepCopyInto(out *LocalEtcd) { } if in.ExtraEnvs != nil { in, out := &in.ExtraEnvs, &out.ExtraEnvs - *out = make([]corev1.EnvVar, len(*in)) + *out = make([]EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/cmd/kubeadm/app/cmd/certs.go b/cmd/kubeadm/app/cmd/certs.go index 0ea602c1df1af..c0518c36971f3 100644 --- a/cmd/kubeadm/app/cmd/certs.go +++ b/cmd/kubeadm/app/cmd/certs.go @@ -134,8 +134,11 @@ func (o *genCSRConfig) addFlagSet(flagSet *pflag.FlagSet) { func (o *genCSRConfig) load() (err error) { o.kubeadmConfig, err = configutil.LoadOrDefaultInitConfiguration( o.kubeadmConfigPath, - cmdutil.DefaultInitConfiguration(), + &kubeadmapiv1.InitConfiguration{}, &kubeadmapiv1.ClusterConfiguration{}, + configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + }, ) if err != nil { return err @@ -353,7 +356,9 @@ func getInternalCfg(cfgPath string, kubeconfigPath string, cfg kubeadmapiv1.Clus } // Read config from --config if provided. Otherwise, use the default configuration - return configutil.LoadOrDefaultInitConfiguration(cfgPath, cmdutil.DefaultInitConfiguration(), &cfg) + return configutil.LoadOrDefaultInitConfiguration(cfgPath, &kubeadmapiv1.InitConfiguration{}, &cfg, configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + }) } // newCmdCertsExpiration creates a new `cert check-expiration` command. 
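Note: the hunks above introduce a kubeadm-owned EnvVar wrapper around corev1.EnvVar, a ClusterConfiguration.EncryptionAlgorithm field restricted to "RSA" or "ECDSA", and a ValidateEncryptionAlgorithm helper that ValidateClusterConfiguration now calls. The snippet below is a minimal sketch, not part of the patch, showing how the new validator behaves inside the kubeadm module; the import paths are taken from the files changed above, and the expected error counts mirror the TestValidateEncryptionAlgorithm cases.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
)

func main() {
	path := field.NewPath("encryptionAlgorithm")

	// Supported values produce no errors.
	fmt.Println(len(validation.ValidateEncryptionAlgorithm(string(kubeadmapi.EncryptionAlgorithmRSA), path)))   // 0
	fmt.Println(len(validation.ValidateEncryptionAlgorithm(string(kubeadmapi.EncryptionAlgorithmECDSA), path))) // 0

	// Any other value, including the empty string, yields a single field.Invalid error.
	fmt.Println(len(validation.ValidateEncryptionAlgorithm("foo", path))) // 1
	fmt.Println(len(validation.ValidateEncryptionAlgorithm("", path)))    // 1
}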
diff --git a/cmd/kubeadm/app/cmd/certs_test.go b/cmd/kubeadm/app/cmd/certs_test.go index 40f6770b52db4..8dff90ad8e23e 100644 --- a/cmd/kubeadm/app/cmd/certs_test.go +++ b/cmd/kubeadm/app/cmd/certs_test.go @@ -134,6 +134,7 @@ func TestRunRenewCommands(t *testing.T) { // Generate all the kubeconfig files with embedded certs for _, kubeConfig := range []string{ kubeadmconstants.AdminKubeConfigFileName, + kubeadmconstants.SuperAdminKubeConfigFileName, kubeadmconstants.SchedulerKubeConfigFileName, kubeadmconstants.ControllerManagerKubeConfigFileName, } { @@ -162,6 +163,7 @@ func TestRunRenewCommands(t *testing.T) { }, KubeconfigFiles: []string{ kubeadmconstants.AdminKubeConfigFileName, + kubeadmconstants.SuperAdminKubeConfigFileName, kubeadmconstants.SchedulerKubeConfigFileName, kubeadmconstants.ControllerManagerKubeConfigFileName, }, @@ -214,6 +216,12 @@ func TestRunRenewCommands(t *testing.T) { kubeadmconstants.AdminKubeConfigFileName, }, }, + { + command: "super-admin.conf", + KubeconfigFiles: []string{ + kubeadmconstants.SuperAdminKubeConfigFileName, + }, + }, { command: "scheduler.conf", KubeconfigFiles: []string{ diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index f328547687acc..e19d10e56563b 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -212,6 +212,9 @@ func getDefaultInitConfigBytes() ([]byte, error) { } func getDefaultNodeConfigBytes() ([]byte, error) { + opts := configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + } internalcfg, err := configutil.DefaultedJoinConfiguration(&kubeadmapiv1old.JoinConfiguration{ Discovery: kubeadmapiv1old.Discovery{ BootstrapToken: &kubeadmapiv1old.BootstrapTokenDiscovery{ @@ -220,10 +223,7 @@ func getDefaultNodeConfigBytes() ([]byte, error) { UnsafeSkipCAVerification: true, // TODO: UnsafeSkipCAVerification: true needs to be set for validation to pass, but shouldn't be recommended as the default }, }, - NodeRegistration: kubeadmapiv1old.NodeRegistrationOptions{ - CRISocket: constants.DefaultCRISocket, // avoid CRI detection - }, - }) + }, opts) if err != nil { return []byte{}, err } @@ -232,9 +232,10 @@ func getDefaultNodeConfigBytes() ([]byte, error) { } func getDefaultResetConfigBytes() ([]byte, error) { - internalcfg, err := configutil.DefaultedResetConfiguration(&kubeadmapiv1.ResetConfiguration{ - CRISocket: constants.DefaultCRISocket, // avoid CRI detection - }) + opts := configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + } + internalcfg, err := configutil.DefaultedResetConfiguration(&kubeadmapiv1.ResetConfiguration{}, opts) if err != nil { return []byte{}, err } @@ -367,7 +368,7 @@ func newCmdConfigImagesPull() *cobra.Command { if err != nil { return err } - internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, externalInitCfg, externalClusterCfg) + internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, externalInitCfg, externalClusterCfg, configutil.LoadOrDefaultConfigurationOptions{}) if err != nil { return err } @@ -442,7 +443,9 @@ func newCmdConfigImagesList(out io.Writer, mockK8sVersion *string) *cobra.Comman // NewImagesList returns the underlying struct for the "kubeadm config images list" command func NewImagesList(cfgPath string, cfg *kubeadmapiv1old.ClusterConfiguration) (*ImagesList, error) { - initcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, cmdutil.DefaultInitConfiguration(), cfg) + initcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, 
&kubeadmapiv1old.InitConfiguration{}, cfg, configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + }) if err != nil { return nil, errors.Wrap(err, "could not convert cfg to an internal cfg") } diff --git a/cmd/kubeadm/app/cmd/config_test.go b/cmd/kubeadm/app/cmd/config_test.go index 7cf01f197907d..6ff0f00df8c28 100644 --- a/cmd/kubeadm/app/cmd/config_test.go +++ b/cmd/kubeadm/app/cmd/config_test.go @@ -82,13 +82,8 @@ func TestImagesListRunWithCustomConfigPath(t *testing.T) { }, configContents: []byte(dedent.Dedent(fmt.Sprintf(` apiVersion: %s -kind: InitConfiguration -nodeRegistration: - criSocket: %s ---- -apiVersion: %[1]s kind: ClusterConfiguration -kubernetesVersion: %[3]s`, kubeadmapiv1.SchemeGroupVersion.String(), constants.UnknownCRISocket, constants.CurrentKubernetesVersion))), +kubernetesVersion: %s`, kubeadmapiv1.SchemeGroupVersion.String(), constants.CurrentKubernetesVersion))), }, { name: "use coredns", @@ -98,13 +93,8 @@ kubernetesVersion: %[3]s`, kubeadmapiv1.SchemeGroupVersion.String(), constants.U }, configContents: []byte(dedent.Dedent(fmt.Sprintf(` apiVersion: %s -kind: InitConfiguration -nodeRegistration: - criSocket: %s ---- -apiVersion: %[1]s kind: ClusterConfiguration -kubernetesVersion: %[3]s`, kubeadmapiv1.SchemeGroupVersion.String(), constants.UnknownCRISocket, constants.MinimumControlPlaneVersion))), +kubernetesVersion: %s`, kubeadmapiv1.SchemeGroupVersion.String(), constants.MinimumControlPlaneVersion))), }, } diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 1c467bf711e3d..f57acf12d8c3f 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -63,6 +63,7 @@ type initOptions struct { uploadCerts bool skipCertificateKeyPrint bool patchesDir string + skipCRIDetect bool } // compile-time assert that the local data object satisfies the phases data interface. @@ -71,20 +72,21 @@ var _ phases.InitData = &initData{} // initData defines all the runtime information used when running the kubeadm init workflow; // this data is shared across all the phases that are included in the workflow. type initData struct { - cfg *kubeadmapi.InitConfiguration - skipTokenPrint bool - dryRun bool - kubeconfigDir string - kubeconfigPath string - ignorePreflightErrors sets.Set[string] - certificatesDir string - dryRunDir string - externalCA bool - client clientset.Interface - outputWriter io.Writer - uploadCerts bool - skipCertificateKeyPrint bool - patchesDir string + cfg *kubeadmapi.InitConfiguration + skipTokenPrint bool + dryRun bool + kubeconfigDir string + kubeconfigPath string + ignorePreflightErrors sets.Set[string] + certificatesDir string + dryRunDir string + externalCA bool + client clientset.Interface + outputWriter io.Writer + uploadCerts bool + skipCertificateKeyPrint bool + patchesDir string + adminKubeConfigBootstrapped bool } // newCmdInit returns "kubeadm init" command. 
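Note: across the command files in this patch, the earlier workaround of pre-seeding NodeRegistration.CRISocket with the (now removed) UnknownCRISocket constant is replaced by an explicit SkipCRIDetect field on configutil.LoadOrDefaultConfigurationOptions, threaded through initOptions, joinOptions and resetOptions. Below is a minimal sketch, not part of the patch, of the new call shape; it mirrors the LoadOrDefaultInitConfiguration calls in the hunks above and assumes the configutil and kubeadmapiv1 import aliases used there.

package example

import (
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
	configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
)

// loadWithoutCRIDetection loads (or defaults) an InitConfiguration for commands whose
// execution does not depend on the CRI socket, so CRI auto-detection is skipped entirely.
func loadWithoutCRIDetection(cfgPath string) (*kubeadmapi.InitConfiguration, error) {
	return configutil.LoadOrDefaultInitConfiguration(
		cfgPath,
		&kubeadmapiv1.InitConfiguration{},
		&kubeadmapiv1.ClusterConfiguration{},
		configutil.LoadOrDefaultConfigurationOptions{SkipCRIDetect: true},
	)
}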
@@ -150,9 +152,9 @@ func newCmdInit(out io.Writer, initOptions *initOptions) *cobra.Command { // both when running the entire workflow or single phases initRunner.SetDataInitializer(func(cmd *cobra.Command, args []string) (workflow.RunData, error) { if cmd.Flags().Lookup(options.NodeCRISocket) == nil { - // avoid CRI detection + // skip CRI detection // assume that the command execution does not depend on CRISocket when --cri-socket flag is not set - initOptions.externalInitCfg.NodeRegistration.CRISocket = kubeadmconstants.UnknownCRISocket + initOptions.skipCRIDetect = true } data, err := newInitData(cmd, args, initOptions, out) if err != nil { @@ -301,7 +303,9 @@ func newInitData(cmd *cobra.Command, args []string, initOptions *initOptions, ou // Either use the config file if specified, or convert public kubeadm API to the internal InitConfiguration // and validates InitConfiguration - cfg, err := configutil.LoadOrDefaultInitConfiguration(initOptions.cfgPath, initOptions.externalInitCfg, initOptions.externalClusterCfg) + cfg, err := configutil.LoadOrDefaultInitConfiguration(initOptions.cfgPath, initOptions.externalInitCfg, initOptions.externalClusterCfg, configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: initOptions.skipCRIDetect, + }) if err != nil { return nil, err } @@ -492,12 +496,22 @@ func (d *initData) Client() (clientset.Interface, error) { // If we're dry-running, we should create a faked client that answers some GETs in order to be able to do the full init flow and just logs the rest of requests dryRunGetter := apiclient.NewInitDryRunGetter(d.cfg.NodeRegistration.Name, svcSubnetCIDR.String()) d.client = apiclient.NewDryRunClient(dryRunGetter, os.Stdout) - } else { - // If we're acting for real, we should create a connection to the API server and wait for it to come up + } else { // Use a real client var err error - d.client, err = kubeconfigutil.ClientSetFromFile(d.KubeConfigPath()) - if err != nil { - return nil, err + if !d.adminKubeConfigBootstrapped { + // Call EnsureAdminClusterRoleBinding() to obtain a working client from admin.conf. + d.client, err = kubeconfigphase.EnsureAdminClusterRoleBinding(kubeadmconstants.KubernetesDir, nil) + if err != nil { + return nil, errors.Wrapf(err, "could not bootstrap the admin user in file %s", kubeadmconstants.AdminKubeConfigFileName) + } + d.adminKubeConfigBootstrapped = true + } else { + // In case adminKubeConfigBootstrapped is already set just return a client from the default + // kubeconfig location. 
+ d.client, err = kubeconfigutil.ClientSetFromFile(d.KubeConfigPath()) + if err != nil { + return nil, err + } } } } diff --git a/cmd/kubeadm/app/cmd/init_test.go b/cmd/kubeadm/app/cmd/init_test.go index ef380388af245..2f859752819dd 100644 --- a/cmd/kubeadm/app/cmd/init_test.go +++ b/cmd/kubeadm/app/cmd/init_test.go @@ -81,7 +81,7 @@ func TestNewInitData(t *testing.T) { }{ // Init data passed using flags { - name: "pass without any flag except the cri socket (use defaults)", + name: "pass without any flag (use defaults)", }, { name: "fail if unknown feature gates flag are passed", @@ -188,17 +188,9 @@ func TestNewInitData(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // initialize an external init option and inject it to the init cmd initOptions := newInitOptions() + initOptions.skipCRIDetect = true // avoid CRI detection in unit tests cmd := newCmdInit(nil, initOptions) - // set the cri socket here, otherwise the testcase might fail if is run on the node with multiple - // cri endpoints configured, the failure caused by this is normally not an expected failure. - if tc.flags == nil { - tc.flags = make(map[string]string) - } - // set `cri-socket` only if `CfgPath` is not set - if _, okay := tc.flags[options.CfgPath]; !okay { - tc.flags[options.NodeCRISocket] = constants.UnknownCRISocket - } // sets cmd flags (that will be reflected on the init options) for f, v := range tc.flags { cmd.Flags().Set(f, v) diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index f5ffb0f37c1e7..67353120b5ffe 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -135,6 +135,7 @@ type joinOptions struct { externalcfg *kubeadmapiv1.JoinConfiguration patchesDir string dryRun bool + skipCRIDetect bool } // compile-time assert that the local data object satisfies the phases data interface. @@ -221,9 +222,9 @@ func newCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { // both when running the entire workflow or single phases joinRunner.SetDataInitializer(func(cmd *cobra.Command, args []string) (workflow.RunData, error) { if cmd.Flags().Lookup(options.NodeCRISocket) == nil { - // avoid CRI detection + // skip CRI detection // assume that the command execution does not depend on CRISocket when --cri-socket flag is not set - joinOptions.externalcfg.NodeRegistration.CRISocket = kubeadmconstants.UnknownCRISocket + joinOptions.skipCRIDetect = true } data, err := newJoinData(cmd, args, joinOptions, out, kubeadmconstants.GetAdminKubeConfigPath()) if err != nil { @@ -426,7 +427,9 @@ func newJoinData(cmd *cobra.Command, args []string, opt *joinOptions, out io.Wri opt.externalcfg.Discovery.BootstrapToken = nil //NB. this could be removed when we get better control on args (e.g. 
phases without discovery should have NoArgs ) } - cfg, err := configutil.LoadOrDefaultJoinConfiguration(opt.cfgPath, opt.externalcfg) + cfg, err := configutil.LoadOrDefaultJoinConfiguration(opt.cfgPath, opt.externalcfg, configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: opt.skipCRIDetect, + }) if err != nil { return nil, err } diff --git a/cmd/kubeadm/app/cmd/join_test.go b/cmd/kubeadm/app/cmd/join_test.go index 2487193d42bf6..1ab6327f405fb 100644 --- a/cmd/kubeadm/app/cmd/join_test.go +++ b/cmd/kubeadm/app/cmd/join_test.go @@ -35,7 +35,6 @@ import ( kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" - "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" ) @@ -312,6 +311,7 @@ func TestNewJoinData(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // initialize an external join option and inject it to the join cmd joinOptions := newJoinOptions() + joinOptions.skipCRIDetect = true // avoid CRI detection in unit tests cmd := newCmdJoin(nil, joinOptions) // set klog output destination to bytes.Buffer so that log could be fetched and verified later. @@ -320,15 +320,6 @@ func TestNewJoinData(t *testing.T) { klog.LogToStderr(false) defer klog.LogToStderr(true) - // set the cri socket here, otherwise the testcase might fail if is run on the node with multiple - // cri endpoints configured, the failure caused by this is normally not an expected failure. - if tc.flags == nil { - tc.flags = make(map[string]string) - } - // set `cri-socket` only if `CfgPath` is not set - if _, okay := tc.flags[options.CfgPath]; !okay { - tc.flags[options.NodeCRISocket] = constants.UnknownCRISocket - } // sets cmd flags (that will be reflected on the join options) for f, v := range tc.flags { cmd.Flags().Set(f, v) diff --git a/cmd/kubeadm/app/cmd/kubeconfig.go b/cmd/kubeadm/app/cmd/kubeconfig.go index baf1bc16bec16..14b592eb2aba3 100644 --- a/cmd/kubeadm/app/cmd/kubeconfig.go +++ b/cmd/kubeadm/app/cmd/kubeconfig.go @@ -65,7 +65,7 @@ func newCmdKubeConfigUtility(out io.Writer) *cobra.Command { // newCmdUserKubeConfig returns sub commands for kubeconfig phase func newCmdUserKubeConfig(out io.Writer) *cobra.Command { - initCfg := cmdutil.DefaultInitConfiguration() + initCfg := &kubeadmapiv1.InitConfiguration{} clusterCfg := &kubeadmapiv1.ClusterConfiguration{} var ( @@ -82,7 +82,9 @@ func newCmdUserKubeConfig(out io.Writer) *cobra.Command { Example: userKubeconfigExample, RunE: func(cmd *cobra.Command, args []string) error { // This call returns the ready-to-use configuration based on the defaults populated by flags - internalCfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, initCfg, clusterCfg) + internalCfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, initCfg, clusterCfg, configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + }) if err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/kubeconfig_test.go b/cmd/kubeadm/app/cmd/kubeconfig_test.go index 1f3c8fb639a90..2b067d24ecbd4 100644 --- a/cmd/kubeadm/app/cmd/kubeconfig_test.go +++ b/cmd/kubeadm/app/cmd/kubeconfig_test.go @@ -45,9 +45,6 @@ func generateTestKubeadmConfig(dir, id, certDir, clusterName string) (string, er AdvertiseAddress: "1.2.3.4", BindPort: 1234, }, - NodeRegistration: kubeadmapiv1.NodeRegistrationOptions{ - CRISocket: kubeadmconstants.UnknownCRISocket, - }, } clusterCfg := kubeadmapiv1.ClusterConfiguration{ 
TypeMeta: metav1.TypeMeta{ diff --git a/cmd/kubeadm/app/cmd/phases/init/certs.go b/cmd/kubeadm/app/cmd/phases/init/certs.go index 5d2ef1cc10075..27c949f5b83c6 100644 --- a/cmd/kubeadm/app/cmd/phases/init/certs.go +++ b/cmd/kubeadm/app/cmd/phases/init/certs.go @@ -39,14 +39,15 @@ var ( saKeyLongDesc = fmt.Sprintf(cmdutil.LongDesc(` Generate the private key for signing service account tokens along with its public key, and save them into %s and %s files. + If both files already exist, kubeadm skips the generation step and existing files will be used. - `+cmdutil.AlphaDisclaimer), kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName) + `), kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName) genericLongDesc = cmdutil.LongDesc(` Generate the %[1]s, and save them into %[2]s.crt and %[2]s.key files.%[3]s If both files already exist, kubeadm skips the generation step and existing files will be used. - ` + cmdutil.AlphaDisclaimer) + `) ) // NewCertsPhase returns the phase for the certs @@ -138,11 +139,12 @@ func getCertPhaseFlags(name string) []string { } func getSANDescription(certSpec *certsphase.KubeadmCert) string { - //Defaulted config we will use to get SAN certs - defaultConfig := cmdutil.DefaultInitConfiguration() - // GetAPIServerAltNames errors without an AdvertiseAddress; this is as good as any. - defaultConfig.LocalAPIEndpoint = kubeadmapiv1.APIEndpoint{ - AdvertiseAddress: "127.0.0.1", + // Defaulted config we will use to get SAN certs + defaultConfig := &kubeadmapiv1.InitConfiguration{ + LocalAPIEndpoint: kubeadmapiv1.APIEndpoint{ + // GetAPIServerAltNames errors without an AdvertiseAddress; this is as good as any. + AdvertiseAddress: "127.0.0.1", + }, } defaultInternalConfig := &kubeadmapi.InitConfiguration{} @@ -188,7 +190,7 @@ func runCertsSa(c workflow.RunData) error { } // create the new service account key (or use existing) - return certsphase.CreateServiceAccountKeyAndPublicKeyFiles(data.CertificateWriteDir(), data.Cfg().ClusterConfiguration.PublicKeyAlgorithm()) + return certsphase.CreateServiceAccountKeyAndPublicKeyFiles(data.CertificateWriteDir(), data.Cfg().ClusterConfiguration.EncryptionAlgorithmType()) } func runCerts(c workflow.RunData) error { diff --git a/cmd/kubeadm/app/cmd/phases/init/kubeconfig.go b/cmd/kubeadm/app/cmd/phases/init/kubeconfig.go index a5cd4bfb2fe6f..77432a133549e 100644 --- a/cmd/kubeadm/app/cmd/phases/init/kubeconfig.go +++ b/cmd/kubeadm/app/cmd/phases/init/kubeconfig.go @@ -41,6 +41,11 @@ var ( short: "Generate a kubeconfig file for the admin to use and for kubeadm itself", long: "Generate the kubeconfig file for the admin and for kubeadm itself, and save it to %s file.", }, + kubeadmconstants.SuperAdminKubeConfigFileName: { + name: "super-admin", + short: "Generate a kubeconfig file for the super-admin", + long: "Generate a kubeconfig file for the super-admin, and save it to %s file.", + }, kubeadmconstants.KubeletKubeConfigFileName: { name: "kubelet", short: "Generate a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes", @@ -77,6 +82,7 @@ func NewKubeConfigPhase() workflow.Phase { RunAllSiblings: true, }, NewKubeConfigFilePhase(kubeadmconstants.AdminKubeConfigFileName), + NewKubeConfigFilePhase(kubeadmconstants.SuperAdminKubeConfigFileName), NewKubeConfigFilePhase(kubeadmconstants.KubeletKubeConfigFileName), NewKubeConfigFilePhase(kubeadmconstants.ControllerManagerKubeConfigFileName), 
NewKubeConfigFilePhase(kubeadmconstants.SchedulerKubeConfigFileName), diff --git a/cmd/kubeadm/app/cmd/phases/reset/cleanupnode.go b/cmd/kubeadm/app/cmd/phases/reset/cleanupnode.go index 671cda62df1d3..d78a2926d9128 100644 --- a/cmd/kubeadm/app/cmd/phases/reset/cleanupnode.go +++ b/cmd/kubeadm/app/cmd/phases/reset/cleanupnode.go @@ -169,6 +169,7 @@ func resetConfigDir(configPathDir string, dirsToClean []string, isDryRun bool) { filesToClean := []string{ filepath.Join(configPathDir, kubeadmconstants.AdminKubeConfigFileName), + filepath.Join(configPathDir, kubeadmconstants.SuperAdminKubeConfigFileName), filepath.Join(configPathDir, kubeadmconstants.KubeletKubeConfigFileName), filepath.Join(configPathDir, kubeadmconstants.KubeletBootstrapKubeConfigFileName), filepath.Join(configPathDir, kubeadmconstants.ControllerManagerKubeConfigFileName), @@ -212,6 +213,7 @@ func CleanDir(filePath string) error { return nil } +// IsDirEmpty returns true if a directory is empty func IsDirEmpty(dir string) (bool, error) { d, err := os.Open(dir) if err != nil { diff --git a/cmd/kubeadm/app/cmd/phases/reset/cleanupnode_test.go b/cmd/kubeadm/app/cmd/phases/reset/cleanupnode_test.go index 7b6aff3408284..de714cc815692 100644 --- a/cmd/kubeadm/app/cmd/phases/reset/cleanupnode_test.go +++ b/cmd/kubeadm/app/cmd/phases/reset/cleanupnode_test.go @@ -68,6 +68,7 @@ func TestConfigDirCleaner(t *testing.T) { "manifests/kube-apiserver.yaml", "pki/ca.pem", kubeadmconstants.AdminKubeConfigFileName, + kubeadmconstants.SuperAdminKubeConfigFileName, kubeadmconstants.KubeletKubeConfigFileName, }, verifyExists: []string{ diff --git a/cmd/kubeadm/app/cmd/reset.go b/cmd/kubeadm/app/cmd/reset.go index d70fc1ddfdcfe..973078e1794b1 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -67,6 +67,7 @@ type resetOptions struct { cfgPath string ignorePreflightErrors []string externalcfg *v1beta4.ResetConfiguration + skipCRIDetect bool } // resetData defines all the runtime information used when running the kubeadm reset workflow; @@ -107,7 +108,10 @@ func newResetData(cmd *cobra.Command, opts *resetOptions, in io.Reader, out io.W var initCfg *kubeadmapi.InitConfiguration // Either use the config file if specified, or convert public kubeadm API to the internal ResetConfiguration and validates cfg. 
- resetCfg, err := configutil.LoadOrDefaultResetConfiguration(opts.cfgPath, opts.externalcfg, allowExperimental) + resetCfg, err := configutil.LoadOrDefaultResetConfiguration(opts.cfgPath, opts.externalcfg, configutil.LoadOrDefaultConfigurationOptions{ + AllowExperimental: allowExperimental, + SkipCRIDetect: opts.skipCRIDetect, + }) if err != nil { return nil, err } @@ -229,9 +233,9 @@ func newCmdReset(in io.Reader, out io.Writer, resetOptions *resetOptions) *cobra // both when running the entire workflow or single phases resetRunner.SetDataInitializer(func(cmd *cobra.Command, args []string) (workflow.RunData, error) { if cmd.Flags().Lookup(options.NodeCRISocket) == nil { - // avoid CRI detection + // skip CRI detection // assume that the command execution does not depend on CRISocket when --cri-socket flag is not set - resetOptions.externalcfg.CRISocket = kubeadmconstants.UnknownCRISocket + resetOptions.skipCRIDetect = true } data, err := newResetData(cmd, resetOptions, in, out, true) if err != nil { diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index 84e0a0c1f909f..ab5d4de7185f0 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -90,7 +90,7 @@ func newCmdToken(out io.Writer, errW io.Writer) *cobra.Command { tokenCmd.PersistentFlags().BoolVar(&dryRun, options.DryRun, dryRun, "Whether to enable dry-run mode or not") - cfg := cmdutil.DefaultInitConfiguration() + cfg := &kubeadmapiv1.InitConfiguration{} // Default values for the cobra help text kubeadmscheme.Scheme.Default(cfg) @@ -242,7 +242,9 @@ func RunCreateToken(out io.Writer, client clientset.Interface, cfgPath string, i // This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags klog.V(1).Infoln("[token] loading configurations") - internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, initCfg, clusterCfg) + internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, initCfg, clusterCfg, configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + }) if err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/token_test.go b/cmd/kubeadm/app/cmd/token_test.go index 5e3e31dbf776f..71d75fee1a290 100644 --- a/cmd/kubeadm/app/cmd/token_test.go +++ b/cmd/kubeadm/app/cmd/token_test.go @@ -36,7 +36,6 @@ import ( outputapischeme "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/scheme" outputapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/v1alpha2" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" - "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util/output" ) @@ -173,9 +172,6 @@ func TestRunCreateToken(t *testing.T) { Groups: tc.extraGroups, }, }, - NodeRegistration: kubeadmapiv1.NodeRegistrationOptions{ - CRISocket: constants.UnknownCRISocket, - }, } err = RunCreateToken(&buf, fakeClient, "", cfg, tc.printJoin, "", "") diff --git a/cmd/kubeadm/app/cmd/upgrade/common.go b/cmd/kubeadm/app/cmd/upgrade/common.go index eec3652021e6b..ee90ddc2a1109 100644 --- a/cmd/kubeadm/app/cmd/upgrade/common.go +++ b/cmd/kubeadm/app/cmd/upgrade/common.go @@ -92,7 +92,7 @@ func loadConfig(cfgPath string, client clientset.Interface, skipComponentConfigs // The resulting configs overwrite the existing cluster ones at the end of a successful upgrade apply operation. 
if isKubeadmConfigPresent(docmap) { klog.Warning("WARNING: Usage of the --config flag with kubeadm config types for reconfiguring the cluster during upgrade is not recommended!") - cfg, err := configutil.BytesToInitConfiguration(configBytes) + cfg, err := configutil.BytesToInitConfiguration(configBytes, false) return cfg, true, err } @@ -120,6 +120,7 @@ func loadConfig(cfgPath string, client clientset.Interface, skipComponentConfigs return initCfg, false, nil } +// LoadConfigFunc is a function type that loads configuration from a file and/or the cluster. type LoadConfigFunc func(cfgPath string, client clientset.Interface, skipComponentConfigs bool, printer output.Printer) (*kubeadmapi.InitConfiguration, bool, error) // enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure @@ -184,7 +185,7 @@ func enforceRequirements(flags *applyPlanFlags, args []string, dryRun bool, upgr // Ensure the user is root klog.V(1).Info("running preflight checks") - if err := runPreflightChecks(client, ignorePreflightErrorsSet, &cfg.ClusterConfiguration, printer); err != nil { + if err := runPreflightChecks(client, ignorePreflightErrorsSet, printer); err != nil { return nil, nil, nil, err } @@ -236,7 +237,7 @@ func printConfiguration(clustercfg *kubeadmapi.ClusterConfiguration, w io.Writer } // runPreflightChecks runs the root preflight check -func runPreflightChecks(client clientset.Interface, ignorePreflightErrors sets.Set[string], cfg *kubeadmapi.ClusterConfiguration, printer output.Printer) error { +func runPreflightChecks(client clientset.Interface, ignorePreflightErrors sets.Set[string], printer output.Printer) error { printer.Printf("[preflight] Running pre-flight checks.\n") err := preflight.RunRootCheckOnly(ignorePreflightErrors) if err != nil { diff --git a/cmd/kubeadm/app/cmd/upgrade/diff.go b/cmd/kubeadm/app/cmd/upgrade/diff.go index 6516284985c95..4f7a4e7322d5f 100644 --- a/cmd/kubeadm/app/cmd/upgrade/diff.go +++ b/cmd/kubeadm/app/cmd/upgrade/diff.go @@ -111,7 +111,9 @@ func runDiff(flags *diffFlags, args []string) error { var err error var cfg *kubeadmapi.InitConfiguration if flags.cfgPath != "" { - cfg, err = configutil.LoadInitConfigurationFromFile(flags.cfgPath) + cfg, err = configutil.LoadInitConfigurationFromFile(flags.cfgPath, configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + }) } else { var client *client.Clientset client, err = kubeconfigutil.ClientSetFromFile(flags.kubeConfigPath) diff --git a/cmd/kubeadm/app/cmd/upgrade/diff_test.go b/cmd/kubeadm/app/cmd/upgrade/diff_test.go index ca90c05bd82fb..b9aa325a410ef 100644 --- a/cmd/kubeadm/app/cmd/upgrade/diff_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/diff_test.go @@ -49,12 +49,10 @@ func TestRunDiff(t *testing.T) { testUpgradeDiffConfigContents := []byte(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration -nodeRegistration: - criSocket: %s --- apiVersion: %[1]s kind: ClusterConfiguration -kubernetesVersion: %[3]s`, kubeadmapiv1.SchemeGroupVersion.String(), constants.UnknownCRISocket, currentVersion)) +kubernetesVersion: %s`, kubeadmapiv1.SchemeGroupVersion.String(), currentVersion)) testUpgradeDiffConfig, err := createTestRunDiffFile(testUpgradeDiffConfigContents) if err != nil { t.Fatal(err) diff --git a/cmd/kubeadm/app/cmd/upgrade/plan.go b/cmd/kubeadm/app/cmd/upgrade/plan.go index 74df27dca4efe..64a9dc2473c3e 100644 --- a/cmd/kubeadm/app/cmd/upgrade/plan.go +++ b/cmd/kubeadm/app/cmd/upgrade/plan.go @@ -32,16 +32,13 @@ import ( 
"k8s.io/apimachinery/pkg/util/version" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/printers" - clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" outputapischeme "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/scheme" outputapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/v1alpha2" "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs" "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade" - kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/cmd/kubeadm/app/util/output" ) @@ -268,7 +265,7 @@ func runPlan(flags *planFlags, args []string, printer output.Printer) error { // Fetch the current state of the component configs klog.V(1).Infoln("[upgrade/plan] analysing component config version states") - configVersionStates, err := getComponentConfigVersionStates(&cfg.ClusterConfiguration, client, flags.cfgPath) + configVersionStates, err := componentconfigs.GetVersionStates(&cfg.ClusterConfiguration, client) if err != nil { return errors.WithMessage(err, "[upgrade/versions] FATAL") } @@ -352,24 +349,6 @@ func genUpgradePlan(up *upgrade.Upgrade, isExternalEtcd bool) (*outputapiv1alpha return &outputapiv1alpha2.UpgradePlan{Components: components}, unstableVersionFlag, nil } -func getComponentConfigVersionStates(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface, cfgPath string) ([]outputapiv1alpha2.ComponentConfigVersionState, error) { - docmap := kubeadmapi.DocumentMap{} - - if cfgPath != "" { - bytes, err := os.ReadFile(cfgPath) - if err != nil { - return nil, errors.Wrapf(err, "unable to read config file %q", cfgPath) - } - - docmap, err = kubeadmutil.SplitYAMLDocuments(bytes) - if err != nil { - return nil, err - } - } - - return componentconfigs.GetVersionStates(cfg, client, docmap) -} - // printUpgradePlan prints a UX-friendly overview of what versions are available to upgrade to func printUpgradePlan(up *upgrade.Upgrade, plan *outputapiv1alpha2.UpgradePlan, unstableVersionFlag string, isExternalEtcd bool, writer io.Writer, printer output.Printer) { printHeader := true diff --git a/cmd/kubeadm/app/cmd/util/cmdutil.go b/cmd/kubeadm/app/cmd/util/cmdutil.go index 6ee8d2d6ae32b..63e7b0ca17851 100644 --- a/cmd/kubeadm/app/cmd/util/cmdutil.go +++ b/cmd/kubeadm/app/cmd/util/cmdutil.go @@ -31,7 +31,6 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" @@ -103,17 +102,6 @@ func AddCRISocketFlag(flagSet *pflag.FlagSet, criSocket *string) { ) } -// DefaultInitConfiguration return default InitConfiguration. Avoid running the CRI auto-detection -// code as we don't need it. -func DefaultInitConfiguration() *kubeadmapiv1.InitConfiguration { - initCfg := &kubeadmapiv1.InitConfiguration{ - NodeRegistration: kubeadmapiv1.NodeRegistrationOptions{ - CRISocket: kubeadmconstants.UnknownCRISocket, // avoid CRI detection - }, - } - return initCfg -} - // InteractivelyConfirmAction asks the user whether they _really_ want to take the action. 
func InteractivelyConfirmAction(action, question string, r io.Reader) error { fmt.Printf("[%s] %s [y/N]: ", action, question) diff --git a/cmd/kubeadm/app/componentconfigs/configset.go b/cmd/kubeadm/app/componentconfigs/configset.go index 6c2f8e74b8003..0be33d0a292e4 100644 --- a/cmd/kubeadm/app/componentconfigs/configset.go +++ b/cmd/kubeadm/app/componentconfigs/configset.go @@ -289,38 +289,22 @@ func FetchFromClusterWithLocalOverwrites(clusterCfg *kubeadmapi.ClusterConfigura // GetVersionStates returns a slice of ComponentConfigVersionState structs // describing all supported component config groups that were identified on the cluster -func GetVersionStates(clusterCfg *kubeadmapi.ClusterConfiguration, client clientset.Interface, docmap kubeadmapi.DocumentMap) ([]outputapiv1alpha2.ComponentConfigVersionState, error) { +func GetVersionStates(clusterCfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) ([]outputapiv1alpha2.ComponentConfigVersionState, error) { // We don't want to modify clusterCfg so we make a working deep copy of it. // Also, we don't want the defaulted component configs so we get rid of them. scratchClusterCfg := clusterCfg.DeepCopy() scratchClusterCfg.ComponentConfigs = kubeadmapi.ComponentConfigMap{} - // Call FetchFromClusterWithLocalOverwrites. This will populate the configs it can load and will return all - // UnsupportedConfigVersionError(s) in a sinle instance of a MultipleUnsupportedConfigVersionsError. - var multipleVerErrs UnsupportedConfigVersionsErrorMap - err := FetchFromClusterWithLocalOverwrites(scratchClusterCfg, client, docmap) + err := FetchFromCluster(scratchClusterCfg, client) if err != nil { - if vererrs, ok := err.(UnsupportedConfigVersionsErrorMap); ok { - multipleVerErrs = vererrs - } else { - // This seems to be a genuine error so we end here - return nil, err - } + // This seems to be a genuine error so we end here + return nil, err } results := []outputapiv1alpha2.ComponentConfigVersionState{} for _, handler := range known { group := handler.GroupVersion.Group - if vererr, ok := multipleVerErrs[group]; ok { - // If there is an UnsupportedConfigVersionError then we are dealing with a case where the config was user - // supplied and requires manual upgrade - results = append(results, outputapiv1alpha2.ComponentConfigVersionState{ - Group: group, - CurrentVersion: vererr.OldVersion.Version, - PreferredVersion: vererr.CurrentVersion.Version, - ManualUpgradeRequired: true, - }) - } else if _, ok := scratchClusterCfg.ComponentConfigs[group]; ok { + if _, ok := scratchClusterCfg.ComponentConfigs[group]; ok { // Normally loaded component config. No manual upgrade required on behalf of users. 
results = append(results, outputapiv1alpha2.ComponentConfigVersionState{ Group: group, diff --git a/cmd/kubeadm/app/componentconfigs/fakeconfig_test.go b/cmd/kubeadm/app/componentconfigs/fakeconfig_test.go index 520246c9a3071..36227bfd61361 100644 --- a/cmd/kubeadm/app/componentconfigs/fakeconfig_test.go +++ b/cmd/kubeadm/app/componentconfigs/fakeconfig_test.go @@ -617,72 +617,30 @@ func TestGetVersionStates(t *testing.T) { CurrentVersion: currentClusterConfigVersion, PreferredVersion: currentClusterConfigVersion, } - versionStateOld := outputapiv1alpha2.ComponentConfigVersionState{ - Group: kubeadmapiv1.GroupName, - CurrentVersion: oldClusterConfigVersion, - PreferredVersion: currentClusterConfigVersion, - ManualUpgradeRequired: true, - } cases := []struct { - desc string - obj runtime.Object - config string - expected outputapiv1alpha2.ComponentConfigVersionState + desc string + obj runtime.Object + expectedErr bool + expected outputapiv1alpha2.ComponentConfigVersionState }{ { - desc: "appropriate cluster object without overwrite", - obj: testClusterConfigMap(currentFooClusterConfig, false), - expected: versionStateCurrent, - }, - { - desc: "appropriate cluster object with appropriate overwrite", + desc: "appropriate cluster object", obj: testClusterConfigMap(currentFooClusterConfig, false), - config: dedent.Dedent(currentBarClusterConfig), expected: versionStateCurrent, }, { - desc: "appropriate cluster object with old overwrite", - obj: testClusterConfigMap(currentFooClusterConfig, false), - config: dedent.Dedent(oldBarClusterConfig), - expected: versionStateOld, - }, - { - desc: "old config without overwrite returns an error", - obj: testClusterConfigMap(oldFooClusterConfig, false), - expected: versionStateOld, - }, - { - desc: "old config with appropriate overwrite", - obj: testClusterConfigMap(oldFooClusterConfig, false), - config: dedent.Dedent(currentBarClusterConfig), - expected: versionStateCurrent, - }, - { - desc: "old config with old overwrite", - obj: testClusterConfigMap(oldFooClusterConfig, false), - config: dedent.Dedent(oldBarClusterConfig), - expected: versionStateOld, - }, - { - desc: "appropriate signed cluster object without overwrite", - obj: testClusterConfigMap(currentFooClusterConfig, true), - expected: versionStateCurrent, + desc: "old config returns an error", + obj: testClusterConfigMap(oldFooClusterConfig, false), + expectedErr: true, }, { - desc: "appropriate signed cluster object with appropriate overwrite", + desc: "appropriate signed cluster object", obj: testClusterConfigMap(currentFooClusterConfig, true), - config: dedent.Dedent(currentBarClusterConfig), expected: versionStateCurrent, }, { - desc: "appropriate signed cluster object with old overwrit", - obj: testClusterConfigMap(currentFooClusterConfig, true), - config: dedent.Dedent(oldBarClusterConfig), - expected: versionStateOld, - }, - { - desc: "old signed config without an overwrite", + desc: "old signed config", obj: testClusterConfigMap(oldFooClusterConfig, true), expected: outputapiv1alpha2.ComponentConfigVersionState{ Group: kubeadmapiv1.GroupName, @@ -690,38 +648,27 @@ func TestGetVersionStates(t *testing.T) { PreferredVersion: currentClusterConfigVersion, }, }, - { - desc: "old signed config with appropriate overwrite", - obj: testClusterConfigMap(oldFooClusterConfig, true), - config: dedent.Dedent(currentBarClusterConfig), - expected: versionStateCurrent, - }, - { - desc: "old signed config with old overwrite", - obj: testClusterConfigMap(oldFooClusterConfig, true), - config: 
dedent.Dedent(oldBarClusterConfig), - expected: versionStateOld, - }, } for _, test := range cases { t.Run(test.desc, func(t *testing.T) { client := clientsetfake.NewSimpleClientset(test.obj) - docmap, err := kubeadmutil.SplitYAMLDocuments([]byte(test.config)) - if err != nil { - t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", err) - } - clusterCfg := testClusterCfg() - got, err := GetVersionStates(clusterCfg, client, docmap) - if err != nil { + got, err := GetVersionStates(clusterCfg, client) + if err != nil && !test.expectedErr { t.Errorf("unexpected error: %v", err) - } else if len(got) != 1 { - t.Errorf("got %d, but expected only a single result: %v", len(got), got) - } else if got[0] != test.expected { - t.Errorf("unexpected result:\n\texpected: %v\n\tgot: %v", test.expected, got[0]) + } + if err == nil { + if test.expectedErr { + t.Errorf("expected error not found: %v", test.expectedErr) + } + if len(got) != 1 { + t.Errorf("got %d, but expected only a single result: %v", len(got), got) + } else if got[0] != test.expected { + t.Errorf("unexpected result:\n\texpected: %v\n\tgot: %v", test.expected, got[0]) + } } }) } diff --git a/cmd/kubeadm/app/componentconfigs/kubelet.go b/cmd/kubeadm/app/componentconfigs/kubelet.go index 1071697e85ea5..d6b0fd10c66ff 100644 --- a/cmd/kubeadm/app/componentconfigs/kubelet.go +++ b/cmd/kubeadm/app/componentconfigs/kubelet.go @@ -23,7 +23,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" kubeletconfig "k8s.io/kubelet/config/v1beta1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" @@ -151,7 +151,7 @@ func (kc *kubeletConfig) Default(cfg *kubeadmapi.ClusterConfiguration, _ *kubead } if kc.config.Authentication.Anonymous.Enabled == nil { - kc.config.Authentication.Anonymous.Enabled = pointer.Bool(kubeletAuthenticationAnonymousEnabled) + kc.config.Authentication.Anonymous.Enabled = ptr.To(kubeletAuthenticationAnonymousEnabled) } else if *kc.config.Authentication.Anonymous.Enabled { warnDefaultComponentConfigValue(kind, "authentication.anonymous.enabled", kubeletAuthenticationAnonymousEnabled, *kc.config.Authentication.Anonymous.Enabled) } @@ -166,7 +166,7 @@ func (kc *kubeletConfig) Default(cfg *kubeadmapi.ClusterConfiguration, _ *kubead // Let clients using other authentication methods like ServiceAccount tokens also access the kubelet API if kc.config.Authentication.Webhook.Enabled == nil { - kc.config.Authentication.Webhook.Enabled = pointer.Bool(kubeletAuthenticationWebhookEnabled) + kc.config.Authentication.Webhook.Enabled = ptr.To(kubeletAuthenticationWebhookEnabled) } else if !*kc.config.Authentication.Webhook.Enabled { warnDefaultComponentConfigValue(kind, "authentication.webhook.enabled", kubeletAuthenticationWebhookEnabled, *kc.config.Authentication.Webhook.Enabled) } @@ -179,7 +179,7 @@ func (kc *kubeletConfig) Default(cfg *kubeadmapi.ClusterConfiguration, _ *kubead } if kc.config.HealthzPort == nil { - kc.config.HealthzPort = pointer.Int32(constants.KubeletHealthzPort) + kc.config.HealthzPort = ptr.To[int32](constants.KubeletHealthzPort) } else if *kc.config.HealthzPort != constants.KubeletHealthzPort { warnDefaultComponentConfigValue(kind, "healthzPort", constants.KubeletHealthzPort, *kc.config.HealthzPort) } @@ -203,7 +203,7 @@ func (kc *kubeletConfig) Default(cfg *kubeadmapi.ClusterConfiguration, _ *kubead } if ok { if kc.config.ResolverConfig == nil { - kc.config.ResolverConfig = 
pointer.String(kubeletSystemdResolverConfig) + kc.config.ResolverConfig = ptr.To(kubeletSystemdResolverConfig) } else { if *kc.config.ResolverConfig != kubeletSystemdResolverConfig { warnDefaultComponentConfigValue(kind, "resolvConf", kubeletSystemdResolverConfig, *kc.config.ResolverConfig) diff --git a/cmd/kubeadm/app/componentconfigs/kubelet_test.go b/cmd/kubeadm/app/componentconfigs/kubelet_test.go index cc46a7b250d98..4ebb718ef6628 100644 --- a/cmd/kubeadm/app/componentconfigs/kubelet_test.go +++ b/cmd/kubeadm/app/componentconfigs/kubelet_test.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" clientsetfake "k8s.io/client-go/kubernetes/fake" kubeletconfig "k8s.io/kubelet/config/v1beta1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" @@ -52,7 +52,7 @@ func TestKubeletDefault(t *testing.T) { var resolverConfig *string if isSystemdResolvedActive, _ := isServiceActive("systemd-resolved"); isSystemdResolvedActive { // If systemd-resolved is active, we need to set the default resolver config - resolverConfig = pointer.String(kubeletSystemdResolverConfig) + resolverConfig = ptr.To(kubeletSystemdResolverConfig) } tests := []struct { @@ -73,17 +73,17 @@ func TestKubeletDefault(t *testing.T) { ClientCAFile: constants.CACertName, }, Anonymous: kubeletconfig.KubeletAnonymousAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationAnonymousEnabled), + Enabled: ptr.To(kubeletAuthenticationAnonymousEnabled), }, Webhook: kubeletconfig.KubeletWebhookAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationWebhookEnabled), + Enabled: ptr.To(kubeletAuthenticationWebhookEnabled), }, }, Authorization: kubeletconfig.KubeletAuthorization{ Mode: kubeletconfig.KubeletAuthorizationModeWebhook, }, HealthzBindAddress: kubeletHealthzBindAddress, - HealthzPort: pointer.Int32(constants.KubeletHealthzPort), + HealthzPort: ptr.To[int32](constants.KubeletHealthzPort), RotateCertificates: kubeletRotateCertificates, ResolverConfig: resolverConfig, CgroupDriver: constants.CgroupDriverSystemd, @@ -107,17 +107,17 @@ func TestKubeletDefault(t *testing.T) { ClientCAFile: constants.CACertName, }, Anonymous: kubeletconfig.KubeletAnonymousAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationAnonymousEnabled), + Enabled: ptr.To(kubeletAuthenticationAnonymousEnabled), }, Webhook: kubeletconfig.KubeletWebhookAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationWebhookEnabled), + Enabled: ptr.To(kubeletAuthenticationWebhookEnabled), }, }, Authorization: kubeletconfig.KubeletAuthorization{ Mode: kubeletconfig.KubeletAuthorizationModeWebhook, }, HealthzBindAddress: kubeletHealthzBindAddress, - HealthzPort: pointer.Int32(constants.KubeletHealthzPort), + HealthzPort: ptr.To[int32](constants.KubeletHealthzPort), RotateCertificates: kubeletRotateCertificates, ResolverConfig: resolverConfig, CgroupDriver: constants.CgroupDriverSystemd, @@ -141,17 +141,17 @@ func TestKubeletDefault(t *testing.T) { ClientCAFile: constants.CACertName, }, Anonymous: kubeletconfig.KubeletAnonymousAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationAnonymousEnabled), + Enabled: ptr.To(kubeletAuthenticationAnonymousEnabled), }, Webhook: kubeletconfig.KubeletWebhookAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationWebhookEnabled), + Enabled: ptr.To(kubeletAuthenticationWebhookEnabled), }, }, Authorization: kubeletconfig.KubeletAuthorization{ Mode: 
kubeletconfig.KubeletAuthorizationModeWebhook, }, HealthzBindAddress: kubeletHealthzBindAddress, - HealthzPort: pointer.Int32(constants.KubeletHealthzPort), + HealthzPort: ptr.To[int32](constants.KubeletHealthzPort), RotateCertificates: kubeletRotateCertificates, ResolverConfig: resolverConfig, CgroupDriver: constants.CgroupDriverSystemd, @@ -176,17 +176,17 @@ func TestKubeletDefault(t *testing.T) { ClientCAFile: constants.CACertName, }, Anonymous: kubeletconfig.KubeletAnonymousAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationAnonymousEnabled), + Enabled: ptr.To(kubeletAuthenticationAnonymousEnabled), }, Webhook: kubeletconfig.KubeletWebhookAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationWebhookEnabled), + Enabled: ptr.To(kubeletAuthenticationWebhookEnabled), }, }, Authorization: kubeletconfig.KubeletAuthorization{ Mode: kubeletconfig.KubeletAuthorizationModeWebhook, }, HealthzBindAddress: kubeletHealthzBindAddress, - HealthzPort: pointer.Int32(constants.KubeletHealthzPort), + HealthzPort: ptr.To[int32](constants.KubeletHealthzPort), RotateCertificates: kubeletRotateCertificates, ResolverConfig: resolverConfig, CgroupDriver: constants.CgroupDriverSystemd, @@ -208,17 +208,17 @@ func TestKubeletDefault(t *testing.T) { ClientCAFile: filepath.Join("/path/to/certs", constants.CACertName), }, Anonymous: kubeletconfig.KubeletAnonymousAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationAnonymousEnabled), + Enabled: ptr.To(kubeletAuthenticationAnonymousEnabled), }, Webhook: kubeletconfig.KubeletWebhookAuthentication{ - Enabled: pointer.Bool(kubeletAuthenticationWebhookEnabled), + Enabled: ptr.To(kubeletAuthenticationWebhookEnabled), }, }, Authorization: kubeletconfig.KubeletAuthorization{ Mode: kubeletconfig.KubeletAuthorizationModeWebhook, }, HealthzBindAddress: kubeletHealthzBindAddress, - HealthzPort: pointer.Int32(constants.KubeletHealthzPort), + HealthzPort: ptr.To[int32](constants.KubeletHealthzPort), RotateCertificates: kubeletRotateCertificates, ResolverConfig: resolverConfig, CgroupDriver: constants.CgroupDriverSystemd, diff --git a/cmd/kubeadm/app/componentconfigs/kubelet_windows.go b/cmd/kubeadm/app/componentconfigs/kubelet_windows.go index 4573f6d7aa84d..1af50fb5dadb6 100644 --- a/cmd/kubeadm/app/componentconfigs/kubelet_windows.go +++ b/cmd/kubeadm/app/componentconfigs/kubelet_windows.go @@ -24,7 +24,7 @@ import ( "github.com/pkg/errors" "k8s.io/klog/v2" kubeletconfig "k8s.io/kubelet/config/v1beta1" - utilpointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // Mutate modifies absolute path fields in the KubeletConfiguration to be Windows compatible absolute paths. @@ -70,7 +70,7 @@ func mutatePaths(cfg *kubeletconfig.KubeletConfiguration, drive string) { // Mutate the fields we care about. 
klog.V(2).Infof("[componentconfig] kubelet/Windows: changing field \"resolverConfig\" to empty") - cfg.ResolverConfig = utilpointer.String("") + cfg.ResolverConfig = ptr.To("") mutateStringField("staticPodPath", &cfg.StaticPodPath) mutateStringField("authentication.x509.clientCAFile", &cfg.Authentication.X509.ClientCAFile) } diff --git a/cmd/kubeadm/app/componentconfigs/kubelet_windows_test.go b/cmd/kubeadm/app/componentconfigs/kubelet_windows_test.go index 09c8d25a6f74b..56ea3d81112ca 100644 --- a/cmd/kubeadm/app/componentconfigs/kubelet_windows_test.go +++ b/cmd/kubeadm/app/componentconfigs/kubelet_windows_test.go @@ -22,7 +22,7 @@ import ( "testing" kubeletconfig "k8s.io/kubelet/config/v1beta1" - utilpointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestMutatePaths(t *testing.T) { @@ -46,7 +46,7 @@ func TestMutatePaths(t *testing.T) { }, }, expected: &kubeletconfig.KubeletConfiguration{ - ResolverConfig: utilpointer.String(""), + ResolverConfig: ptr.To(""), StaticPodPath: filepath.Join(drive, "/foo/staticpods"), Authentication: kubeletconfig.KubeletAuthentication{ X509: kubeletconfig.KubeletX509Authentication{ @@ -67,7 +67,7 @@ func TestMutatePaths(t *testing.T) { }, }, expected: &kubeletconfig.KubeletConfiguration{ - ResolverConfig: utilpointer.String(""), + ResolverConfig: ptr.To(""), StaticPodPath: "./foo/staticpods", Authentication: kubeletconfig.KubeletAuthentication{ X509: kubeletconfig.KubeletX509Authentication{ diff --git a/cmd/kubeadm/app/componentconfigs/kubeproxy.go b/cmd/kubeadm/app/componentconfigs/kubeproxy.go index d4cfe6ab5bbe4..5b6fcf1af1c6d 100644 --- a/cmd/kubeadm/app/componentconfigs/kubeproxy.go +++ b/cmd/kubeadm/app/componentconfigs/kubeproxy.go @@ -17,7 +17,9 @@ limitations under the License. package componentconfigs import ( + "github.com/pkg/errors" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" kubeproxyconfig "k8s.io/kube-proxy/config/v1alpha1" netutils "k8s.io/utils/net" @@ -49,7 +51,14 @@ var kubeProxyHandler = handler{ } func kubeProxyConfigFromCluster(h *handler, clientset clientset.Interface, _ *kubeadmapi.ClusterConfiguration) (kubeadmapi.ComponentConfig, error) { - return h.fromConfigMap(clientset, kubeadmconstants.KubeProxyConfigMap, kubeadmconstants.KubeProxyConfigMapKey, false) + configMapName := kubeadmconstants.KubeProxyConfigMap + klog.V(1).Infof("attempting to download the KubeProxyConfiguration from ConfigMap %q", configMapName) + cm, err := h.fromConfigMap(clientset, configMapName, kubeadmconstants.KubeProxyConfigMapKey, false) + if err != nil { + return nil, errors.Wrapf(err, "could not download the kube-proxy configuration from ConfigMap %q", + configMapName) + } + return cm, nil } // kubeProxyConfig implements the kubeadmapi.ComponentConfig interface for kube-proxy diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index f316b9db8ce29..89864e1e03387 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -146,8 +146,11 @@ const ( // FrontProxyClientCertCommonName defines front proxy certificate common name FrontProxyClientCertCommonName = "front-proxy-client" //used as subject.commonname attribute (CN) - // AdminKubeConfigFileName defines name for the kubeconfig aimed to be used by the superuser/admin of the cluster + // AdminKubeConfigFileName defines name for the kubeconfig aimed to be used by the admin of the cluster AdminKubeConfigFileName = "admin.conf" + // SuperAdminKubeConfigFileName defines name for the kubeconfig aimed to be 
used by the super-admin of the cluster + SuperAdminKubeConfigFileName = "super-admin.conf" + + // KubeletBootstrapKubeConfigFileName defines the file name for the kubeconfig that the kubelet will use to do // the TLS bootstrap to get itself an unique credential KubeletBootstrapKubeConfigFileName = "bootstrap-kubelet.conf" @@ -201,6 +204,10 @@ const ( NodeAutoApproveBootstrapClusterRoleBinding = "kubeadm:node-autoapprove-bootstrap" // NodeAutoApproveCertificateRotationClusterRoleBinding defines name of the ClusterRoleBinding that makes the csrapprover approve node auto rotated CSRs NodeAutoApproveCertificateRotationClusterRoleBinding = "kubeadm:node-autoapprove-certificate-rotation" + // ClusterAdminsGroupAndClusterRoleBinding is the name of the Group used for kubeadm generated cluster + // admin credentials and the name of the ClusterRoleBinding that binds the same Group to the "cluster-admin" + // built-in ClusterRole. + ClusterAdminsGroupAndClusterRoleBinding = "kubeadm:cluster-admins" // APICallRetryInterval defines how long kubeadm should wait before retrying a failed API operation APICallRetryInterval = 500 * time.Millisecond @@ -261,9 +268,6 @@ const ( // init/join time for use later. kubeadm annotates the node object with this information AnnotationKubeadmCRISocket = "kubeadm.alpha.kubernetes.io/cri-socket" - // UnknownCRISocket defines the undetected or unknown CRI socket - UnknownCRISocket = "unix:///var/run/unknown.sock" - // KubeadmConfigConfigMap specifies in what ConfigMap in the kube-system namespace the `kubeadm init` configuration should be stored KubeadmConfigConfigMap = "kubeadm-config" @@ -453,7 +457,7 @@ var ( MinimumControlPlaneVersion = getSkewedKubernetesVersion(-1) // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports - MinimumKubeletVersion = getSkewedKubernetesVersion(-1) + MinimumKubeletVersion = getSkewedKubernetesVersion(-3) // CurrentKubernetesVersion specifies current Kubernetes version supported by kubeadm CurrentKubernetesVersion = getSkewedKubernetesVersion(0) @@ -570,6 +574,11 @@ func GetAdminKubeConfigPath() string { return filepath.Join(KubernetesDir, AdminKubeConfigFileName) } +// GetSuperAdminKubeConfigPath returns the location on the disk where the super-admin kubeconfig is located by default +func GetSuperAdminKubeConfigPath() string { + return filepath.Join(KubernetesDir, SuperAdminKubeConfigFileName) +} + // GetBootstrapKubeletKubeConfigPath returns the location on the disk where bootstrap kubelet kubeconfig is located by default func GetBootstrapKubeletKubeConfigPath() string { return filepath.Join(KubernetesDir, KubeletBootstrapKubeConfigFileName) diff --git a/cmd/kubeadm/app/constants/constants_test.go b/cmd/kubeadm/app/constants/constants_test.go index 3b1dabb391288..bc33346a8d2f1 100644 --- a/cmd/kubeadm/app/constants/constants_test.go +++ b/cmd/kubeadm/app/constants/constants_test.go @@ -50,6 +50,19 @@ func TestGetAdminKubeConfigPath(t *testing.T) { } } +func TestGetSuperAdminKubeConfigPath(t *testing.T) { + expected := filepath.Join(KubernetesDir, SuperAdminKubeConfigFileName) + actual := GetSuperAdminKubeConfigPath() + + if actual != expected { + t.Errorf( + "failed GetSuperAdminKubeConfigPath:\n\texpected: %s\n\t actual: %s", + expected, + actual, + ) + } +} + func TestGetBootstrapKubeletKubeConfigPath(t *testing.T) { expected := filepath.FromSlash("/etc/kubernetes/bootstrap-kubelet.conf") actual := GetBootstrapKubeletKubeConfigPath() diff --git a/cmd/kubeadm/app/features/features.go 
b/cmd/kubeadm/app/features/features.go index 5c007a708d4be..eded870ab2761 100644 --- a/cmd/kubeadm/app/features/features.go +++ b/cmd/kubeadm/app/features/features.go @@ -42,7 +42,10 @@ const ( // InitFeatureGates are the default feature gates for the init command var InitFeatureGates = FeatureList{ - PublicKeysECDSA: {FeatureSpec: featuregate.FeatureSpec{Default: false, PreRelease: featuregate.Alpha}}, + PublicKeysECDSA: { + FeatureSpec: featuregate.FeatureSpec{Default: false, PreRelease: featuregate.Deprecated}, + DeprecationMessage: "The PublicKeysECDSA feature gate is deprecated and will be removed after the feature 'ClusterConfiguration.EncryptionAlgorithm' is added.", + }, RootlessControlPlane: {FeatureSpec: featuregate.FeatureSpec{Default: false, PreRelease: featuregate.Alpha}}, EtcdLearnerMode: {FeatureSpec: featuregate.FeatureSpec{Default: true, PreRelease: featuregate.Beta}}, UpgradeAddonsBeforeControlPlane: { @@ -143,7 +146,7 @@ func NewFeatureGate(f *FeatureList, value string) (map[string]bool, error) { } if featureSpec.PreRelease == featuregate.Deprecated { - klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) + klog.Warningf("Setting deprecated feature gate %s=%s. It will be removed in a future release.", k, v) } boolValue, err := strconv.ParseBool(v) diff --git a/cmd/kubeadm/app/phases/addons/dns/dns_test.go b/cmd/kubeadm/app/phases/addons/dns/dns_test.go index 00e57310ea03e..487b547673c76 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns_test.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns_test.go @@ -742,7 +742,7 @@ spec: add: - NET_BIND_SERVICE drop: - - all + - ALL readOnlyRootFilesystem: true dnsPolicy: Default volumes: @@ -862,8 +862,7 @@ metadata: for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { out := &bytes.Buffer{} - var replicas int32 - replicas = 3 + var replicas int32 = 3 if err := coreDNSAddon(tt.args.cfg, tt.args.client, &replicas, out, tt.args.printManifest); (err != nil) != tt.wantErr { t.Errorf("coreDNSAddon() error = %v, wantErr %v", err, tt.wantErr) return @@ -1008,7 +1007,7 @@ spec: add: - NET_BIND_SERVICE drop: - - all + - ALL readOnlyRootFilesystem: true dnsPolicy: Default volumes: diff --git a/cmd/kubeadm/app/phases/addons/dns/manifests.go b/cmd/kubeadm/app/phases/addons/dns/manifests.go index 931897b16e2b0..905a2e050e691 100644 --- a/cmd/kubeadm/app/phases/addons/dns/manifests.go +++ b/cmd/kubeadm/app/phases/addons/dns/manifests.go @@ -141,7 +141,7 @@ spec: add: - NET_BIND_SERVICE drop: - - all + - ALL readOnlyRootFilesystem: true dnsPolicy: Default volumes: diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy.go b/cmd/kubeadm/app/phases/addons/proxy/proxy.go index 8a4978ea6b407..6c9fdd28492a3 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy.go @@ -189,7 +189,7 @@ func printOrCreateKubeProxyObjects(cmByte []byte, dsByte []byte, client clientse } func createKubeProxyConfigMap(cfg *kubeadmapi.ClusterConfiguration, localEndpoint *kubeadmapi.APIEndpoint, client clientset.Interface, printManifest bool) ([]byte, error) { - // Generate ControlPlane Enpoint kubeconfig file + // Generate ControlPlane Endpoint kubeconfig file controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, localEndpoint) if err != nil { return []byte(""), err @@ -259,7 +259,7 @@ func createKubeProxyAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset } // Propagate the http/https proxy host environment variables to the 
container env := &kubeproxyDaemonSet.Spec.Template.Spec.Containers[0].Env - *env = append(*env, kubeadmutil.GetProxyEnvVars()...) + *env = append(*env, kubeadmutil.MergeKubeadmEnvVars(kubeadmutil.GetProxyEnvVars())...) // Create the DaemonSet for kube-proxy or update it in case it already exists return []byte(""), apiclient.CreateOrUpdateDaemonSet(client, kubeproxyDaemonSet) diff --git a/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo_test.go b/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo_test.go index e926523fa52b3..7bc4120a3385a 100644 --- a/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo_test.go +++ b/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo_test.go @@ -17,15 +17,20 @@ limitations under the License. package clusterinfo import ( + "context" "os" "testing" "text/template" + rbac "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/user" clientsetfake "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" ) var testConfigTempl = template.Must(template.New("test").Parse(`apiVersion: v1 @@ -47,25 +52,31 @@ users: func TestCreateBootstrapConfigMapIfNotExists(t *testing.T) { tests := []struct { name string + fileExist bool createErr error - updateErr error expectErr bool }{ { "successful case should have no error", - nil, + true, nil, false, }, { - "if both create and update errors, return error", + "if configmap already exists, return error", + true, apierrors.NewAlreadyExists(schema.GroupResource{Resource: "configmaps"}, "test"), - apierrors.NewUnauthorized("go away!"), true, }, { "unexpected error should be returned", + true, apierrors.NewUnauthorized("go away!"), + true, + }, + { + "if the file does not exist, return error", + false, nil, true, }, @@ -102,7 +113,11 @@ func TestCreateBootstrapConfigMapIfNotExists(t *testing.T) { }) } - err := CreateBootstrapConfigMapIfNotExists(client, file.Name()) + fileName := file.Name() + if !tc.fileExist { + fileName = "notexistfile" + } + err := CreateBootstrapConfigMapIfNotExists(client, fileName) if tc.expectErr && err == nil { t.Errorf("CreateBootstrapConfigMapIfNotExists(%s) wanted error, got nil", tc.name) } else if !tc.expectErr && err != nil { @@ -112,3 +127,71 @@ func TestCreateBootstrapConfigMapIfNotExists(t *testing.T) { } } } + +func TestCreateClusterInfoRBACRules(t *testing.T) { + tests := []struct { + name string + client *clientsetfake.Clientset + }{ + { + name: "the RBAC rules already exist", + client: newMockClientForTest(t), + }, + { + name: "the RBAC rules do not exist", + client: clientsetfake.NewSimpleClientset(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := CreateClusterInfoRBACRules(tt.client); err != nil { + t.Errorf("CreateClusterInfoRBACRules() hits unexpected error: %v", err) + } + }) + } +} + +func newMockClientForTest(t *testing.T) *clientsetfake.Clientset { + client := clientsetfake.NewSimpleClientset() + + _, err := client.RbacV1().Roles(metav1.NamespacePublic).Create(context.TODO(), &rbac.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: BootstrapSignerClusterRoleName, + Namespace: metav1.NamespacePublic, + }, + Rules: []rbac.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{""}, + Resources: []string{"Secret"}, + ResourceNames: 
[]string{bootstrapapi.ConfigMapClusterInfo}, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("error creating role: %v", err) + } + + _, err = client.RbacV1().RoleBindings(metav1.NamespacePublic).Create(context.TODO(), &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: BootstrapSignerClusterRoleName, + Namespace: metav1.NamespacePublic, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "Role", + Name: BootstrapSignerClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.UserKind, + Name: user.Anonymous, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("error creating rolebinding: %v", err) + } + + return client +} diff --git a/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go b/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go index 14a7b19848338..a01cd3fbf1726 100644 --- a/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go +++ b/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap.go @@ -55,8 +55,7 @@ func AllowBoostrapTokensToGetNodes(client clientset.Interface) error { if err := apiclient.CreateOrUpdateClusterRole(client, &rbac.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ - Name: constants.GetNodesClusterRoleName, - Namespace: metav1.NamespaceSystem, + Name: constants.GetNodesClusterRoleName, }, Rules: []rbac.PolicyRule{ { @@ -71,8 +70,7 @@ func AllowBoostrapTokensToGetNodes(client clientset.Interface) error { return apiclient.CreateOrUpdateClusterRoleBinding(client, &rbac.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: constants.GetNodesClusterRoleName, - Namespace: metav1.NamespaceSystem, + Name: constants.GetNodesClusterRoleName, }, RoleRef: rbac.RoleRef{ APIGroup: rbac.GroupName, diff --git a/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap_test.go b/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap_test.go new file mode 100644 index 0000000000000..6574d61e554b9 --- /dev/null +++ b/cmd/kubeadm/app/phases/bootstraptoken/node/tlsbootstrap_test.go @@ -0,0 +1,301 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package node + +import ( + "context" + "testing" + + rbac "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + clientsetfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +func TestAllowBootstrapTokensToPostCSRs(t *testing.T) { + tests := []struct { + name string + client clientset.Interface + }{ + { + name: "ClusterRoleBindings is empty", + client: clientsetfake.NewSimpleClientset(), + }, + { + name: "ClusterRoleBindings already exists", + client: newMockClusterRoleBinddingClientForTest(t, &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.NodeKubeletBootstrap, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: constants.NodeBootstrapperClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: constants.NodeBootstrapTokenAuthGroup, + }, + }, + }), + }, + { + name: "Create new ClusterRoleBindings", + client: newMockClusterRoleBinddingClientForTest(t, &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.NodeKubeletBootstrap, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: constants.NodeBootstrapperClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: constants.KubeProxyClusterRoleName, + }, + }, + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := AllowBootstrapTokensToPostCSRs(tt.client); err != nil { + t.Errorf("AllowBootstrapTokensToPostCSRs() return error = %v", err) + } + }) + } +} + +func TestAutoApproveNodeBootstrapTokens(t *testing.T) { + tests := []struct { + name string + client clientset.Interface + }{ + { + name: "ClusterRoleBindings is empty", + client: clientsetfake.NewSimpleClientset(), + }, + { + name: "ClusterRoleBindings already exists", + client: newMockClusterRoleBinddingClientForTest(t, &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.NodeAutoApproveBootstrapClusterRoleBinding, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: constants.CSRAutoApprovalClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: constants.NodeBootstrapTokenAuthGroup, + }, + }, + }), + }, + { + name: "Create new ClusterRoleBindings", + client: newMockClusterRoleBinddingClientForTest(t, &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.NodeAutoApproveBootstrapClusterRoleBinding, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: constants.NodeSelfCSRAutoApprovalClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: constants.NodeBootstrapTokenAuthGroup, + }, + }, + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := AutoApproveNodeBootstrapTokens(tt.client); err != nil { + t.Errorf("AutoApproveNodeBootstrapTokens() return error = %v", err) + } + }) + } +} + +func TestAutoApproveNodeCertificateRotation(t *testing.T) { + tests := []struct { + name string + client clientset.Interface + }{ + { + name: "ClusterRoleBindings is empty", + client: clientsetfake.NewSimpleClientset(), + }, + { + name: "ClusterRoleBindings already exists", + client: newMockClusterRoleBinddingClientForTest(t, &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.NodeAutoApproveCertificateRotationClusterRoleBinding, + }, + RoleRef: 
rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: constants.NodeSelfCSRAutoApprovalClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: constants.NodesGroup, + }, + }, + }), + }, + { + name: "Create new ClusterRoleBindings", + client: newMockClusterRoleBinddingClientForTest(t, &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.NodeAutoApproveCertificateRotationClusterRoleBinding, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: constants.NodeSelfCSRAutoApprovalClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: constants.NodeBootstrapTokenAuthGroup, + }, + }, + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := AutoApproveNodeCertificateRotation(tt.client); err != nil { + t.Errorf("AutoApproveNodeCertificateRotation() return error = %v", err) + } + }) + } +} + +func TestAllowBoostrapTokensToGetNodes(t *testing.T) { + tests := []struct { + name string + client clientset.Interface + }{ + { + name: "RBAC rules are empty", + client: clientsetfake.NewSimpleClientset(), + }, + { + name: "RBAC rules already exists", + client: newMockRbacClientForTest(t, &rbac.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.GetNodesClusterRoleName, + }, + Rules: []rbac.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{""}, + Resources: []string{"nodes"}, + }, + }, + }, &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.GetNodesClusterRoleName, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: constants.GetNodesClusterRoleName, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: constants.NodeBootstrapTokenAuthGroup, + }, + }, + }), + }, + { + name: "Create new RBAC rules", + client: newMockRbacClientForTest(t, &rbac.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.GetNodesClusterRoleName, + }, + Rules: []rbac.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{""}, + Resources: []string{"nodes"}, + }, + }, + }, &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: constants.GetNodesClusterRoleName, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: constants.NodeAutoApproveBootstrapClusterRoleBinding, + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: constants.NodeBootstrapTokenAuthGroup, + }, + }, + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := AllowBoostrapTokensToGetNodes(tt.client); err != nil { + t.Errorf("AllowBoostrapTokensToGetNodes() return error = %v", err) + } + }) + } +} + +func newMockClusterRoleBinddingClientForTest(t *testing.T, clusterRoleBinding *rbac.ClusterRoleBinding) *clientsetfake.Clientset { + client := clientsetfake.NewSimpleClientset() + _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}) + + if err != nil { + t.Fatalf("error creating ClusterRoleBindings: %v", err) + } + return client +} + +func newMockRbacClientForTest(t *testing.T, clusterRole *rbac.ClusterRole, clusterRoleBinding *rbac.ClusterRoleBinding) *clientsetfake.Clientset { + client := clientsetfake.NewSimpleClientset() + _, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("error creating ClusterRoles: %v", err) + } + _, err = 
client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("error creating ClusterRoleBindings: %v", err) + } + return client +} diff --git a/cmd/kubeadm/app/phases/bootstraptoken/node/token_test.go b/cmd/kubeadm/app/phases/bootstraptoken/node/token_test.go new file mode 100644 index 0000000000000..44e214b6a43e5 --- /dev/null +++ b/cmd/kubeadm/app/phases/bootstraptoken/node/token_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package node + +import ( + "context" + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientsetfake "k8s.io/client-go/kubernetes/fake" + bootstraptokenv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1" +) + +func TestUpdateOrCreateTokens(t *testing.T) { + tests := []struct { + name string + failIfExists bool + tokens []bootstraptokenv1.BootstrapToken + wantErr bool + }{ + { + name: "token is nil", + failIfExists: true, + tokens: []bootstraptokenv1.BootstrapToken{}, + wantErr: false, + }, + { + name: "create secret which does not exist", + failIfExists: true, + tokens: []bootstraptokenv1.BootstrapToken{ + { + Token: &bootstraptokenv1.BootstrapTokenString{ + ID: "token1", + Secret: "token1data", + }, + }, + }, + wantErr: false, + }, + { + name: "create multiple secrets which do not exist", + failIfExists: true, + tokens: []bootstraptokenv1.BootstrapToken{ + { + Token: &bootstraptokenv1.BootstrapTokenString{ + ID: "token1", + Secret: "token1data", + }, + }, + { + Token: &bootstraptokenv1.BootstrapTokenString{ + ID: "token2", + Secret: "token2data", + }, + }, + { + Token: &bootstraptokenv1.BootstrapTokenString{ + ID: "token3", + Secret: "token3data", + }, + }, + }, + wantErr: false, + }, + { + name: "create secret which exists, failIfExists is false", + failIfExists: false, + tokens: []bootstraptokenv1.BootstrapToken{ + { + Token: &bootstraptokenv1.BootstrapTokenString{ + ID: "foo", + Secret: "bar", + }, + }, + }, + wantErr: false, + }, + { + name: "create secret which exists, failIfExists is true", + failIfExists: true, + tokens: []bootstraptokenv1.BootstrapToken{ + { + Token: &bootstraptokenv1.BootstrapTokenString{ + ID: "foo", + Secret: "bar", + }, + }, + }, + wantErr: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + client := newMockClientForTest(t) + if err := UpdateOrCreateTokens(client, tc.failIfExists, tc.tokens); (err != nil) != tc.wantErr { + t.Fatalf("UpdateOrCreateTokens() error = %v, wantErr %v", err, tc.wantErr) + } + }) + } +} + +func newMockClientForTest(t *testing.T) *clientsetfake.Clientset { + client := clientsetfake.NewSimpleClientset() + _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Create(context.TODO(), &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bootstrap-token-foo", + Labels: map[string]string{"app": "foo"}, + Namespace: 
metav1.NamespaceSystem, + }, + Data: map[string][]byte{"foo": {'f', 'o', 'o'}}, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("error creating secret: %v", err) + } + return client +} diff --git a/cmd/kubeadm/app/phases/certs/certlist.go b/cmd/kubeadm/app/phases/certs/certlist.go index 177d82241234d..8c5d04665e31c 100644 --- a/cmd/kubeadm/app/phases/certs/certlist.go +++ b/cmd/kubeadm/app/phases/certs/certlist.go @@ -60,7 +60,7 @@ func (k *KubeadmCert) GetConfig(ic *kubeadmapi.InitConfiguration) (*pkiutil.Cert } } - k.config.PublicKeyAlgorithm = ic.ClusterConfiguration.PublicKeyAlgorithm() + k.config.EncryptionAlgorithm = ic.ClusterConfiguration.EncryptionAlgorithmType() return &k.config, nil } diff --git a/cmd/kubeadm/app/phases/certs/certs.go b/cmd/kubeadm/app/phases/certs/certs.go index 7c823960fe587..ea4d34528c07b 100644 --- a/cmd/kubeadm/app/phases/certs/certs.go +++ b/cmd/kubeadm/app/phases/certs/certs.go @@ -69,12 +69,12 @@ func CreatePKIAssets(cfg *kubeadmapi.InitConfiguration) error { fmt.Printf("[certs] Valid certificates and keys now exist in %q\n", cfg.CertificatesDir) // Service accounts are not x509 certs, so handled separately - return CreateServiceAccountKeyAndPublicKeyFiles(cfg.CertificatesDir, cfg.ClusterConfiguration.PublicKeyAlgorithm()) + return CreateServiceAccountKeyAndPublicKeyFiles(cfg.CertificatesDir, cfg.ClusterConfiguration.EncryptionAlgorithmType()) } // CreateServiceAccountKeyAndPublicKeyFiles creates new public/private key files for signing service account users. // If the sa public/private key files already exist in the target folder, they are used only if evaluated equals; otherwise an error is returned. -func CreateServiceAccountKeyAndPublicKeyFiles(certsDir string, keyType x509.PublicKeyAlgorithm) error { +func CreateServiceAccountKeyAndPublicKeyFiles(certsDir string, keyType kubeadmapi.EncryptionAlgorithmType) error { klog.V(1).Infoln("creating new public/private key files for signing service account users") _, err := keyutil.PrivateKeyFromFile(filepath.Join(certsDir, kubeadmconstants.ServiceAccountPrivateKeyName)) if err == nil { diff --git a/cmd/kubeadm/app/phases/certs/certs_test.go b/cmd/kubeadm/app/phases/certs/certs_test.go index a399551432b18..755ff124ac886 100644 --- a/cmd/kubeadm/app/phases/certs/certs_test.go +++ b/cmd/kubeadm/app/phases/certs/certs_test.go @@ -347,7 +347,7 @@ func TestCreateServiceAccountKeyAndPublicKeyFiles(t *testing.T) { } } - err := CreateServiceAccountKeyAndPublicKeyFiles(dir, x509.RSA) + err := CreateServiceAccountKeyAndPublicKeyFiles(dir, kubeadmapi.EncryptionAlgorithmRSA) if (err != nil) != tt.expectedErr { t.Fatalf("expected error: %v, got: %v, error: %v", tt.expectedErr, err != nil, err) } else if tt.expectedErr { diff --git a/cmd/kubeadm/app/phases/certs/renewal/manager.go b/cmd/kubeadm/app/phases/certs/renewal/manager.go index 764bcb01a80e5..27b350694b179 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/manager.go +++ b/cmd/kubeadm/app/phases/certs/renewal/manager.go @@ -141,6 +141,10 @@ func NewManager(cfg *kubeadmapi.ClusterConfiguration, kubernetesDir string) (*Ma longName: "certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself", fileName: kubeadmconstants.AdminKubeConfigFileName, }, + { + longName: "certificate embedded in the kubeconfig file for the super-admin", + fileName: kubeadmconstants.SuperAdminKubeConfigFileName, + }, { longName: "certificate embedded in the kubeconfig file for the controller manager to use", fileName: 
kubeadmconstants.ControllerManagerKubeConfigFileName, @@ -227,8 +231,8 @@ func (rm *Manager) RenewUsingLocalCA(name string) (bool, error) { // extract the certificate config cfg := &pkiutil.CertConfig{ - Config: certToConfig(cert), - PublicKeyAlgorithm: rm.cfg.PublicKeyAlgorithm(), + Config: certToConfig(cert), + EncryptionAlgorithm: rm.cfg.EncryptionAlgorithmType(), } // reads the CA @@ -270,8 +274,8 @@ func (rm *Manager) CreateRenewCSR(name, outdir string) error { // extracts the certificate config cfg := &pkiutil.CertConfig{ - Config: certToConfig(cert), - PublicKeyAlgorithm: rm.cfg.PublicKeyAlgorithm(), + Config: certToConfig(cert), + EncryptionAlgorithm: rm.cfg.EncryptionAlgorithmType(), } // generates the CSR request and save it diff --git a/cmd/kubeadm/app/phases/certs/renewal/manager_test.go b/cmd/kubeadm/app/phases/certs/renewal/manager_test.go index ca9640049e555..9163d2d151721 100644 --- a/cmd/kubeadm/app/phases/certs/renewal/manager_test.go +++ b/cmd/kubeadm/app/phases/certs/renewal/manager_test.go @@ -64,7 +64,7 @@ func TestNewManager(t *testing.T) { { name: "cluster with local etcd", cfg: &kubeadmapi.ClusterConfiguration{}, - expectedCertificates: 10, //[admin apiserver apiserver-etcd-client apiserver-kubelet-client controller-manager etcd/healthcheck-client etcd/peer etcd/server front-proxy-client scheduler] + expectedCertificates: 11, // [admin super-admin apiserver apiserver-etcd-client apiserver-kubelet-client controller-manager etcd/healthcheck-client etcd/peer etcd/server front-proxy-client scheduler] }, { name: "cluster with external etcd", @@ -73,7 +73,7 @@ func TestNewManager(t *testing.T) { External: &kubeadmapi.ExternalEtcd{}, }, }, - expectedCertificates: 6, // [admin apiserver apiserver-kubelet-client controller-manager front-proxy-client scheduler] + expectedCertificates: 7, // [admin super-admin apiserver apiserver-kubelet-client controller-manager front-proxy-client scheduler] }, } diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go index 7096c1e79db9e..998ca2e3456b7 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -48,7 +48,7 @@ func CreateInitStaticPodManifestFiles(manifestDir, patchesDir string, cfg *kubea // GetStaticPodSpecs returns all staticPodSpecs actualized to the context of the current configuration // NB. this method holds the information about how kubeadm creates static pod manifests. 
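The GetStaticPodSpecs hunk that follows switches proxyEnvs from []v1.EnvVar to []kubeadmapi.EnvVar and merges it with each component's ExtraEnvs through kubeadmutil.MergeKubeadmEnvVars. A minimal caller-side sketch, not part of this patch: the wrapper literal mirrors the test hunks further down, while the variadic MergeKubeadmEnvVars signature, the example proxy value, and the usual kubeadm imports (v1, kubeadmapi, kubeadmutil) are assumptions inferred from the call sites in this diff.

	// Hypothetical extra variables, wrapped in the kubeadm API type (which embeds v1.EnvVar).
	extraEnvs := []kubeadmapi.EnvVar{
		{EnvVar: v1.EnvVar{Name: "HTTP_PROXY", Value: "http://proxy.example.com:3128"}},
	}
	// In the real flow proxyEnvs would come from kubeadmutil.GetProxyEnvVars(); empty here for illustration.
	proxyEnvs := []kubeadmapi.EnvVar{}
	// Assumed from the call sites in this patch: merging yields a []v1.EnvVar suitable for a container spec.
	containerEnv := kubeadmutil.MergeKubeadmEnvVars(proxyEnvs, extraEnvs)
	_ = containerEnv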
-func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.APIEndpoint, proxyEnvs []v1.EnvVar) map[string]v1.Pod { +func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.APIEndpoint, proxyEnvs []kubeadmapi.EnvVar) map[string]v1.Pod { // Get the required hostpath mounts mounts := getHostPathVolumesForTheControlPlane(cfg) if proxyEnvs == nil { @@ -67,7 +67,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap ReadinessProbe: staticpodutil.ReadinessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/readyz", endpoint.BindPort, v1.URISchemeHTTPS), StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", endpoint.BindPort, v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane), Resources: staticpodutil.ComponentResources("250m"), - Env: kubeadmutil.MergeEnv(proxyEnvs, cfg.APIServer.ExtraEnvs), + Env: kubeadmutil.MergeKubeadmEnvVars(proxyEnvs, cfg.APIServer.ExtraEnvs), }, mounts.GetVolumes(kubeadmconstants.KubeAPIServer), map[string]string{kubeadmconstants.KubeAPIServerAdvertiseAddressEndpointAnnotationKey: endpoint.String()}), kubeadmconstants.KubeControllerManager: staticpodutil.ComponentPod(v1.Container{ @@ -79,7 +79,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetControllerManagerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeControllerManagerPort, v1.URISchemeHTTPS), StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetControllerManagerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeControllerManagerPort, v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane), Resources: staticpodutil.ComponentResources("200m"), - Env: kubeadmutil.MergeEnv(proxyEnvs, cfg.ControllerManager.ExtraEnvs), + Env: kubeadmutil.MergeKubeadmEnvVars(proxyEnvs, cfg.ControllerManager.ExtraEnvs), }, mounts.GetVolumes(kubeadmconstants.KubeControllerManager), nil), kubeadmconstants.KubeScheduler: staticpodutil.ComponentPod(v1.Container{ Name: kubeadmconstants.KubeScheduler, @@ -90,7 +90,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetSchedulerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeSchedulerPort, v1.URISchemeHTTPS), StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetSchedulerProbeAddress(cfg), "/healthz", kubeadmconstants.KubeSchedulerPort, v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane), Resources: staticpodutil.ComponentResources("100m"), - Env: kubeadmutil.MergeEnv(proxyEnvs, cfg.Scheduler.ExtraEnvs), + Env: kubeadmutil.MergeKubeadmEnvVars(proxyEnvs, cfg.Scheduler.ExtraEnvs), }, mounts.GetVolumes(kubeadmconstants.KubeScheduler), nil), } return staticPodSpecs diff --git a/cmd/kubeadm/app/phases/controlplane/manifests_test.go b/cmd/kubeadm/app/phases/controlplane/manifests_test.go index 78b09014d1123..6bf1e5057fa8b 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests_test.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests_test.go @@ -52,13 +52,15 @@ func TestGetStaticPodSpecs(t *testing.T) { // Creates a Cluster Configuration cfg := &kubeadmapi.ClusterConfiguration{ KubernetesVersion: "v1.9.0", - Scheduler: kubeadmapi.ControlPlaneComponent{ExtraEnvs: []v1.EnvVar{ - {Name: "Foo", Value: "Bar"}, + Scheduler: kubeadmapi.ControlPlaneComponent{ExtraEnvs: []kubeadmapi.EnvVar{ + { + EnvVar: v1.EnvVar{Name: "Foo", Value: "Bar"}, + }, }}, } // Executes 
GetStaticPodSpecs - specs := GetStaticPodSpecs(cfg, &kubeadmapi.APIEndpoint{}, []v1.EnvVar{}) + specs := GetStaticPodSpecs(cfg, &kubeadmapi.APIEndpoint{}, []kubeadmapi.EnvVar{}) var tests = []struct { name string diff --git a/cmd/kubeadm/app/phases/etcd/local.go b/cmd/kubeadm/app/phases/etcd/local.go index e58c6eb9ce7bd..8c3e5c99b36b7 100644 --- a/cmd/kubeadm/app/phases/etcd/local.go +++ b/cmd/kubeadm/app/phases/etcd/local.go @@ -224,7 +224,7 @@ func GetEtcdPodSpec(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.A }, LivenessProbe: staticpodutil.LivenessProbe(probeHostname, "/health?exclude=NOSPACE&serializable=true", probePort, probeScheme), StartupProbe: staticpodutil.StartupProbe(probeHostname, "/health?serializable=false", probePort, probeScheme, cfg.APIServer.TimeoutForControlPlane), - Env: cfg.Etcd.Local.ExtraEnvs, + Env: kubeadmutil.MergeKubeadmEnvVars(cfg.Etcd.Local.ExtraEnvs), }, etcdMounts, // etcd will listen on the advertise address of the API server, in a different port (2379) diff --git a/cmd/kubeadm/app/phases/etcd/local_test.go b/cmd/kubeadm/app/phases/etcd/local_test.go index 4aa181a51ccf9..e04c8655eb584 100644 --- a/cmd/kubeadm/app/phases/etcd/local_test.go +++ b/cmd/kubeadm/app/phases/etcd/local_test.go @@ -22,11 +22,13 @@ package etcd import ( "fmt" "os" + "path" "path/filepath" "reflect" "sort" "testing" + "github.com/google/go-cmp/cmp" "github.com/lithammer/dedent" v1 "k8s.io/api/core/v1" @@ -44,8 +46,10 @@ func TestGetEtcdPodSpec(t *testing.T) { Etcd: kubeadmapi.Etcd{ Local: &kubeadmapi.LocalEtcd{ DataDir: "/var/lib/etcd", - ExtraEnvs: []v1.EnvVar{ - {Name: "Foo", Value: "Bar"}, + ExtraEnvs: []kubeadmapi.EnvVar{ + { + EnvVar: v1.EnvVar{Name: "Foo", Value: "Bar"}, + }, }, }, }, @@ -71,8 +75,9 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) { defer os.RemoveAll(tmpdir) var tests = []struct { - cfg *kubeadmapi.ClusterConfiguration - expectedError bool + cfg *kubeadmapi.ClusterConfiguration + expectedError bool + expectedManifest string }{ { cfg: &kubeadmapi.ClusterConfiguration{ @@ -84,6 +89,89 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) { }, }, expectedError: false, + expectedManifest: fmt.Sprintf(`apiVersion: v1 +kind: Pod +metadata: + annotations: + kubeadm.kubernetes.io/etcd.advertise-client-urls: https://:2379 + creationTimestamp: null + labels: + component: etcd + tier: control-plane + name: etcd + namespace: kube-system +spec: + containers: + - command: + - etcd + - --advertise-client-urls=https://:2379 + - --cert-file=etcd/server.crt + - --client-cert-auth=true + - --data-dir=%s/etcd + - --experimental-initial-corrupt-check=true + - --experimental-watch-progress-notify-interval=5s + - --initial-advertise-peer-urls=https://:2380 + - --initial-cluster==https://:2380 + - --key-file=etcd/server.key + - --listen-client-urls=https://127.0.0.1:2379,https://:2379 + - --listen-metrics-urls=http://127.0.0.1:2381 + - --listen-peer-urls=https://:2380 + - --name= + - --peer-cert-file=etcd/peer.crt + - --peer-client-cert-auth=true + - --peer-key-file=etcd/peer.key + - --peer-trusted-ca-file=etcd/ca.crt + - --snapshot-count=10000 + - --trusted-ca-file=etcd/ca.crt + image: /etcd:%s + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 8 + httpGet: + host: 127.0.0.1 + path: /health?exclude=NOSPACE&serializable=true + port: 2381 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 15 + name: etcd + resources: + requests: + cpu: 100m + memory: 100Mi + startupProbe: + failureThreshold: 24 + 
httpGet: + host: 127.0.0.1 + path: /health?serializable=false + port: 2381 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 15 + volumeMounts: + - mountPath: %s/etcd + name: etcd-data + - mountPath: /etcd + name: etcd-certs + hostNetwork: true + priority: 2000001000 + priorityClassName: system-node-critical + securityContext: + seccompProfile: + type: RuntimeDefault + volumes: + - hostPath: + path: /etcd + type: DirectoryOrCreate + name: etcd-certs + - hostPath: + path: %s/etcd + type: DirectoryOrCreate + name: etcd-data +status: {} +`, tmpdir, kubeadmconstants.DefaultEtcdVersion, tmpdir, tmpdir), }, { cfg: &kubeadmapi.ClusterConfiguration{ @@ -115,6 +203,16 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) { // Assert expected files are there testutil.AssertFilesCount(t, manifestPath, 1) testutil.AssertFileExists(t, manifestPath, kubeadmconstants.Etcd+".yaml") + manifestBytes, err := os.ReadFile(path.Join(manifestPath, kubeadmconstants.Etcd+".yaml")) + if err != nil { + t.Errorf("failed to load generated manifest file: %v", err) + } + if test.expectedManifest != string(manifestBytes) { + t.Errorf( + "File created by CreateLocalEtcdStaticPodManifestFile is not as expected. Diff: \n%s", + cmp.Diff(string(manifestBytes), test.expectedManifest), + ) + } } else { testutil.AssertError(t, err, "etcd static pod manifest cannot be generated for cluster using external etcd") } @@ -165,6 +263,9 @@ func TestCreateLocalEtcdStaticPodManifestFileWithPatches(t *testing.T) { t.Errorf("Error executing ReadStaticPodFromDisk: %v", err) return } + if pod.Spec.DNSPolicy != "" { + t.Errorf("DNSPolicy should be empty but: %v", pod.Spec.DNSPolicy) + } if _, ok := pod.ObjectMeta.Annotations["patched"]; !ok { t.Errorf("Patches were not applied to %s", kubeadmconstants.Etcd) diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go index eeac15bb95860..0798befe929b8 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go @@ -18,6 +18,7 @@ package kubeconfig import ( "bytes" + "context" "crypto" "crypto/x509" "fmt" @@ -28,6 +29,11 @@ import ( "github.com/pkg/errors" + rbac "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" @@ -97,6 +103,9 @@ func CreateJoinControlPlaneKubeConfigFiles(outDir string, cfg *kubeadmapi.InitCo return nil } +// CreateKubeConfigFileFunc defines a function type used for creating kubeconfig files. +type CreateKubeConfigFileFunc func(string, string, *kubeadmapi.InitConfiguration) error + // CreateKubeConfigFile creates a kubeconfig file. // If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned. 
func CreateKubeConfigFile(kubeConfigFileName string, outDir string, cfg *kubeadmapi.InitConfiguration) error { @@ -407,6 +416,7 @@ func ValidateKubeconfigsForExternalCA(outDir string, cfg *kubeadmapi.InitConfigu validationConfigCPE := kubeconfigutil.CreateBasic(controlPlaneEndpoint, "dummy", "dummy", pkiutil.EncodeCertPEM(caCert)) kubeConfigFileNamesCPE := []string{ kubeadmconstants.AdminKubeConfigFileName, + kubeadmconstants.SuperAdminKubeConfigFileName, kubeadmconstants.KubeletKubeConfigFileName, } @@ -433,6 +443,13 @@ func getKubeConfigSpecsBase(cfg *kubeadmapi.InitConfiguration) (map[string]*kube kubeadmconstants.AdminKubeConfigFileName: { APIServer: controlPlaneEndpoint, ClientName: "kubernetes-admin", + ClientCertAuth: &clientCertAuth{ + Organizations: []string{kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding}, + }, + }, + kubeadmconstants.SuperAdminKubeConfigFileName: { + APIServer: controlPlaneEndpoint, + ClientName: "kubernetes-super-admin", ClientCertAuth: &clientCertAuth{ Organizations: []string{kubeadmconstants.SystemPrivilegedGroup}, }, @@ -482,7 +499,7 @@ func createKubeConfigAndCSR(kubeConfigDir string, kubeadmConfig *kubeadmapi.Init clientCertConfig := newClientCertConfigFromKubeConfigSpec(spec, nil) - clientKey, err := pkiutil.NewPrivateKey(clientCertConfig.PublicKeyAlgorithm) + clientKey, err := pkiutil.NewPrivateKey(clientCertConfig.EncryptionAlgorithm) if err != nil { return err } @@ -541,3 +558,140 @@ func CreateDefaultKubeConfigsAndCSRFiles(out io.Writer, kubeConfigDir string, ku } return nil } + +// EnsureRBACFunc defines a function type that can be passed to EnsureAdminClusterRoleBinding(). +type EnsureRBACFunc func(context.Context, clientset.Interface, clientset.Interface, time.Duration, time.Duration) (clientset.Interface, error) + +// EnsureAdminClusterRoleBinding constructs a client from admin.conf and optionally +// constructs a client from super-admin.conf if the file exists. It then proceeds +// to pass the clients to EnsureAdminClusterRoleBindingImpl. The function returns a +// usable client from admin.conf with RBAC properly constructed or an error. +func EnsureAdminClusterRoleBinding(outDir string, ensureRBACFunc EnsureRBACFunc) (clientset.Interface, error) { + var ( + err error + adminClient, superAdminClient clientset.Interface + ) + + // Create a client from admin.conf. + adminClient, err = kubeconfigutil.ClientSetFromFile(filepath.Join(outDir, kubeadmconstants.AdminKubeConfigFileName)) + if err != nil { + return nil, err + } + + // Create a client from super-admin.conf. + superAdminPath := filepath.Join(outDir, kubeadmconstants.SuperAdminKubeConfigFileName) + if _, err := os.Stat(superAdminPath); err == nil { + superAdminClient, err = kubeconfigutil.ClientSetFromFile(superAdminPath) + if err != nil { + return nil, err + } + } + + if ensureRBACFunc == nil { + ensureRBACFunc = EnsureAdminClusterRoleBindingImpl + } + + ctx := context.Background() + return ensureRBACFunc( + ctx, adminClient, superAdminClient, kubeadmconstants.APICallRetryInterval, kubeadmconstants.APICallWithWriteTimeout) +} + +// EnsureAdminClusterRoleBindingImpl first attempts to see if the ClusterRoleBinding +// kubeadm:cluster-admins exists by using adminClient. If it already exists, +// it would mean the adminClient is usable. If it does not, attempt to create +// the ClusterRoleBinding by using superAdminClient. 
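The doc comment above describes the admin.conf/super-admin.conf fallback implemented by the function that follows. As a caller-side illustration only (no call site is shown in this part of the patch; the output directory and error wrapping are assumptions), the wrapper would typically be invoked after the kubeconfig files have been written:

	// Sketch: obtain a usable admin client, falling back to super-admin.conf if needed.
	// Passing nil selects the default EnsureAdminClusterRoleBindingImpl.
	client, err := EnsureAdminClusterRoleBinding(kubeadmconstants.KubernetesDir, nil)
	if err != nil {
		return errors.Wrapf(err, "could not ensure the %s ClusterRoleBinding",
			kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding)
	}
	// "client" is now backed by admin.conf with the kubeadm:cluster-admins binding in place.
	_ = client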
+func EnsureAdminClusterRoleBindingImpl(ctx context.Context, adminClient, superAdminClient clientset.Interface, + retryInterval, retryTimeout time.Duration) (clientset.Interface, error) { + + klog.V(1).Infof("ensuring that the ClusterRoleBinding for the %s Group exists", + kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding) + + var ( + err, lastError error + crbResult *rbac.ClusterRoleBinding + clusterRoleBinding = &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding, + }, + }, + } + ) + + // First try to create the CRB with the admin.conf client. If the admin.conf contains a User bound + // to the built-in super-user group, this will pass. In all other cases an error will be returned. + // The poll here is required to ensure the API server is reachable during "kubeadm init" workflows. + err = wait.PollUntilContextTimeout( + ctx, + retryInterval, + retryTimeout, + true, func(ctx context.Context) (bool, error) { + if crbResult, err = adminClient.RbacV1().ClusterRoleBindings().Create( + ctx, + clusterRoleBinding, + metav1.CreateOptions{}, + ); err != nil { + if apierrors.IsForbidden(err) { + // If it encounters a forbidden error this means that the API server was reached + // but the CRB is missing - i.e. the admin.conf user does not have permissions + // to create its own permission RBAC yet. + // + // When a "create" call is made, but the resource is forbidden, a non-nil + // CRB will still be returned. Return true here, but update "crbResult" to nil, + // to ensure that the process continues with super-admin.conf. + crbResult = nil + return true, nil + } else if apierrors.IsAlreadyExists(err) { + // If the CRB exists it means the admin.conf already has the right + // permissions; return. + return true, nil + } else { + // Retry on any other error type. + lastError = errors.Wrap(err, "unable to create ClusterRoleBinding") + return false, nil + } + } + return true, nil + }) + if err != nil { + return nil, lastError + } + + // The CRB exists; return the admin.conf client. + if crbResult != nil { + return adminClient, nil + } + + // If the superAdminClient is nil at this point we cannot proceed creating the CRB; return an error. + if superAdminClient == nil { + return nil, errors.Errorf("the ClusterRoleBinding for the %s Group is missing but there is no %s to create it", + kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding, + kubeadmconstants.SuperAdminKubeConfigFileName) + } + + // Create the ClusterRoleBinding with the super-admin.conf client. + klog.V(1).Infof("creating the ClusterRoleBinding for the %s Group by using %s", + kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding, + kubeadmconstants.SuperAdminKubeConfigFileName) + + if _, err := superAdminClient.RbacV1().ClusterRoleBindings().Create( + ctx, + clusterRoleBinding, + metav1.CreateOptions{}, + ); err != nil { + return nil, errors.Wrapf(err, "unable to create the %s ClusterRoleBinding", + kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding) + } + + // Once the CRB is in place, start using the admin.conf client. 
+ return adminClient, nil +} diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go index 91d35f02ffd4e..cdbc4e513002b 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go @@ -18,6 +18,7 @@ package kubeconfig import ( "bytes" + "context" "crypto" "crypto/x509" "fmt" @@ -26,14 +27,24 @@ import ( "path/filepath" "reflect" "testing" + "time" "github.com/lithammer/dedent" - + "github.com/pkg/errors" + + rbac "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + clientset "k8s.io/client-go/kubernetes" + clientsetfake "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" certstestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" @@ -119,6 +130,11 @@ func TestGetKubeConfigSpecs(t *testing.T) { { kubeConfigFile: kubeadmconstants.AdminKubeConfigFileName, clientName: "kubernetes-admin", + organizations: []string{kubeadmconstants.ClusterAdminsGroupAndClusterRoleBinding}, + }, + { + kubeConfigFile: kubeadmconstants.SuperAdminKubeConfigFileName, + clientName: "kubernetes-super-admin", organizations: []string{kubeadmconstants.SystemPrivilegedGroup}, }, { @@ -174,7 +190,7 @@ func TestGetKubeConfigSpecs(t *testing.T) { } switch assertion.kubeConfigFile { - case kubeadmconstants.AdminKubeConfigFileName, kubeadmconstants.KubeletKubeConfigFileName: + case kubeadmconstants.AdminKubeConfigFileName, kubeadmconstants.SuperAdminKubeConfigFileName, kubeadmconstants.KubeletKubeConfigFileName: if spec.APIServer != controlPlaneEndpoint { t.Errorf("expected getKubeConfigSpecs for %s to set cfg.APIServer to %s, got %s", assertion.kubeConfigFile, controlPlaneEndpoint, spec.APIServer) @@ -615,8 +631,9 @@ func TestValidateKubeconfigsForExternalCA(t *testing.T) { }, "some files don't exist": { filesToWrite: map[string]*clientcmdapi.Config{ - kubeadmconstants.AdminKubeConfigFileName: config, - kubeadmconstants.KubeletKubeConfigFileName: config, + kubeadmconstants.AdminKubeConfigFileName: config, + kubeadmconstants.SuperAdminKubeConfigFileName: config, + kubeadmconstants.KubeletKubeConfigFileName: config, }, initConfig: initConfig, expectedError: true, @@ -624,6 +641,7 @@ func TestValidateKubeconfigsForExternalCA(t *testing.T) { "some files have invalid CA": { filesToWrite: map[string]*clientcmdapi.Config{ kubeadmconstants.AdminKubeConfigFileName: config, + kubeadmconstants.SuperAdminKubeConfigFileName: config, kubeadmconstants.KubeletKubeConfigFileName: config, kubeadmconstants.ControllerManagerKubeConfigFileName: configWithAnotherClusterCa, kubeadmconstants.SchedulerKubeConfigFileName: config, @@ -634,6 +652,7 @@ func TestValidateKubeconfigsForExternalCA(t *testing.T) { "some files have a different Server URL": { filesToWrite: map[string]*clientcmdapi.Config{ kubeadmconstants.AdminKubeConfigFileName: config, + kubeadmconstants.SuperAdminKubeConfigFileName: config, kubeadmconstants.KubeletKubeConfigFileName: config, kubeadmconstants.ControllerManagerKubeConfigFileName: config, 
kubeadmconstants.SchedulerKubeConfigFileName: configWithAnotherServerURL, @@ -643,6 +662,7 @@ func TestValidateKubeconfigsForExternalCA(t *testing.T) { "all files are valid": { filesToWrite: map[string]*clientcmdapi.Config{ kubeadmconstants.AdminKubeConfigFileName: config, + kubeadmconstants.SuperAdminKubeConfigFileName: config, kubeadmconstants.KubeletKubeConfigFileName: config, kubeadmconstants.ControllerManagerKubeConfigFileName: config, kubeadmconstants.SchedulerKubeConfigFileName: config, @@ -715,3 +735,196 @@ func setupdKubeConfigWithTokenAuth(t *testing.T, caCert *x509.Certificate, APISe return config } + +func TestEnsureAdminClusterRoleBinding(t *testing.T) { + dir := testutil.SetupTempDir(t) + defer os.RemoveAll(dir) + + cfg := testutil.GetDefaultInternalConfig(t) + cfg.CertificatesDir = dir + + ca := certsphase.KubeadmCertRootCA() + _, _, err := ca.CreateAsCA(cfg) + if err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + expectedRBACError bool + expectedError bool + missingAdminConf bool + missingSuperAdminConf bool + }{ + { + name: "no errors", + }, + { + name: "expect RBAC error", + expectedRBACError: true, + expectedError: true, + }, + { + name: "admin.conf is missing", + missingAdminConf: true, + expectedError: true, + }, + { + name: "super-admin.conf is missing", + missingSuperAdminConf: true, + expectedError: false, // The file is optional. + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ensureRBACFunc := func(_ context.Context, adminClient clientset.Interface, superAdminClient clientset.Interface, + _ time.Duration, _ time.Duration) (clientset.Interface, error) { + + if tc.expectedRBACError { + return nil, errors.New("ensureRBACFunc error") + } + return adminClient, nil + } + + // Create the admin.conf and super-admin.conf so that EnsureAdminClusterRoleBinding + // can create clients from the files. 
+ os.Remove(filepath.Join(dir, kubeadmconstants.AdminKubeConfigFileName)) + if !tc.missingAdminConf { + if err := CreateKubeConfigFile(kubeadmconstants.AdminKubeConfigFileName, dir, cfg); err != nil { + t.Fatal(err) + } + } + os.Remove(filepath.Join(dir, kubeadmconstants.SuperAdminKubeConfigFileName)) + if !tc.missingSuperAdminConf { + if err := CreateKubeConfigFile(kubeadmconstants.SuperAdminKubeConfigFileName, dir, cfg); err != nil { + t.Fatal(err) + } + } + + client, err := EnsureAdminClusterRoleBinding(dir, ensureRBACFunc) + if (err != nil) != tc.expectedError { + t.Fatalf("expected error: %v, got: %v, error: %v", err != nil, tc.expectedError, err) + } + + if err == nil && client == nil { + t.Fatal("got nil client") + } + }) + } +} + +func TestEnsureAdminClusterRoleBindingImpl(t *testing.T) { + tests := []struct { + name string + setupAdminClient func(*clientsetfake.Clientset) + setupSuperAdminClient func(*clientsetfake.Clientset) + expectedError bool + }{ + { + name: "admin.conf: handle forbidden errors when the super-admin.conf client is nil", + setupAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, nil, apierrors.NewForbidden( + schema.GroupResource{}, "name", errors.New("")) + }) + }, + expectedError: true, + }, + { + // A "create" call against a real server can return a forbidden error and a non-nil CRB + name: "admin.conf: handle forbidden error and returned CRBs, when the super-admin.conf client is nil", + setupAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, &rbac.ClusterRoleBinding{}, apierrors.NewForbidden( + schema.GroupResource{}, "name", errors.New("")) + }) + }, + expectedError: true, + }, + { + name: "admin.conf: CRB already exists, use the admin.conf client", + setupAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, nil, apierrors.NewAlreadyExists( + schema.GroupResource{}, "name") + }) + }, + expectedError: true, + }, + { + name: "admin.conf: handle other errors, such as a server timeout", + setupAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, nil, apierrors.NewServerTimeout( + schema.GroupResource{}, "create", 0) + }) + }, + expectedError: true, + }, + { + name: "admin.conf: CRB exists, return a client from admin.conf", + setupAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, &rbac.ClusterRoleBinding{}, nil + }) + }, + expectedError: false, + }, + { + name: "super-admin.conf: error while creating CRB", + setupAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, nil, apierrors.NewForbidden( + schema.GroupResource{}, "name", errors.New("")) + }) + }, + setupSuperAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, 
error) { + return true, nil, apierrors.NewServerTimeout( + schema.GroupResource{}, "create", 0) + }) + }, + expectedError: true, + }, + { + name: "super-admin.conf: admin.conf cannot create CRB, create CRB with super-admin.conf, return client from admin.conf", + setupAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, nil, apierrors.NewForbidden( + schema.GroupResource{}, "name", errors.New("")) + }) + }, + setupSuperAdminClient: func(client *clientsetfake.Clientset) { + client.PrependReactor("create", "clusterrolebindings", func(action clientgotesting.Action) (bool, runtime.Object, error) { + return true, &rbac.ClusterRoleBinding{}, nil + }) + }, + expectedError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + adminClient := clientsetfake.NewSimpleClientset() + tc.setupAdminClient(adminClient) + + var superAdminClient clientset.Interface // ensure superAdminClient is nil by default + if tc.setupSuperAdminClient != nil { + fakeSuperAdminClient := clientsetfake.NewSimpleClientset() + tc.setupSuperAdminClient(fakeSuperAdminClient) + superAdminClient = fakeSuperAdminClient + } + + client, err := EnsureAdminClusterRoleBindingImpl( + context.Background(), adminClient, superAdminClient, time.Millisecond*50, time.Millisecond*1000) + if (err != nil) != tc.expectedError { + t.Fatalf("expected error: %v, got %v, error: %v", tc.expectedError, err != nil, err) + } + + if err == nil && client == nil { + t.Fatal("got nil client") + } + }) + } +} diff --git a/cmd/kubeadm/app/phases/upgrade/health.go b/cmd/kubeadm/app/phases/upgrade/health.go index bc08071923947..a09dc1395c4b8 100644 --- a/cmd/kubeadm/app/phases/upgrade/health.go +++ b/cmd/kubeadm/app/phases/upgrade/health.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -112,14 +112,14 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) Namespace: ns, }, Spec: batchv1.JobSpec{ - BackoffLimit: pointer.Int32(0), + BackoffLimit: ptr.To[int32](0), Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ RestartPolicy: v1.RestartPolicyNever, SecurityContext: &v1.PodSecurityContext{ - RunAsUser: pointer.Int64(999), - RunAsGroup: pointer.Int64(999), - RunAsNonRoot: pointer.Bool(true), + RunAsUser: ptr.To[int64](999), + RunAsGroup: ptr.To[int64](999), + RunAsNonRoot: ptr.To(true), }, Tolerations: []v1.Toleration{ { diff --git a/cmd/kubeadm/app/phases/upgrade/policy.go b/cmd/kubeadm/app/phases/upgrade/policy.go index eed1ed5346eaf..497e03747ddfb 100644 --- a/cmd/kubeadm/app/phases/upgrade/policy.go +++ b/cmd/kubeadm/app/phases/upgrade/policy.go @@ -35,7 +35,7 @@ const ( MaximumAllowedMinorVersionDowngradeSkew = 1 // MaximumAllowedMinorVersionKubeletSkew describes how many minor versions the control plane version and the kubelet can skew in a kubeadm cluster - MaximumAllowedMinorVersionKubeletSkew = 1 + MaximumAllowedMinorVersionKubeletSkew = 3 ) // VersionSkewPolicyErrors describes version skew errors that might be seen during the validation process in EnforceVersionPolicies diff --git a/cmd/kubeadm/app/phases/upgrade/policy_test.go b/cmd/kubeadm/app/phases/upgrade/policy_test.go index 5d97eca84a7f7..a747046a3619d 100644 --- 
a/cmd/kubeadm/app/phases/upgrade/policy_test.go +++ b/cmd/kubeadm/app/phases/upgrade/policy_test.go @@ -90,6 +90,24 @@ func TestEnforceVersionPolicies(t *testing.T) { }, newK8sVersion: "v1.13.0", expectedMandatoryErrs: 1, // can't upgrade two minor versions + }, + { + name: "upgrading with n-3 kubelet is supported", + vg: &fakeVersionGetter{ + clusterVersion: "v1.14.3", + kubeletVersion: "v1.12.3", + kubeadmVersion: "v1.15.0", + }, + newK8sVersion: "v1.15.0", + }, + { + name: "upgrading with n-4 kubelet is not supported", + vg: &fakeVersionGetter{ + clusterVersion: "v1.14.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.15.0", + }, + newK8sVersion: "v1.15.0", expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large }, { @@ -123,13 +141,22 @@ func TestEnforceVersionPolicies(t *testing.T) { expectedMandatoryErrs: 1, }, { - name: "the maximum skew between the cluster version and the kubelet versions should be one minor version. This may be forced through though.", + name: "the maximum skew between the cluster version and the kubelet versions should be three minor versions.", + vg: &fakeVersionGetter{ + clusterVersion: "v1.13.0", + kubeletVersion: "v1.10.8", + kubeadmVersion: "v1.13.0", + }, + newK8sVersion: "v1.13.0", + }, + { + name: "the maximum skew between the cluster version and the kubelet versions should be three minor versions. This may be forced through though.", vg: &fakeVersionGetter{ - clusterVersion: "v1.12.0", kubeletVersion: "v1.10.8", - kubeadmVersion: "v1.12.0", + clusterVersion: "v1.14.0", + kubeadmVersion: "v1.14.0", }, - newK8sVersion: "v1.12.0", + newK8sVersion: "v1.14.0", expectedSkippableErrs: 1, }, { diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index 09ad32e70d847..336efbf2174fb 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -40,6 +40,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy" "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo" nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node" + kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig" kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet" patchnodephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode" "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig" @@ -69,6 +70,12 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon errs = append(errs, err) } + // TODO: remove this in the 1.30 release cycle: + // https://github.com/kubernetes/kubeadm/issues/2414 + if err := createSuperAdminKubeConfig(cfg, kubeadmconstants.KubernetesDir, dryRun, nil, nil); err != nil { + errs = append(errs, err) + } + // Annotate the node with the crisocket information, sourced either from the InitConfiguration struct or // --cri-socket. + // TODO: In the future we want to use something more official like NodeStatus or similar for detecting this properly @@ -248,6 +255,7 @@ func unupgradedControlPlaneInstances(client clientset.Interface, nodeName string return nil, nil } +// WriteKubeletConfigFiles writes the kubelet config file to disk, but first creates a backup of any existing one. func WriteKubeletConfigFiles(cfg *kubeadmapi.InitConfiguration, patchesDir string, dryRun bool, out io.Writer) error { // Set up the kubelet directory to use.
If dry-running, this will return a fake directory kubeletDir, err := GetKubeletDir(dryRun) @@ -296,3 +304,64 @@ func GetKubeletDir(dryRun bool) (string, error) { } return kubeadmconstants.KubeletRunDirectory, nil } + +// createSuperAdminKubeConfig creates new admin.conf and super-admin.conf and then +// ensures that the admin.conf client has RBAC permissions to be cluster-admin. +// TODO: this code must not be present in the 1.30 release, remove it during the 1.30 +// release cycle: +// https://github.com/kubernetes/kubeadm/issues/2414 +func createSuperAdminKubeConfig(cfg *kubeadmapi.InitConfiguration, outDir string, dryRun bool, + ensureRBACFunc kubeconfigphase.EnsureRBACFunc, + createKubeConfigFileFunc kubeconfigphase.CreateKubeConfigFileFunc) error { + + if dryRun { + fmt.Printf("[dryrun] Would create a separate %s and RBAC for %s", + kubeadmconstants.SuperAdminKubeConfigFileName, kubeadmconstants.AdminKubeConfigFileName) + return nil + } + + if ensureRBACFunc == nil { + ensureRBACFunc = kubeconfigphase.EnsureAdminClusterRoleBindingImpl + } + if createKubeConfigFileFunc == nil { + createKubeConfigFileFunc = kubeconfigphase.CreateKubeConfigFile + } + + var ( + err error + adminPath = filepath.Join(outDir, kubeadmconstants.AdminKubeConfigFileName) + adminBackupPath = adminPath + ".backup" + superAdminPath = filepath.Join(outDir, kubeadmconstants.SuperAdminKubeConfigFileName) + superAdminBackupPath = superAdminPath + ".backup" + ) + + // Create new admin.conf and super-admin.conf. + // If something goes wrong, old existing files will be restored from backup as a best effort. + + restoreBackup := func() { + _ = os.Rename(adminBackupPath, adminPath) + _ = os.Rename(superAdminBackupPath, superAdminPath) + } + + _ = os.Rename(adminPath, adminBackupPath) + if err = createKubeConfigFileFunc(kubeadmconstants.AdminKubeConfigFileName, outDir, cfg); err != nil { + restoreBackup() + return err + } + + _ = os.Rename(superAdminPath, superAdminBackupPath) + if err = createKubeConfigFileFunc(kubeadmconstants.SuperAdminKubeConfigFileName, outDir, cfg); err != nil { + restoreBackup() + return err + } + + // Ensure the RBAC for admin.conf exists. + if _, err = kubeconfigphase.EnsureAdminClusterRoleBinding(outDir, ensureRBACFunc); err != nil { + restoreBackup() + return err + } + + _ = os.Remove(adminBackupPath) + _ = os.Remove(superAdminBackupPath) + return nil +} diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade_test.go b/cmd/kubeadm/app/phases/upgrade/postupgrade_test.go index f090889beb186..68595e43ba258 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade_test.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade_test.go @@ -17,18 +17,25 @@ limitations under the License. package upgrade import ( + "context" "os" "path/filepath" + "reflect" "regexp" "strings" "testing" + "time" "github.com/pkg/errors" errorsutil "k8s.io/apimachinery/pkg/util/errors" + clientset "k8s.io/client-go/kubernetes" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs" "k8s.io/kubernetes/cmd/kubeadm/app/constants" + certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig" "k8s.io/kubernetes/cmd/kubeadm/app/preflight" testutil "k8s.io/kubernetes/cmd/kubeadm/test" ) @@ -230,3 +237,106 @@ func rollbackFiles(files map[string]string, originalErr error) error { } return errors.Errorf("couldn't move these files: %v. 
Got errors: %v", files, errorsutil.NewAggregate(errs)) } + +// TODO: Remove this unit test during the 1.30 release cycle: +// https://github.com/kubernetes/kubeadm/issues/2414 +func TestCreateSuperAdminKubeConfig(t *testing.T) { + dir := testutil.SetupTempDir(t) + defer os.RemoveAll(dir) + + cfg := testutil.GetDefaultInternalConfig(t) + cfg.CertificatesDir = dir + + ca := certsphase.KubeadmCertRootCA() + _, _, err := ca.CreateAsCA(cfg) + if err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + kubeConfigExist bool + expectRBACError bool + expectedError bool + expectKubeConfigError bool + }{ + { + name: "no error", + }, + { + name: "no error, kubeconfig files already exist", + kubeConfigExist: true, + }, + { + name: "return RBAC error", + expectRBACError: true, + expectedError: true, + }, + { + name: "return kubeconfig error", + expectKubeConfigError: true, + expectedError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + + // Define a custom RBAC test function, so that there is no test coverage overlap. + ensureRBACFunc := func(context.Context, clientset.Interface, clientset.Interface, + time.Duration, time.Duration) (clientset.Interface, error) { + + if tc.expectRBACError { + return nil, errors.New("ensureRBACFunc error") + } + return nil, nil + } + + // Define a custom kubeconfig function so that we can fail at least one call. + kubeConfigFunc := func(a string, b string, cfg *kubeadmapi.InitConfiguration) error { + if tc.expectKubeConfigError { + return errors.New("kubeConfigFunc error") + } + return kubeconfigphase.CreateKubeConfigFile(a, b, cfg) + } + + // If kubeConfigExist is true, pre-create the admin.conf and super-admin.conf files. + if tc.kubeConfigExist { + b := []byte("foo") + if err := os.WriteFile(filepath.Join(dir, constants.AdminKubeConfigFileName), b, 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, constants.SuperAdminKubeConfigFileName), b, 0644); err != nil { + t.Fatal(err) + } + } + + // Call createSuperAdminKubeConfig() with a custom ensureRBACFunc(). + err := createSuperAdminKubeConfig(cfg, dir, false, ensureRBACFunc, kubeConfigFunc) + if (err != nil) != tc.expectedError { + t.Fatalf("expected error: %v, got: %v, error: %v", err != nil, tc.expectedError, err) + } + + // Obtain the list of files in the directory after createSuperAdminKubeConfig() is done. + var files []string + fileInfo, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + for _, file := range fileInfo { + files = append(files, file.Name()) + } + + // Verify the expected files. 
+ expectedFiles := []string{ + constants.AdminKubeConfigFileName, + constants.CACertName, + constants.CAKeyName, + constants.SuperAdminKubeConfigFileName, + } + if !reflect.DeepEqual(expectedFiles, files) { + t.Fatalf("expected files: %v, got: %v", expectedFiles, files) + } + }) + } +} diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index 540c1549fffa7..e5c88ac6f9eee 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -214,13 +214,15 @@ func upgradeComponent(component string, certsRenewMgr *renewal.Manager, waiter a recoverManifests[component] = backupManifestPath // Skip upgrade if current and new manifests are equal - equal, err := staticpod.ManifestFilesAreEqual(currentManifestPath, newManifestPath) + equal, diff, err := staticpod.ManifestFilesAreEqual(currentManifestPath, newManifestPath) if err != nil { return err } if equal { fmt.Printf("[upgrade/staticpods] Current and new manifests of %s are equal, skipping upgrade\n", component) return nil + } else { + klog.V(4).Infof("Pod manifest files diff:\n%s\n", diff) } // if certificate renewal should be performed @@ -495,6 +497,20 @@ func StaticPodControlPlane(client clientset.Interface, waiter apiclient.Waiter, // if not error, but not renewed because of external CA detected, inform the user fmt.Printf("[upgrade/staticpods] External CA detected, %s certificate can't be renewed\n", constants.AdminKubeConfigFileName) } + + // Do the same for super-admin.conf, but only if it exists + if _, err := os.Stat(filepath.Join(pathMgr.KubernetesDir(), constants.SuperAdminKubeConfigFileName)); err == nil { + // renew the certificate embedded in the super-admin.conf file + renewed, err := certsRenewMgr.RenewUsingLocalCA(constants.SuperAdminKubeConfigFileName) + if err != nil { + return rollbackOldManifests(recoverManifests, errors.Wrapf(err, "failed to upgrade the %s certificates", constants.SuperAdminKubeConfigFileName), pathMgr, false) + } + + if !renewed { + // if not error, but not renewed because of external CA detected, inform the user + fmt.Printf("[upgrade/staticpods] External CA detected, %s certificate can't be renewed\n", constants.SuperAdminKubeConfigFileName) + } + } } // Remove the temporary directories used on a best-effort (don't fail if the calls error out) diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index b3f79d9b1c362..4781a888c3644 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -61,7 +61,6 @@ apiVersion: %s kind: InitConfiguration nodeRegistration: name: foo - criSocket: %s localAPIEndpoint: advertiseAddress: 192.168.2.2 bindPort: 6443 @@ -86,7 +85,7 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 -`, kubeadmapiv1.SchemeGroupVersion.String(), constants.UnknownCRISocket) +`, kubeadmapiv1.SchemeGroupVersion.String()) // fakeWaiter is a fake apiclient.Waiter that returns errors it was initialized with type fakeWaiter struct { @@ -330,9 +329,7 @@ func TestStaticPodControlPlane(t *testing.T) { waitForHashChange: nil, waitForPodsWithLabel: nil, }, - moveFileFunc: func(oldPath, newPath string) error { - return os.Rename(oldPath, newPath) - }, + moveFileFunc: os.Rename, expectedErr: false, manifestShouldChange: true, }, @@ -343,9 +340,7 @@ func TestStaticPodControlPlane(t *testing.T) { waitForHashChange: nil, waitForPodsWithLabel: nil, }, - 
moveFileFunc: func(oldPath, newPath string) error { - return os.Rename(oldPath, newPath) - }, + moveFileFunc: os.Rename, expectedErr: true, manifestShouldChange: false, }, @@ -356,9 +351,7 @@ func TestStaticPodControlPlane(t *testing.T) { waitForHashChange: errors.New("boo! failed"), waitForPodsWithLabel: nil, }, - moveFileFunc: func(oldPath, newPath string) error { - return os.Rename(oldPath, newPath) - }, + moveFileFunc: os.Rename, expectedErr: true, manifestShouldChange: false, }, @@ -369,9 +362,7 @@ func TestStaticPodControlPlane(t *testing.T) { waitForHashChange: nil, waitForPodsWithLabel: errors.New("boo! failed"), }, - moveFileFunc: func(oldPath, newPath string) error { - return os.Rename(oldPath, newPath) - }, + moveFileFunc: os.Rename, expectedErr: true, manifestShouldChange: false, }, @@ -433,9 +424,7 @@ func TestStaticPodControlPlane(t *testing.T) { waitForHashChange: nil, waitForPodsWithLabel: nil, }, - moveFileFunc: func(oldPath, newPath string) error { - return os.Rename(oldPath, newPath) - }, + moveFileFunc: os.Rename, skipKubeConfig: constants.SchedulerKubeConfigFileName, expectedErr: true, manifestShouldChange: false, @@ -447,13 +436,34 @@ func TestStaticPodControlPlane(t *testing.T) { waitForHashChange: nil, waitForPodsWithLabel: nil, }, - moveFileFunc: func(oldPath, newPath string) error { - return os.Rename(oldPath, newPath) - }, + moveFileFunc: os.Rename, skipKubeConfig: constants.AdminKubeConfigFileName, expectedErr: true, manifestShouldChange: false, }, + { + description: "super-admin.conf is renewed if it exists", + waitErrsToReturn: map[string]error{ + waitForHashes: nil, + waitForHashChange: nil, + waitForPodsWithLabel: nil, + }, + moveFileFunc: os.Rename, + expectedErr: false, + manifestShouldChange: true, + }, + { + description: "no error is thrown if super-admin.conf does not exist", + waitErrsToReturn: map[string]error{ + waitForHashes: nil, + waitForHashChange: nil, + waitForPodsWithLabel: nil, + }, + moveFileFunc: os.Rename, + skipKubeConfig: constants.SuperAdminKubeConfigFileName, + expectedErr: false, + manifestShouldChange: true, + }, } for i := range tests { @@ -495,6 +505,7 @@ func TestStaticPodControlPlane(t *testing.T) { for _, kubeConfig := range []string{ constants.AdminKubeConfigFileName, + constants.SuperAdminKubeConfigFileName, constants.SchedulerKubeConfigFileName, constants.ControllerManagerKubeConfigFileName, } { @@ -600,7 +611,7 @@ func getConfig(version, certsDir, etcdDataDir string) (*kubeadmapi.InitConfigura configBytes := []byte(fmt.Sprintf(testConfiguration, certsDir, etcdDataDir, version)) // Unmarshal the config - return configutil.BytesToInitConfiguration(configBytes) + return configutil.BytesToInitConfiguration(configBytes, true /* skipCRIDetect */) } func getTempDir(t *testing.T, name string) (string, func()) { diff --git a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go index a5eb4bc04137f..31fbe7c5dae2a 100644 --- a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go +++ b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go @@ -56,7 +56,7 @@ func TestUploadConfiguration(t *testing.T) { cfg.ComponentConfigs = kubeadmapi.ComponentConfigMap{} cfg.ClusterConfiguration.KubernetesVersion = kubeadmconstants.MinimumControlPlaneVersion.WithPatch(10).String() cfg.NodeRegistration.Name = "node-foo" - cfg.NodeRegistration.CRISocket = kubeadmconstants.UnknownCRISocket + cfg.NodeRegistration.CRISocket = kubeadmconstants.DefaultCRISocket client := 
clientsetfake.NewSimpleClientset() // For idempotent test, we check the result of the second call. diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 815122825af39..70265a84cf2d5 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -893,6 +893,7 @@ func (MemCheck) Name() string { return "Mem" } +// InitNodeChecks returns checks specific to "kubeadm init" func InitNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.InitConfiguration, ignorePreflightErrors sets.Set[string], isSecondaryControlPlane bool, downloadCerts bool) ([]Checker, error) { if !isSecondaryControlPlane { // First, check if we're root separately from the other preflight checks and fail fast @@ -1013,6 +1014,7 @@ func RunInitNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.InitConfigura return RunChecks(checks, os.Stderr, ignorePreflightErrors) } +// JoinNodeChecks returns checks specific to "kubeadm join" func JoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.JoinConfiguration, ignorePreflightErrors sets.Set[string]) ([]Checker, error) { // First, check if we're root separately from the other preflight checks and fail fast if err := RunRootCheckOnly(ignorePreflightErrors); err != nil { diff --git a/cmd/kubeadm/app/preflight/checks_test.go b/cmd/kubeadm/app/preflight/checks_test.go index b5927fa0f7cbd..28a493ea9d09d 100644 --- a/cmd/kubeadm/app/preflight/checks_test.go +++ b/cmd/kubeadm/app/preflight/checks_test.go @@ -1071,10 +1071,12 @@ func TestJoinIPCheck(t *testing.T) { if _, err := isPrivileged.Check(); err != nil { t.Skip("not a privileged user") } + + opts := configutil.LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + } + internalcfg, err := configutil.DefaultedJoinConfiguration(&kubeadmapiv1.JoinConfiguration{ - NodeRegistration: kubeadmapiv1.NodeRegistrationOptions{ - CRISocket: constants.UnknownCRISocket, - }, Discovery: kubeadmapiv1.Discovery{ BootstrapToken: &kubeadmapiv1.BootstrapTokenDiscovery{ Token: configutil.PlaceholderToken.Token.String(), @@ -1082,7 +1084,7 @@ func TestJoinIPCheck(t *testing.T) { UnsafeSkipCAVerification: true, }, }, - }) + }, opts) if err != nil { t.Fatalf("unexpected failure when defaulting JoinConfiguration: %v", err) } diff --git a/cmd/kubeadm/app/util/arguments.go b/cmd/kubeadm/app/util/arguments.go index 82ab5d48a506a..126857207a634 100644 --- a/cmd/kubeadm/app/util/arguments.go +++ b/cmd/kubeadm/app/util/arguments.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) @@ -34,26 +35,22 @@ import ( // only the instances of this argument in the overrides to be applied. func ArgumentsToCommand(base []kubeadmapi.Arg, overrides []kubeadmapi.Arg) []string { var command []string - // Copy the base arguments into a new slice. - args := make([]kubeadmapi.Arg, len(base)) - copy(args, base) - - // Go trough the override arguments and delete all instances of arguments with the same name - // in the base list of arguments. - for i := 0; i < len(overrides); i++ { - repeat: - for j := 0; j < len(args); j++ { - if overrides[i].Name == args[j].Name { - // Remove this existing argument and search for another argument - // with the same name in base. - args = append(args[:j], args[j+1:]...) - goto repeat - } + // Copy the overrides arguments into a new slice. 
+ args := make([]kubeadmapi.Arg, len(overrides)) + copy(args, overrides) + + // overrideArgs is a set of args which will replace the args defined in the base + overrideArgs := sets.New[string]() + for _, arg := range overrides { + overrideArgs.Insert(arg.Name) + } + + for _, arg := range base { + if !overrideArgs.Has(arg.Name) { + args = append(args, arg) } } - // Concatenate the overrides and the base arguments and then sort them. - args = append(args, overrides...) sort.Slice(args, func(i, j int) bool { if args[i].Name == args[j].Name { return args[i].Value < args[j].Value diff --git a/cmd/kubeadm/app/util/config/cluster.go b/cmd/kubeadm/app/util/config/cluster.go index 6a2fbb414f29f..b4b3371dd98c8 100644 --- a/cmd/kubeadm/app/util/config/cluster.go +++ b/cmd/kubeadm/app/util/config/cluster.go @@ -59,7 +59,8 @@ func FetchInitConfigurationFromCluster(client clientset.Interface, printer outpu } // Apply dynamic defaults - if err := SetInitDynamicDefaults(cfg); err != nil { + // NB. skip CRI detection here because it won't be used at all and will be overridden later + if err := SetInitDynamicDefaults(cfg, true); err != nil { return nil, err } @@ -116,15 +117,6 @@ func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Inte if err := getAPIEndpoint(client, initcfg.NodeRegistration.Name, &initcfg.LocalAPIEndpoint); err != nil { return nil, errors.Wrap(err, "failed to getAPIEndpoint") } - } else { - // In the case where newControlPlane is true we don't go through getNodeRegistration() and initcfg.NodeRegistration.CRISocket is empty. - // This forces DetectCRISocket() to be called later on, and if there is more than one CRI installed on the system, it will error out, - // while asking for the user to provide an override for the CRI socket. Even if the user provides an override, the call to - // DetectCRISocket() can happen too early and thus ignore it (while still erroring out). - // However, if newControlPlane == true, initcfg.NodeRegistration is not used at all and it's overwritten later on. - // Thus it's necessary to supply some default value, that will avoid the call to DetectCRISocket() and as - // initcfg.NodeRegistration is discarded, setting whatever value here is harmless. 
- initcfg.NodeRegistration.CRISocket = constants.UnknownCRISocket } return initcfg, nil } diff --git a/cmd/kubeadm/app/util/config/cluster_test.go b/cmd/kubeadm/app/util/config/cluster_test.go index 0fcd93963e0a2..e31ac4dbe06a5 100644 --- a/cmd/kubeadm/app/util/config/cluster_test.go +++ b/cmd/kubeadm/app/util/config/cluster_test.go @@ -49,9 +49,7 @@ var cfgFiles = map[string][]byte{ "InitConfiguration_v1beta3": []byte(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration -nodeRegistration: - criSocket: %s -`, kubeadmapiv1.SchemeGroupVersion.String(), kubeadmconstants.UnknownCRISocket)), +`, kubeadmapiv1.SchemeGroupVersion.String())), "ClusterConfiguration_v1beta3": []byte(fmt.Sprintf(` apiVersion: %s kind: ClusterConfiguration @@ -648,6 +646,12 @@ func TestGetInitConfigurationFromCluster(t *testing.T) { if !rt.newControlPlane && (cfg.LocalAPIEndpoint.AdvertiseAddress != "1.2.3.4" || cfg.LocalAPIEndpoint.BindPort != 1234) { t.Errorf("invalid cfg.LocalAPIEndpoint: %v", cfg.LocalAPIEndpoint) } + if !rt.newControlPlane && (cfg.NodeRegistration.Name != nodeName || cfg.NodeRegistration.CRISocket != "myCRIsocket" || len(cfg.NodeRegistration.Taints) != 1) { + t.Errorf("invalid cfg.NodeRegistration: %v", cfg.NodeRegistration) + } + if rt.newControlPlane && len(cfg.NodeRegistration.CRISocket) > 0 { + t.Errorf("invalid cfg.NodeRegistration.CRISocket: expected empty CRISocket, but got %v", cfg.NodeRegistration.CRISocket) + } if _, ok := cfg.ComponentConfigs[componentconfigs.KubeletGroup]; !ok { t.Errorf("no cfg.ComponentConfigs[%q]", componentconfigs.KubeletGroup) } diff --git a/cmd/kubeadm/app/util/config/common.go b/cmd/kubeadm/app/util/config/common.go index 333a2dd44da26..d038ccf4878f1 100644 --- a/cmd/kubeadm/app/util/config/common.go +++ b/cmd/kubeadm/app/util/config/common.go @@ -44,6 +44,14 @@ import ( kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" ) +// LoadOrDefaultConfigurationOptions holds the common LoadOrDefaultConfiguration options. +type LoadOrDefaultConfigurationOptions struct { + // AllowExperimental indicates whether the experimental / work in progress APIs can be used. + AllowExperimental bool + // SkipCRIDetect indicates whether to skip the CRI socket detection when no CRI socket is provided. + SkipCRIDetect bool +} + // MarshalKubeadmConfigObject marshals an Object registered in the kubeadm scheme. If the object is a InitConfiguration or ClusterConfiguration, some extra logic is run func MarshalKubeadmConfigObject(obj runtime.Object, gv schema.GroupVersion) ([]byte, error) { switch internalcfg := obj.(type) { @@ -265,7 +273,7 @@ func MigrateOldConfig(oldConfig []byte, allowExperimental bool) ([]byte, error) } // Migrate InitConfiguration and ClusterConfiguration if there are any in the config if kubeadmutil.GroupVersionKindsHasInitConfiguration(gvks...) || kubeadmutil.GroupVersionKindsHasClusterConfiguration(gvks...) { - o, err := documentMapToInitConfiguration(gvkmap, true, allowExperimental, true) + o, err := documentMapToInitConfiguration(gvkmap, true, allowExperimental, true, false) if err != nil { return []byte{}, err } @@ -278,7 +286,7 @@ func MigrateOldConfig(oldConfig []byte, allowExperimental bool) ([]byte, error) // Migrate JoinConfiguration if there is any if kubeadmutil.GroupVersionKindsHasJoinConfiguration(gvks...) 
{ - o, err := documentMapToJoinConfiguration(gvkmap, true, allowExperimental, true) + o, err := documentMapToJoinConfiguration(gvkmap, true, allowExperimental, true, false) if err != nil { return []byte{}, err } @@ -291,7 +299,7 @@ func MigrateOldConfig(oldConfig []byte, allowExperimental bool) ([]byte, error) // Migrate ResetConfiguration if there is any if kubeadmutil.GroupVersionKindsHasResetConfiguration(gvks...) { - o, err := documentMapToResetConfiguration(gvkmap, true, allowExperimental, true) + o, err := documentMapToResetConfiguration(gvkmap, true, allowExperimental, true, false) if err != nil { return []byte{}, err } @@ -324,21 +332,21 @@ func ValidateConfig(config []byte, allowExperimental bool) error { // Validate InitConfiguration and ClusterConfiguration if there are any in the config if kubeadmutil.GroupVersionKindsHasInitConfiguration(gvks...) || kubeadmutil.GroupVersionKindsHasClusterConfiguration(gvks...) { - if _, err := documentMapToInitConfiguration(gvkmap, true, allowExperimental, true); err != nil { + if _, err := documentMapToInitConfiguration(gvkmap, true, allowExperimental, true, true); err != nil { return err } } // Validate JoinConfiguration if there is any if kubeadmutil.GroupVersionKindsHasJoinConfiguration(gvks...) { - if _, err := documentMapToJoinConfiguration(gvkmap, true, allowExperimental, true); err != nil { + if _, err := documentMapToJoinConfiguration(gvkmap, true, allowExperimental, true, true); err != nil { return err } } // Validate ResetConfiguration if there is any if kubeadmutil.GroupVersionKindsHasResetConfiguration(gvks...) { - if _, err := documentMapToResetConfiguration(gvkmap, true, allowExperimental, true); err != nil { + if _, err := documentMapToResetConfiguration(gvkmap, true, allowExperimental, true, true); err != nil { return err } } diff --git a/cmd/kubeadm/app/util/config/common_test.go b/cmd/kubeadm/app/util/config/common_test.go index 729cbd7281b5d..54102a88d94b1 100644 --- a/cmd/kubeadm/app/util/config/common_test.go +++ b/cmd/kubeadm/app/util/config/common_test.go @@ -510,30 +510,24 @@ func TestValidateConfig(t *testing.T) { cfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration - nodeRegistration: - criSocket: %s name: foo bar # not a valid subdomain - `, gv, constants.UnknownCRISocket)), + `, gv)), expectedError: true, }, { name: "unknown API GVK", - cfg: dedent.Dedent(fmt.Sprintf(` + cfg: dedent.Dedent(` apiVersion: foo/bar # not a valid GroupVersion kind: zzz # not a valid Kind - nodeRegistration: - criSocket: %s - `, constants.UnknownCRISocket)), + `), expectedError: true, }, { name: "legacy API GVK", - cfg: dedent.Dedent(fmt.Sprintf(` + cfg: dedent.Dedent(` apiVersion: kubeadm.k8s.io/v1beta1 # legacy API kind: InitConfiguration - nodeRegistration: - criSocket: %s - `, constants.UnknownCRISocket)), + `), expectedError: true, }, { @@ -542,9 +536,7 @@ func TestValidateConfig(t *testing.T) { apiVersion: %s kind: InitConfiguration foo: bar - nodeRegistration: - criSocket: %s - `, gv, constants.UnknownCRISocket)), + `, gv)), expectedError: true, }, { @@ -552,9 +544,7 @@ func TestValidateConfig(t *testing.T) { cfg: dedent.Dedent(fmt.Sprintf(` apiVersion: %s kind: InitConfiguration - nodeRegistration: - criSocket: %s - `, gv, constants.UnknownCRISocket)), + `, gv)), expectedError: false, }, { diff --git a/cmd/kubeadm/app/util/config/initconfiguration.go b/cmd/kubeadm/app/util/config/initconfiguration.go index 407670426bd6d..9ee852cd5f494 100644 --- a/cmd/kubeadm/app/util/config/initconfiguration.go +++ 
b/cmd/kubeadm/app/util/config/initconfiguration.go @@ -57,11 +57,11 @@ var ( ) // SetInitDynamicDefaults checks and sets configuration values for the InitConfiguration object -func SetInitDynamicDefaults(cfg *kubeadmapi.InitConfiguration) error { +func SetInitDynamicDefaults(cfg *kubeadmapi.InitConfiguration, skipCRIDetect bool) error { if err := SetBootstrapTokensDynamicDefaults(&cfg.BootstrapTokens); err != nil { return err } - if err := SetNodeRegistrationDynamicDefaults(&cfg.NodeRegistration, true); err != nil { + if err := SetNodeRegistrationDynamicDefaults(&cfg.NodeRegistration, true, skipCRIDetect); err != nil { return err } if err := SetAPIEndpointDynamicDefaults(&cfg.LocalAPIEndpoint); err != nil { @@ -97,7 +97,7 @@ func SetBootstrapTokensDynamicDefaults(cfg *[]bootstraptokenv1.BootstrapToken) e } // SetNodeRegistrationDynamicDefaults checks and sets configuration values for the NodeRegistration object -func SetNodeRegistrationDynamicDefaults(cfg *kubeadmapi.NodeRegistrationOptions, controlPlaneTaint bool) error { +func SetNodeRegistrationDynamicDefaults(cfg *kubeadmapi.NodeRegistrationOptions, controlPlaneTaint, skipCRIDetect bool) error { var err error cfg.Name, err = nodeutil.GetHostname(cfg.Name) if err != nil { @@ -110,6 +110,11 @@ func SetNodeRegistrationDynamicDefaults(cfg *kubeadmapi.NodeRegistrationOptions, } if cfg.CRISocket == "" { + if skipCRIDetect { + klog.V(4).Infof("skip CRI socket detection, fill with the default CRI socket %s", kubeadmconstants.DefaultCRISocket) + cfg.CRISocket = kubeadmconstants.DefaultCRISocket + return nil + } cfg.CRISocket, err = kubeadmruntime.DetectCRISocket() if err != nil { return err @@ -224,7 +229,7 @@ func DefaultedStaticInitConfiguration() (*kubeadmapi.InitConfiguration, error) { } // DefaultedInitConfiguration takes a versioned init config (often populated by flags), defaults it and converts it into internal InitConfiguration -func DefaultedInitConfiguration(versionedInitCfg *kubeadmapiv1.InitConfiguration, versionedClusterCfg *kubeadmapiv1.ClusterConfiguration) (*kubeadmapi.InitConfiguration, error) { +func DefaultedInitConfiguration(versionedInitCfg *kubeadmapiv1.InitConfiguration, versionedClusterCfg *kubeadmapiv1.ClusterConfiguration, skipCRIDetect bool) (*kubeadmapi.InitConfiguration, error) { internalcfg := &kubeadmapi.InitConfiguration{} // Takes passed flags into account; the defaulting is executed once again enforcing assignment of @@ -240,7 +245,7 @@ func DefaultedInitConfiguration(versionedInitCfg *kubeadmapiv1.InitConfiguration } // Applies dynamic defaults to settings not provided with flags - if err := SetInitDynamicDefaults(internalcfg); err != nil { + if err := SetInitDynamicDefaults(internalcfg, skipCRIDetect); err != nil { return nil, err } // Validates cfg (flags/configs + defaults + dynamic defaults) @@ -251,7 +256,7 @@ func DefaultedInitConfiguration(versionedInitCfg *kubeadmapiv1.InitConfiguration } // LoadInitConfigurationFromFile loads a supported versioned InitConfiguration from a file, converts it into internal config, defaults it and verifies it. 
-func LoadInitConfigurationFromFile(cfgPath string) (*kubeadmapi.InitConfiguration, error) { +func LoadInitConfigurationFromFile(cfgPath string, opts LoadOrDefaultConfigurationOptions) (*kubeadmapi.InitConfiguration, error) { klog.V(1).Infof("loading configuration from %q", cfgPath) b, err := os.ReadFile(cfgPath) @@ -259,7 +264,7 @@ func LoadInitConfigurationFromFile(cfgPath string) (*kubeadmapi.InitConfiguratio return nil, errors.Wrapf(err, "unable to read config from %q ", cfgPath) } - return BytesToInitConfiguration(b) + return BytesToInitConfiguration(b, opts.SkipCRIDetect) } // LoadOrDefaultInitConfiguration takes a path to a config file and a versioned configuration that can serve as the default config @@ -267,31 +272,31 @@ func LoadInitConfigurationFromFile(cfgPath string) (*kubeadmapi.InitConfiguratio // The external, versioned configuration is defaulted and converted to the internal type. // Right thereafter, the configuration is defaulted again with dynamic values (like IP addresses of a machine, etc) // Lastly, the internal config is validated and returned. -func LoadOrDefaultInitConfiguration(cfgPath string, versionedInitCfg *kubeadmapiv1.InitConfiguration, versionedClusterCfg *kubeadmapiv1.ClusterConfiguration) (*kubeadmapi.InitConfiguration, error) { +func LoadOrDefaultInitConfiguration(cfgPath string, versionedInitCfg *kubeadmapiv1.InitConfiguration, versionedClusterCfg *kubeadmapiv1.ClusterConfiguration, opts LoadOrDefaultConfigurationOptions) (*kubeadmapi.InitConfiguration, error) { if cfgPath != "" { // Loads configuration from config file, if provided // Nb. --config overrides command line flags - return LoadInitConfigurationFromFile(cfgPath) + return LoadInitConfigurationFromFile(cfgPath, opts) } - return DefaultedInitConfiguration(versionedInitCfg, versionedClusterCfg) + return DefaultedInitConfiguration(versionedInitCfg, versionedClusterCfg, opts.SkipCRIDetect) } // BytesToInitConfiguration converts a byte slice to an internal, defaulted and validated InitConfiguration object. // The map may contain many different YAML documents. These YAML documents are parsed one-by-one // and well-known ComponentConfig GroupVersionKinds are stored inside of the internal InitConfiguration struct. // The resulting InitConfiguration is then dynamically defaulted and validated prior to return. -func BytesToInitConfiguration(b []byte) (*kubeadmapi.InitConfiguration, error) { +func BytesToInitConfiguration(b []byte, skipCRIDetect bool) (*kubeadmapi.InitConfiguration, error) { gvkmap, err := kubeadmutil.SplitYAMLDocuments(b) if err != nil { return nil, err } - return documentMapToInitConfiguration(gvkmap, false, false, false) + return documentMapToInitConfiguration(gvkmap, false, false, false, skipCRIDetect) } // documentMapToInitConfiguration converts a map of GVKs and YAML documents to defaulted and validated configuration object. 
-func documentMapToInitConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecated, allowExperimental, strictErrors bool) (*kubeadmapi.InitConfiguration, error) { +func documentMapToInitConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecated, allowExperimental, strictErrors, skipCRIDetect bool) (*kubeadmapi.InitConfiguration, error) { var initcfg *kubeadmapi.InitConfiguration var clustercfg *kubeadmapi.ClusterConfiguration @@ -370,7 +375,7 @@ func documentMapToInitConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecat } // Applies dynamic defaults to settings not provided with flags - if err := SetInitDynamicDefaults(initcfg); err != nil { + if err := SetInitDynamicDefaults(initcfg, skipCRIDetect); err != nil { return nil, err } diff --git a/cmd/kubeadm/app/util/config/initconfiguration_test.go b/cmd/kubeadm/app/util/config/initconfiguration_test.go index bc7156097df3e..636f0857a86da 100644 --- a/cmd/kubeadm/app/util/config/initconfiguration_test.go +++ b/cmd/kubeadm/app/util/config/initconfiguration_test.go @@ -90,7 +90,11 @@ kubernetesVersion: %s`, kubeadmapiv1.SchemeGroupVersion.String(), certDir, const return } - obj, err := LoadInitConfigurationFromFile(cfgPath) + opts := LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + } + + obj, err := LoadInitConfigurationFromFile(cfgPath, opts) if rt.expectErr { if err == nil { t.Error("Unexpected success") @@ -126,9 +130,6 @@ func TestDefaultTaintsMarshaling(t *testing.T) { APIVersion: kubeadmapiv1.SchemeGroupVersion.String(), Kind: constants.InitConfigurationKind, }, - NodeRegistration: kubeadmapiv1.NodeRegistrationOptions{ - CRISocket: constants.UnknownCRISocket, - }, }, expectedTaintCnt: 1, }, @@ -139,9 +140,6 @@ func TestDefaultTaintsMarshaling(t *testing.T) { APIVersion: kubeadmapiv1.SchemeGroupVersion.String(), Kind: constants.InitConfigurationKind, }, - NodeRegistration: kubeadmapiv1.NodeRegistrationOptions{ - CRISocket: constants.UnknownCRISocket, - }, }, expectedTaintCnt: 1, }, @@ -153,8 +151,7 @@ func TestDefaultTaintsMarshaling(t *testing.T) { Kind: constants.InitConfigurationKind, }, NodeRegistration: kubeadmapiv1.NodeRegistrationOptions{ - Taints: []v1.Taint{}, - CRISocket: constants.UnknownCRISocket, + Taints: []v1.Taint{}, }, }, expectedTaintCnt: 0, @@ -171,7 +168,6 @@ func TestDefaultTaintsMarshaling(t *testing.T) { {Key: "taint1"}, {Key: "taint2"}, }, - CRISocket: constants.UnknownCRISocket, }, }, expectedTaintCnt: 2, @@ -185,7 +181,7 @@ func TestDefaultTaintsMarshaling(t *testing.T) { t.Fatalf("unexpected error while marshalling to YAML: %v", err) } - cfg, err := BytesToInitConfiguration(b) + cfg, err := BytesToInitConfiguration(b, true) if err != nil { t.Fatalf("unexpected error of BytesToInitConfiguration: %v\nconfig: %s", err, string(b)) } diff --git a/cmd/kubeadm/app/util/config/joinconfiguration.go b/cmd/kubeadm/app/util/config/joinconfiguration.go index 1ef041c76168a..09d5848fd6147 100644 --- a/cmd/kubeadm/app/util/config/joinconfiguration.go +++ b/cmd/kubeadm/app/util/config/joinconfiguration.go @@ -34,12 +34,12 @@ import ( ) // SetJoinDynamicDefaults checks and sets configuration values for the JoinConfiguration object -func SetJoinDynamicDefaults(cfg *kubeadmapi.JoinConfiguration) error { +func SetJoinDynamicDefaults(cfg *kubeadmapi.JoinConfiguration, skipCRIDetect bool) error { addControlPlaneTaint := false if cfg.ControlPlane != nil { addControlPlaneTaint = true } - if err := SetNodeRegistrationDynamicDefaults(&cfg.NodeRegistration, addControlPlaneTaint); err != nil { + if err := 
SetNodeRegistrationDynamicDefaults(&cfg.NodeRegistration, addControlPlaneTaint, skipCRIDetect); err != nil { return err } @@ -61,18 +61,18 @@ func SetJoinControlPlaneDefaults(cfg *kubeadmapi.JoinControlPlane) error { // Then the external, versioned configuration is defaulted and converted to the internal type. // Right thereafter, the configuration is defaulted again with dynamic values (like IP addresses of a machine, etc) // Lastly, the internal config is validated and returned. -func LoadOrDefaultJoinConfiguration(cfgPath string, defaultversionedcfg *kubeadmapiv1.JoinConfiguration) (*kubeadmapi.JoinConfiguration, error) { +func LoadOrDefaultJoinConfiguration(cfgPath string, defaultversionedcfg *kubeadmapiv1.JoinConfiguration, opts LoadOrDefaultConfigurationOptions) (*kubeadmapi.JoinConfiguration, error) { if cfgPath != "" { // Loads configuration from config file, if provided // Nb. --config overrides command line flags, TODO: fix this - return LoadJoinConfigurationFromFile(cfgPath) + return LoadJoinConfigurationFromFile(cfgPath, opts) } - return DefaultedJoinConfiguration(defaultversionedcfg) + return DefaultedJoinConfiguration(defaultversionedcfg, opts) } // LoadJoinConfigurationFromFile loads versioned JoinConfiguration from file, converts it to internal, defaults and validates it -func LoadJoinConfigurationFromFile(cfgPath string) (*kubeadmapi.JoinConfiguration, error) { +func LoadJoinConfigurationFromFile(cfgPath string, opts LoadOrDefaultConfigurationOptions) (*kubeadmapi.JoinConfiguration, error) { klog.V(1).Infof("loading configuration from %q", cfgPath) b, err := os.ReadFile(cfgPath) @@ -85,12 +85,12 @@ func LoadJoinConfigurationFromFile(cfgPath string) (*kubeadmapi.JoinConfiguratio return nil, err } - return documentMapToJoinConfiguration(gvkmap, false, false, false) + return documentMapToJoinConfiguration(gvkmap, false, false, false, opts.SkipCRIDetect) } // documentMapToJoinConfiguration takes a map between GVKs and YAML documents (as returned by SplitYAMLDocuments), // finds a JoinConfiguration, decodes it, dynamically defaults it and then validates it prior to return. 
-func documentMapToJoinConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecated, allowExperimental, strictErrors bool) (*kubeadmapi.JoinConfiguration, error) { +func documentMapToJoinConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecated, allowExperimental, strictErrors, skipCRIDetect bool) (*kubeadmapi.JoinConfiguration, error) { joinBytes := []byte{} for gvk, bytes := range gvkmap { // not interested in anything other than JoinConfiguration @@ -125,7 +125,7 @@ func documentMapToJoinConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecat } // Applies dynamic defaults to settings not provided with flags - if err := SetJoinDynamicDefaults(internalcfg); err != nil { + if err := SetJoinDynamicDefaults(internalcfg, skipCRIDetect); err != nil { return nil, err } // Validates cfg (flags/configs + defaults) @@ -137,7 +137,7 @@ func documentMapToJoinConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecat } // DefaultedJoinConfiguration takes a versioned JoinConfiguration (usually filled in by command line parameters), defaults it, converts it to internal and validates it -func DefaultedJoinConfiguration(defaultversionedcfg *kubeadmapiv1.JoinConfiguration) (*kubeadmapi.JoinConfiguration, error) { +func DefaultedJoinConfiguration(defaultversionedcfg *kubeadmapiv1.JoinConfiguration, opts LoadOrDefaultConfigurationOptions) (*kubeadmapi.JoinConfiguration, error) { internalcfg := &kubeadmapi.JoinConfiguration{} // Takes passed flags into account; the defaulting is executed once again enforcing assignment of @@ -148,7 +148,7 @@ func DefaultedJoinConfiguration(defaultversionedcfg *kubeadmapiv1.JoinConfigurat } // Applies dynamic defaults to settings not provided with flags - if err := SetJoinDynamicDefaults(internalcfg); err != nil { + if err := SetJoinDynamicDefaults(internalcfg, opts.SkipCRIDetect); err != nil { return nil, err } // Validates cfg (flags/configs + defaults) diff --git a/cmd/kubeadm/app/util/config/joinconfiguration_test.go b/cmd/kubeadm/app/util/config/joinconfiguration_test.go index 1b61929439f0d..9bd5e6da4ce37 100644 --- a/cmd/kubeadm/app/util/config/joinconfiguration_test.go +++ b/cmd/kubeadm/app/util/config/joinconfiguration_test.go @@ -78,7 +78,11 @@ func TestLoadJoinConfigurationFromFile(t *testing.T) { return } - obj, err := LoadJoinConfigurationFromFile(cfgPath) + opts := LoadOrDefaultConfigurationOptions{ + SkipCRIDetect: true, + } + + obj, err := LoadJoinConfigurationFromFile(cfgPath, opts) if rt.expectErr { if err == nil { t.Error("Unexpected success") diff --git a/cmd/kubeadm/app/util/config/resetconfiguration.go b/cmd/kubeadm/app/util/config/resetconfiguration.go index f6178a8beb9d7..9335db9c26487 100644 --- a/cmd/kubeadm/app/util/config/resetconfiguration.go +++ b/cmd/kubeadm/app/util/config/resetconfiguration.go @@ -36,9 +36,14 @@ import ( ) // SetResetDynamicDefaults checks and sets configuration values for the ResetConfiguration object -func SetResetDynamicDefaults(cfg *kubeadmapi.ResetConfiguration) error { +func SetResetDynamicDefaults(cfg *kubeadmapi.ResetConfiguration, skipCRIDetect bool) error { var err error if cfg.CRISocket == "" { + if skipCRIDetect { + klog.V(4).Infof("skip CRI socket detection, fill with the default CRI socket %s", constants.DefaultCRISocket) + cfg.CRISocket = constants.DefaultCRISocket + return nil + } cfg.CRISocket, err = kubeadmruntime.DetectCRISocket() if err != nil { return err @@ -60,17 +65,17 @@ func SetResetDynamicDefaults(cfg *kubeadmapi.ResetConfiguration) error { // Then the external, versioned configuration is 
defaulted and converted to the internal type. // Right thereafter, the configuration is defaulted again with dynamic values // Lastly, the internal config is validated and returned. -func LoadOrDefaultResetConfiguration(cfgPath string, defaultversionedcfg *kubeadmapiv1.ResetConfiguration, allowExperimental bool) (*kubeadmapi.ResetConfiguration, error) { +func LoadOrDefaultResetConfiguration(cfgPath string, defaultversionedcfg *kubeadmapiv1.ResetConfiguration, opts LoadOrDefaultConfigurationOptions) (*kubeadmapi.ResetConfiguration, error) { if cfgPath != "" { // Loads configuration from config file, if provided - return LoadResetConfigurationFromFile(cfgPath, allowExperimental) + return LoadResetConfigurationFromFile(cfgPath, opts) } - return DefaultedResetConfiguration(defaultversionedcfg) + return DefaultedResetConfiguration(defaultversionedcfg, opts) } // LoadResetConfigurationFromFile loads versioned ResetConfiguration from file, converts it to internal, defaults and validates it -func LoadResetConfigurationFromFile(cfgPath string, allowExperimental bool) (*kubeadmapi.ResetConfiguration, error) { +func LoadResetConfigurationFromFile(cfgPath string, opts LoadOrDefaultConfigurationOptions) (*kubeadmapi.ResetConfiguration, error) { klog.V(1).Infof("loading configuration from %q", cfgPath) b, err := os.ReadFile(cfgPath) @@ -83,12 +88,12 @@ func LoadResetConfigurationFromFile(cfgPath string, allowExperimental bool) (*ku return nil, err } - return documentMapToResetConfiguration(gvkmap, false, allowExperimental, false) + return documentMapToResetConfiguration(gvkmap, false, opts.AllowExperimental, false, opts.SkipCRIDetect) } // documentMapToResetConfiguration takes a map between GVKs and YAML documents (as returned by SplitYAMLDocuments), // finds a ResetConfiguration, decodes it, dynamically defaults it and then validates it prior to return. 
-func documentMapToResetConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecated, allowExperimental bool, strictErrors bool) (*kubeadmapi.ResetConfiguration, error) { +func documentMapToResetConfiguration(gvkmap kubeadmapi.DocumentMap, allowDeprecated, allowExperimental bool, strictErrors bool, skipCRIDetect bool) (*kubeadmapi.ResetConfiguration, error) { resetBytes := []byte{} for gvk, bytes := range gvkmap { // not interested in anything other than ResetConfiguration @@ -123,7 +128,7 @@ func documentMapToResetConfiguration(gvkmap kubeadmapi.DocumentMap, allowDepreca } // Applies dynamic defaults to settings not provided with flags - if err := SetResetDynamicDefaults(internalcfg); err != nil { + if err := SetResetDynamicDefaults(internalcfg, skipCRIDetect); err != nil { return nil, err } // Validates cfg @@ -135,7 +140,7 @@ func documentMapToResetConfiguration(gvkmap kubeadmapi.DocumentMap, allowDepreca } // DefaultedResetConfiguration takes a versioned ResetConfiguration (usually filled in by command line parameters), defaults it, converts it to internal and validates it -func DefaultedResetConfiguration(defaultversionedcfg *kubeadmapiv1.ResetConfiguration) (*kubeadmapi.ResetConfiguration, error) { +func DefaultedResetConfiguration(defaultversionedcfg *kubeadmapiv1.ResetConfiguration, opts LoadOrDefaultConfigurationOptions) (*kubeadmapi.ResetConfiguration, error) { internalcfg := &kubeadmapi.ResetConfiguration{} // Takes passed flags into account; the defaulting is executed once again enforcing assignment of @@ -146,7 +151,7 @@ func DefaultedResetConfiguration(defaultversionedcfg *kubeadmapiv1.ResetConfigur } // Applies dynamic defaults to settings not provided with flags - if err := SetResetDynamicDefaults(internalcfg); err != nil { + if err := SetResetDynamicDefaults(internalcfg, opts.SkipCRIDetect); err != nil { return nil, err } // Validates cfg diff --git a/cmd/kubeadm/app/util/config/resetconfiguration_test.go b/cmd/kubeadm/app/util/config/resetconfiguration_test.go index 12ed38be2bd02..a04f81f7acc67 100644 --- a/cmd/kubeadm/app/util/config/resetconfiguration_test.go +++ b/cmd/kubeadm/app/util/config/resetconfiguration_test.go @@ -75,7 +75,12 @@ func TestLoadResetConfigurationFromFile(t *testing.T) { return } - obj, err := LoadResetConfigurationFromFile(cfgPath, true) + opts := LoadOrDefaultConfigurationOptions{ + AllowExperimental: true, + SkipCRIDetect: true, + } + + obj, err := LoadResetConfigurationFromFile(cfgPath, opts) if rt.expectErr { if err == nil { t.Error("Unexpected success") diff --git a/cmd/kubeadm/app/util/env.go b/cmd/kubeadm/app/util/env.go index ea8de76d2eb53..5534b1d212af1 100644 --- a/cmd/kubeadm/app/util/env.go +++ b/cmd/kubeadm/app/util/env.go @@ -21,11 +21,13 @@ import ( "strings" v1 "k8s.io/api/core/v1" + + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) // GetProxyEnvVars builds a list of environment variables in order to use the right proxy -func GetProxyEnvVars() []v1.EnvVar { - envs := []v1.EnvVar{} +func GetProxyEnvVars() []kubeadmapi.EnvVar { + envs := []kubeadmapi.EnvVar{} for _, env := range os.Environ() { pos := strings.Index(env, "=") if pos == -1 { @@ -35,20 +37,23 @@ func GetProxyEnvVars() []v1.EnvVar { name := env[:pos] value := env[pos+1:] if strings.HasSuffix(strings.ToLower(name), "_proxy") && value != "" { - envVar := v1.EnvVar{Name: name, Value: value} + envVar := kubeadmapi.EnvVar{ + EnvVar: v1.EnvVar{Name: name, Value: value}, + } envs = append(envs, envVar) } } return envs } -// MergeEnv merges values of environment 
variable slices. The values defined in later slices overwrite values in previous ones. -func MergeEnv(envList ...[]v1.EnvVar) []v1.EnvVar { +// MergeKubeadmEnvVars merges values of environment variable slices. +// The values defined in later slices overwrite values in previous ones. +func MergeKubeadmEnvVars(envList ...[]kubeadmapi.EnvVar) []v1.EnvVar { m := make(map[string]v1.EnvVar) merged := []v1.EnvVar{} for _, envs := range envList { for _, env := range envs { - m[env.Name] = env + m[env.Name] = env.EnvVar } } for _, v := range m { diff --git a/cmd/kubeadm/app/util/env_test.go b/cmd/kubeadm/app/util/env_test.go index e1bc6a5aad6d6..ad70a4e858470 100644 --- a/cmd/kubeadm/app/util/env_test.go +++ b/cmd/kubeadm/app/util/env_test.go @@ -22,26 +22,34 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) -func TestMergeEnv(t *testing.T) { - baseEnv := []v1.EnvVar{} - extraEnv := []v1.EnvVar{} - MergeEnv(append(baseEnv, extraEnv...)) +func TestMergeKubeadmEnvVars(t *testing.T) { + baseEnv := []kubeadmapi.EnvVar{} + extraEnv := []kubeadmapi.EnvVar{} + MergeKubeadmEnvVars(append(baseEnv, extraEnv...)) var tests = []struct { name string - proxyEnv []v1.EnvVar - extraEnv []v1.EnvVar + proxyEnv []kubeadmapi.EnvVar + extraEnv []kubeadmapi.EnvVar mergedEnv []v1.EnvVar }{ { name: "normal case without duplicated env", - proxyEnv: []v1.EnvVar{ - {Name: "Foo1", Value: "Bar1"}, - {Name: "Foo2", Value: "Bar2"}, + proxyEnv: []kubeadmapi.EnvVar{ + { + EnvVar: v1.EnvVar{Name: "Foo1", Value: "Bar1"}, + }, + { + EnvVar: v1.EnvVar{Name: "Foo2", Value: "Bar2"}, + }, }, - extraEnv: []v1.EnvVar{ - {Name: "Foo3", Value: "Bar3"}, + extraEnv: []kubeadmapi.EnvVar{ + { + EnvVar: v1.EnvVar{Name: "Foo3", Value: "Bar3"}, + }, }, mergedEnv: []v1.EnvVar{ {Name: "Foo1", Value: "Bar1"}, @@ -51,12 +59,18 @@ func TestMergeEnv(t *testing.T) { }, { name: "extraEnv env take precedence over the proxyEnv", - proxyEnv: []v1.EnvVar{ - {Name: "Foo1", Value: "Bar1"}, - {Name: "Foo2", Value: "Bar2"}, + proxyEnv: []kubeadmapi.EnvVar{ + { + EnvVar: v1.EnvVar{Name: "Foo1", Value: "Bar1"}, + }, + { + EnvVar: v1.EnvVar{Name: "Foo2", Value: "Bar2"}, + }, }, - extraEnv: []v1.EnvVar{ - {Name: "Foo2", Value: "Bar3"}, + extraEnv: []kubeadmapi.EnvVar{ + { + EnvVar: v1.EnvVar{Name: "Foo2", Value: "Bar3"}, + }, }, mergedEnv: []v1.EnvVar{ {Name: "Foo1", Value: "Bar1"}, @@ -67,7 +81,7 @@ func TestMergeEnv(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - envs := MergeEnv(test.proxyEnv, test.extraEnv) + envs := MergeKubeadmEnvVars(test.proxyEnv, test.extraEnv) if !assert.ElementsMatch(t, envs, test.mergedEnv) { t.Errorf("expected env: %v, got: %v", test.mergedEnv, envs) } diff --git a/cmd/kubeadm/app/util/error.go b/cmd/kubeadm/app/util/error.go index ea0ae67091ba0..17271f024d310 100644 --- a/cmd/kubeadm/app/util/error.go +++ b/cmd/kubeadm/app/util/error.go @@ -38,8 +38,10 @@ const ( ) var ( + // ErrInvalidSubCommandMsg is an error message returned on invalid subcommands ErrInvalidSubCommandMsg = "invalid subcommand" - ErrExit = errors.New("exit") + // ErrExit is an error returned when kubeadm is about to exit + ErrExit = errors.New("exit") ) // fatal prints the message if set and then exits. 
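The MergeKubeadmEnvVars change above keeps the old MergeEnv precedence rule: values defined in later slices overwrite values from earlier ones, and because the merge goes through a map the result order is not guaranteed. Below is a minimal standalone sketch of that precedence rule; the envVar type and mergeEnv function are illustrative stand-ins, not the kubeadmapi.EnvVar wrapper or the kubeadm helper itself.

package main

import (
	"fmt"
	"sort"
)

// envVar mirrors the Name/Value shape of v1.EnvVar, for illustration only.
type envVar struct {
	Name, Value string
}

// mergeEnv applies the same precedence rule as MergeKubeadmEnvVars:
// values defined in later slices overwrite values defined in earlier ones.
func mergeEnv(envList ...[]envVar) []envVar {
	m := map[string]envVar{}
	for _, envs := range envList {
		for _, env := range envs {
			m[env.Name] = env
		}
	}
	merged := make([]envVar, 0, len(m))
	for _, v := range m {
		merged = append(merged, v)
	}
	// Sort by name for a deterministic result; map iteration order is random.
	sort.Slice(merged, func(i, j int) bool { return merged[i].Name < merged[j].Name })
	return merged
}

func main() {
	proxyEnv := []envVar{{"HTTP_PROXY", "http://proxy:3128"}, {"NO_PROXY", "10.0.0.0/8"}}
	extraEnv := []envVar{{"NO_PROXY", "10.0.0.0/8,192.168.0.0/16"}}
	// extraEnv wins for NO_PROXY, matching the "extraEnv take precedence" test case above.
	fmt.Println(mergeEnv(proxyEnv, extraEnv))
}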
diff --git a/cmd/kubeadm/app/util/etcd/etcd.go b/cmd/kubeadm/app/util/etcd/etcd.go index f26c7841efc05..50784fda107ba 100644 --- a/cmd/kubeadm/app/util/etcd/etcd.go +++ b/cmd/kubeadm/app/util/etcd/etcd.go @@ -53,6 +53,8 @@ var etcdBackoff = wait.Backoff{ Jitter: 0.1, } +// ErrNoMemberIDForPeerURL is returned when it is not possible to obtain a member ID +// from a given peer URL var ErrNoMemberIDForPeerURL = errors.New("no member id found for peer URL") // ClusterInterrogator is an interface to get etcd cluster related information diff --git a/cmd/kubeadm/app/util/etcd/etcd_test.go b/cmd/kubeadm/app/util/etcd/etcd_test.go index ac05118c0797c..4ebff47ea8637 100644 --- a/cmd/kubeadm/app/util/etcd/etcd_test.go +++ b/cmd/kubeadm/app/util/etcd/etcd_test.go @@ -135,7 +135,7 @@ func TestGetClientURL(t *testing.T) { } func TestGetPeerURL(t *testing.T) { - testGetURL(t, GetClientURL, constants.EtcdListenClientPort) + testGetURL(t, GetPeerURL, constants.EtcdListenPeerPort) } func TestGetClientURLByIP(t *testing.T) { diff --git a/cmd/kubeadm/app/util/initsystem/initsystem_unix.go b/cmd/kubeadm/app/util/initsystem/initsystem_unix.go index 40acb69fed303..04c21772127b0 100644 --- a/cmd/kubeadm/app/util/initsystem/initsystem_unix.go +++ b/cmd/kubeadm/app/util/initsystem/initsystem_unix.go @@ -125,7 +125,7 @@ func (sysd SystemdInitSystem) ServiceExists(service string) bool { args := []string{"status", service} outBytes, _ := exec.Command("systemctl", args...).Output() output := string(outBytes) - return !strings.Contains(output, "Loaded: not-found") + return !strings.Contains(output, "Loaded: not-found") && !strings.Contains(output, "could not be found") } // ServiceIsEnabled ensures the service is enabled to start on each boot. diff --git a/cmd/kubeadm/app/util/marshal.go b/cmd/kubeadm/app/util/marshal.go index 6be6b87ade80a..c3bc36f5f7977 100644 --- a/cmd/kubeadm/app/util/marshal.go +++ b/cmd/kubeadm/app/util/marshal.go @@ -54,23 +54,11 @@ func MarshalToYamlForCodecs(obj runtime.Object, gv schema.GroupVersion, codecs s return runtime.Encode(encoder, obj) } -// UnmarshalFromYaml unmarshals yaml into an object. -func UnmarshalFromYaml(buffer []byte, gv schema.GroupVersion) (runtime.Object, error) { - return UnmarshalFromYamlForCodecs(buffer, gv, clientsetscheme.Codecs) -} - -// UnmarshalFromYamlForCodecs unmarshals yaml into an object using the specified codec -// TODO: Is specifying the gv really needed here? -// TODO: Can we support json out of the box easily here? -func UnmarshalFromYamlForCodecs(buffer []byte, gv schema.GroupVersion, codecs serializer.CodecFactory) (runtime.Object, error) { - const mediaType = runtime.ContentTypeYAML - info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType) - if !ok { - return nil, errors.Errorf("unsupported media type %q", mediaType) - } - - decoder := codecs.DecoderToVersion(info.Serializer, gv) - obj, err := runtime.Decode(decoder, buffer) +// UniversalUnmarshal unmarshals YAML or JSON into a runtime.Object using the universal deserializer. 
+func UniversalUnmarshal(buffer []byte) (runtime.Object, error) { + codecs := clientsetscheme.Codecs + decoder := codecs.UniversalDeserializer() + obj, _, err := decoder.Decode(buffer, nil, nil) if err != nil { return nil, errors.Wrapf(err, "failed to decode %s into runtime.Object", buffer) } diff --git a/cmd/kubeadm/app/util/marshal_test.go b/cmd/kubeadm/app/util/marshal_test.go index 4cb2aa3f42e10..ee3aac68aac6c 100644 --- a/cmd/kubeadm/app/util/marshal_test.go +++ b/cmd/kubeadm/app/util/marshal_test.go @@ -24,13 +24,9 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - bootstraptokenv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) @@ -84,7 +80,7 @@ func TestMarshalUnmarshalYaml(t *testing.T) { t.Logf("\n%s", bytes) - obj2, err := UnmarshalFromYaml(bytes, corev1.SchemeGroupVersion) + obj2, err := UniversalUnmarshal(bytes) if err != nil { t.Fatalf("unexpected error marshalling: %v", err) } @@ -111,50 +107,48 @@ func TestMarshalUnmarshalYaml(t *testing.T) { } } -func TestMarshalUnmarshalToYamlForCodecs(t *testing.T) { - cfg := &kubeadmapiv1.InitConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: constants.InitConfigurationKind, - APIVersion: kubeadmapiv1.SchemeGroupVersion.String(), - }, - NodeRegistration: kubeadmapiv1.NodeRegistrationOptions{ - Name: "testNode", - CRISocket: "unix:///var/run/cri.sock", - }, - BootstrapTokens: []bootstraptokenv1.BootstrapToken{ - { - Token: &bootstraptokenv1.BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, - }, - }, - // NOTE: Using MarshalToYamlForCodecs and UnmarshalFromYamlForCodecs for ClusterConfiguration fields here won't work - // by design. This is because we have a `json:"-"` annotation in order to avoid struct duplication. See the comment - // at the kubeadmapiv1.InitConfiguration definition. 
+func TestUnmarshalJson(t *testing.T) { + bytes := []byte(string(`{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "someName", + "namespace": "testNamespace", + "labels": { + "test": "yes" } + }, + "spec": { + "restartPolicy": "Always" + } +}`)) - kubeadmapiv1.SetDefaults_InitConfiguration(cfg) - scheme := runtime.NewScheme() - if err := kubeadmapiv1.AddToScheme(scheme); err != nil { - t.Fatal(err) - } - codecs := serializer.NewCodecFactory(scheme) + t.Logf("\n%s", bytes) - bytes, err := MarshalToYamlForCodecs(cfg, kubeadmapiv1.SchemeGroupVersion, codecs) + obj2, err := UniversalUnmarshal(bytes) if err != nil { - t.Fatalf("unexpected error marshalling InitConfiguration: %v", err) + t.Fatalf("unexpected error marshalling: %v", err) } - t.Logf("\n%s", bytes) - obj, err := UnmarshalFromYamlForCodecs(bytes, kubeadmapiv1.SchemeGroupVersion, codecs) - if err != nil { - t.Fatalf("unexpected error unmarshalling InitConfiguration: %v", err) + pod2, ok := obj2.(*corev1.Pod) + if !ok { + t.Fatal("did not get a Pod") + } + + if pod2.Name != "someName" { + t.Errorf("expected someName, got %q", pod2.Name) + } + + if pod2.Namespace != "testNamespace" { + t.Errorf("expected testNamespace, got %q", pod2.Namespace) } - cfg2, ok := obj.(*kubeadmapiv1.InitConfiguration) - if !ok || cfg2 == nil { - t.Fatal("did not get InitConfiguration back") + if !reflect.DeepEqual(pod2.Labels, map[string]string{"test": "yes"}) { + t.Errorf("expected [test:yes], got %v", pod2.Labels) } - if !reflect.DeepEqual(*cfg, *cfg2) { - t.Errorf("expected %v, got %v", *cfg, *cfg2) + + if pod2.Spec.RestartPolicy != "Always" { + t.Errorf("expected Always, got %q", pod2.Spec.RestartPolicy) } } diff --git a/cmd/kubeadm/app/util/patches/patches.go b/cmd/kubeadm/app/util/patches/patches.go index fe7b7710c02de..e28656457e2ba 100644 --- a/cmd/kubeadm/app/util/patches/patches.go +++ b/cmd/kubeadm/app/util/patches/patches.go @@ -75,6 +75,7 @@ func (ps *patchSet) String() string { ) } +// KubeletConfiguration defines the kubeletconfiguration patch target. const KubeletConfiguration = "kubeletconfiguration" var ( @@ -102,6 +103,7 @@ var ( } ) +// KnownTargets returns the locally defined knownTargets. func KnownTargets() []string { return knownTargets } diff --git a/cmd/kubeadm/app/util/pkiutil/pki_helpers.go b/cmd/kubeadm/app/util/pkiutil/pki_helpers.go index dc78d1b3f6b86..5fa3de6a53b35 100644 --- a/cmd/kubeadm/app/util/pkiutil/pki_helpers.go +++ b/cmd/kubeadm/app/util/pkiutil/pki_helpers.go @@ -60,16 +60,16 @@ const ( rsaKeySize = 2048 ) -// CertConfig is a wrapper around certutil.Config extending it with PublicKeyAlgorithm. +// CertConfig is a wrapper around certutil.Config extending it with EncryptionAlgorithm. 
type CertConfig struct { certutil.Config - NotAfter *time.Time - PublicKeyAlgorithm x509.PublicKeyAlgorithm + NotAfter *time.Time + EncryptionAlgorithm kubeadmapi.EncryptionAlgorithmType } // NewCertificateAuthority creates new certificate and private key for the certificate authority func NewCertificateAuthority(config *CertConfig) (*x509.Certificate, crypto.Signer, error) { - key, err := NewPrivateKey(config.PublicKeyAlgorithm) + key, err := NewPrivateKey(config.EncryptionAlgorithm) if err != nil { return nil, nil, errors.Wrap(err, "unable to create private key while generating CA certificate") } @@ -86,7 +86,7 @@ func NewCertificateAuthority(config *CertConfig) (*x509.Certificate, crypto.Sign // NewIntermediateCertificateAuthority creates new certificate and private key for an intermediate certificate authority func NewIntermediateCertificateAuthority(parentCert *x509.Certificate, parentKey crypto.Signer, config *CertConfig) (*x509.Certificate, crypto.Signer, error) { - key, err := NewPrivateKey(config.PublicKeyAlgorithm) + key, err := NewPrivateKey(config.EncryptionAlgorithm) if err != nil { return nil, nil, errors.Wrap(err, "unable to create private key while generating intermediate CA certificate") } @@ -105,7 +105,7 @@ func NewCertAndKey(caCert *x509.Certificate, caKey crypto.Signer, config *CertCo return nil, nil, errors.New("must specify at least one ExtKeyUsage") } - key, err := NewPrivateKey(config.PublicKeyAlgorithm) + key, err := NewPrivateKey(config.EncryptionAlgorithm) if err != nil { return nil, nil, errors.Wrap(err, "unable to create private key") } @@ -120,7 +120,7 @@ func NewCertAndKey(caCert *x509.Certificate, caKey crypto.Signer, config *CertCo // NewCSRAndKey generates a new key and CSR and that could be signed to create the given certificate func NewCSRAndKey(config *CertConfig) (*x509.CertificateRequest, crypto.Signer, error) { - key, err := NewPrivateKey(config.PublicKeyAlgorithm) + key, err := NewPrivateKey(config.EncryptionAlgorithm) if err != nil { return nil, nil, errors.Wrap(err, "unable to create private key") } @@ -623,8 +623,9 @@ func EncodePublicKeyPEM(key crypto.PublicKey) ([]byte, error) { // NewPrivateKey returns a new private key. var NewPrivateKey = GeneratePrivateKey -func GeneratePrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error) { - if keyType == x509.ECDSA { +// GeneratePrivateKey is the default function for generating private keys. 
+func GeneratePrivateKey(keyType kubeadmapi.EncryptionAlgorithmType) (crypto.Signer, error) { + if keyType == kubeadmapi.EncryptionAlgorithmECDSA { return ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) } diff --git a/cmd/kubeadm/app/util/pkiutil/pki_helpers_test.go b/cmd/kubeadm/app/util/pkiutil/pki_helpers_test.go index 9a4590814be02..4b17b336104fd 100644 --- a/cmd/kubeadm/app/util/pkiutil/pki_helpers_test.go +++ b/cmd/kubeadm/app/util/pkiutil/pki_helpers_test.go @@ -52,7 +52,7 @@ func TestMain(m *testing.M) { Config: certutil.Config{ CommonName: "Root CA 1", }, - PublicKeyAlgorithm: x509.RSA, + EncryptionAlgorithm: kubeadmapi.EncryptionAlgorithmRSA, }) if err != nil { panic(fmt.Sprintf("Failed generating Root CA: %v", err)) @@ -112,7 +112,7 @@ func TestHasServerAuth(t *testing.T) { // Override NewPrivateKey to reuse the same key for all certs // since this test is only checking cert.ExtKeyUsage privateKeyFunc := NewPrivateKey - NewPrivateKey = func(x509.PublicKeyAlgorithm) (crypto.Signer, error) { + NewPrivateKey = func(kubeadmapi.EncryptionAlgorithmType) (crypto.Signer, error) { return rootCAKey, nil } defer func() { @@ -141,7 +141,7 @@ func TestHasServerAuth(t *testing.T) { CommonName: "test", Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, }, - PublicKeyAlgorithm: x509.ECDSA, + EncryptionAlgorithm: kubeadmapi.EncryptionAlgorithmECDSA, }, expected: true, }, diff --git a/cmd/kubeadm/app/util/pkiutil/testing/testing.go b/cmd/kubeadm/app/util/pkiutil/testing/testing.go index 6ddd943432310..588aaacaef9d9 100644 --- a/cmd/kubeadm/app/util/pkiutil/testing/testing.go +++ b/cmd/kubeadm/app/util/pkiutil/testing/testing.go @@ -18,7 +18,6 @@ package testing import ( "crypto" - "crypto/x509" "fmt" "os" "path/filepath" @@ -29,6 +28,7 @@ import ( "sync" "testing" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" ) @@ -75,7 +75,7 @@ func install() (cleanup func()) { } } -func newPrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error) { +func newPrivateKey(keyType kubeadmapi.EncryptionAlgorithmType) (crypto.Signer, error) { lock.Lock() defer lock.Unlock() @@ -108,7 +108,7 @@ func newPrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error) { keyName := "" switch keyType { - case x509.ECDSA: + case kubeadmapi.EncryptionAlgorithmECDSA: ecdsa++ keyName = fmt.Sprintf("%d.ecdsa", ecdsa) default: diff --git a/cmd/kubeadm/app/util/staticpod/utils.go b/cmd/kubeadm/app/util/staticpod/utils.go index 7c58a442ca48f..ea2b13f4b1698 100644 --- a/cmd/kubeadm/app/util/staticpod/utils.go +++ b/cmd/kubeadm/app/util/staticpod/utils.go @@ -29,15 +29,14 @@ import ( "strings" "sync" - "github.com/google/go-cmp/cmp" "github.com/pkg/errors" + "github.com/pmezard/go-difflib/difflib" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/dump" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/klog/v2" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -190,9 +189,9 @@ func PatchStaticPod(pod *v1.Pod, patchesDir string, output io.Writer) (*v1.Pod, return pod, err } - obj, err := kubeadmutil.UnmarshalFromYaml(patchTarget.Data, v1.SchemeGroupVersion) + obj, err := kubeadmutil.UniversalUnmarshal(patchTarget.Data) if err != nil { - return pod, errors.Wrap(err, "failed to unmarshal patched manifest from YAML") + return pod, errors.Wrap(err, "failed to unmarshal patched manifest") } 
pod2, ok := obj.(*v1.Pod) @@ -233,12 +232,15 @@ func ReadStaticPodFromDisk(manifestPath string) (*v1.Pod, error) { return &v1.Pod{}, errors.Wrapf(err, "failed to read manifest for %q", manifestPath) } - obj, err := kubeadmutil.UnmarshalFromYaml(buf, v1.SchemeGroupVersion) + obj, err := kubeadmutil.UniversalUnmarshal(buf) if err != nil { - return &v1.Pod{}, errors.Errorf("failed to unmarshal manifest for %q from YAML: %v", manifestPath, err) + return &v1.Pod{}, errors.Errorf("failed to unmarshal manifest for %q: %v", manifestPath, err) } - pod := obj.(*v1.Pod) + pod, ok := obj.(*v1.Pod) + if !ok { + return &v1.Pod{}, errors.Errorf("failed to parse Pod object defined in %q", manifestPath) + } return pod, nil } @@ -354,14 +356,14 @@ func GetEtcdProbeEndpoint(cfg *kubeadmapi.Etcd, isIPv6 bool) (string, int32, v1. } // ManifestFilesAreEqual compares 2 files. It returns true if their contents are equal, false otherwise -func ManifestFilesAreEqual(path1, path2 string) (bool, error) { +func ManifestFilesAreEqual(path1, path2 string) (bool, string, error) { pod1, err := ReadStaticPodFromDisk(path1) if err != nil { - return false, err + return false, "", err } pod2, err := ReadStaticPodFromDisk(path2) if err != nil { - return false, err + return false, "", err } hasher := md5.New() @@ -370,10 +372,30 @@ func ManifestFilesAreEqual(path1, path2 string) (bool, error) { DeepHashObject(hasher, pod2) hash2 := hasher.Sum(nil)[0:] if bytes.Equal(hash1, hash2) { - return true, nil + return true, "", nil + } + + manifest1, err := kubeadmutil.MarshalToYaml(pod1, v1.SchemeGroupVersion) + if err != nil { + return false, "", errors.Wrapf(err, "failed to marshal Pod manifest for %q to YAML", path1) + } + + manifest2, err := kubeadmutil.MarshalToYaml(pod2, v1.SchemeGroupVersion) + if err != nil { + return false, "", errors.Wrapf(err, "failed to marshal Pod manifest for %q to YAML", path2) + } + + diff := difflib.UnifiedDiff{ + A: difflib.SplitLines(string(manifest1)), + B: difflib.SplitLines(string(manifest2)), } - klog.V(4).Infof("Pod manifest files diff:\n%s\n", cmp.Diff(pod1, pod2)) - return false, nil + + diffStr, err := difflib.GetUnifiedDiffString(diff) + if err != nil { + return false, "", errors.Wrapf(err, "failed to generate the differences between manifest %q and manifest %q", path1, path2) + } + + return false, diffStr, nil } // getProbeAddress returns a valid probe address. @@ -389,6 +411,8 @@ func getProbeAddress(addr string) string { return addr } +// GetUsersAndGroups returns the local usersAndGroups, but first creates it +// in a thread safe way once. func GetUsersAndGroups() (*users.UsersAndGroups, error) { var err error usersAndGroupsOnce.Do(func() { diff --git a/cmd/kubeadm/app/util/staticpod/utils_linux.go b/cmd/kubeadm/app/util/staticpod/utils_linux.go index 661e148b8cc17..aebf04dd8bc90 100644 --- a/cmd/kubeadm/app/util/staticpod/utils_linux.go +++ b/cmd/kubeadm/app/util/staticpod/utils_linux.go @@ -26,7 +26,7 @@ import ( "github.com/pkg/errors" v1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -140,7 +140,7 @@ func runKubeControllerManagerAsNonRoot(pod *v1.Pod, runAsUser, runAsGroup, suppl } } pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{ - AllowPrivilegeEscalation: pointer.Bool(false), + AllowPrivilegeEscalation: ptr.To(false), Capabilities: &v1.Capabilities{ // We drop all capabilities that are added by default. 
Drop: []v1.Capability{"ALL"}, @@ -159,7 +159,7 @@ func runKubeSchedulerAsNonRoot(pod *v1.Pod, runAsUser, runAsGroup *int64, update return err } pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{ - AllowPrivilegeEscalation: pointer.Bool(false), + AllowPrivilegeEscalation: ptr.To(false), // We drop all capabilities that are added by default. Capabilities: &v1.Capabilities{ Drop: []v1.Capability{"ALL"}, @@ -184,7 +184,7 @@ func runEtcdAsNonRoot(pod *v1.Pod, runAsUser, runAsGroup *int64, updatePathOwner return err } pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{ - AllowPrivilegeEscalation: pointer.Bool(false), + AllowPrivilegeEscalation: ptr.To(false), // We drop all capabilities that are added by default. Capabilities: &v1.Capabilities{ Drop: []v1.Capability{"ALL"}, diff --git a/cmd/kubeadm/app/util/staticpod/utils_linux_test.go b/cmd/kubeadm/app/util/staticpod/utils_linux_test.go index 2eb5dc2da4382..29a3e675e1ea9 100644 --- a/cmd/kubeadm/app/util/staticpod/utils_linux_test.go +++ b/cmd/kubeadm/app/util/staticpod/utils_linux_test.go @@ -25,7 +25,7 @@ import ( "testing" v1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -40,8 +40,8 @@ type ownerAndPermissions struct { func verifyPodSecurityContext(t *testing.T, pod *v1.Pod, wantRunAsUser, wantRunAsGroup int64, wantSupGroup []int64) { t.Helper() wantPodSecurityContext := &v1.PodSecurityContext{ - RunAsUser: pointer.Int64(wantRunAsUser), - RunAsGroup: pointer.Int64(wantRunAsGroup), + RunAsUser: ptr.To(wantRunAsUser), + RunAsGroup: ptr.To(wantRunAsGroup), SupplementalGroups: wantSupGroup, SeccompProfile: &v1.SeccompProfile{ Type: v1.SeccompProfileTypeRuntimeDefault, @@ -109,7 +109,7 @@ func TestRunKubeControllerManagerAsNonRoot(t *testing.T) { t.Fatal(err) } verifyPodSecurityContext(t, &pod, runAsUser, runAsGroup, []int64{supGroup}) - verifyContainerSecurityContext(t, pod.Spec.Containers[0], nil, []v1.Capability{"ALL"}, pointer.Bool(false)) + verifyContainerSecurityContext(t, pod.Spec.Containers[0], nil, []v1.Capability{"ALL"}, ptr.To(false)) wantUpdateFiles := map[string]ownerAndPermissions{ filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName): {uid: runAsUser, gid: runAsGroup, permissions: 0600}, filepath.Join(cfg.CertificatesDir, kubeadmconstants.ServiceAccountPrivateKeyName): {uid: 0, gid: supGroup, permissions: 0640}, @@ -129,7 +129,7 @@ func TestRunKubeSchedulerAsNonRoot(t *testing.T) { t.Fatal(err) } verifyPodSecurityContext(t, &pod, runAsUser, runAsGroup, nil) - verifyContainerSecurityContext(t, pod.Spec.Containers[0], nil, []v1.Capability{"ALL"}, pointer.Bool(false)) + verifyContainerSecurityContext(t, pod.Spec.Containers[0], nil, []v1.Capability{"ALL"}, ptr.To(false)) wantUpdateFiles := map[string]ownerAndPermissions{ filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.SchedulerKubeConfigFileName): {uid: runAsUser, gid: runAsGroup, permissions: 0600}, } @@ -158,7 +158,7 @@ func TestRunEtcdAsNonRoot(t *testing.T) { t.Fatal(err) } verifyPodSecurityContext(t, &pod, runAsUser, runAsGroup, nil) - verifyContainerSecurityContext(t, pod.Spec.Containers[0], nil, []v1.Capability{"ALL"}, pointer.Bool(false)) + verifyContainerSecurityContext(t, pod.Spec.Containers[0], nil, []v1.Capability{"ALL"}, ptr.To(false)) wantUpdateFiles := map[string]ownerAndPermissions{ cfg.Etcd.Local.DataDir: {uid: runAsUser, gid: runAsGroup, permissions: 
0700}, filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerKeyName): {uid: runAsUser, gid: runAsGroup, permissions: 0600}, diff --git a/cmd/kubeadm/app/util/staticpod/utils_test.go b/cmd/kubeadm/app/util/staticpod/utils_test.go index 1dd6863e6eb27..0af59683423a3 100644 --- a/cmd/kubeadm/app/util/staticpod/utils_test.go +++ b/cmd/kubeadm/app/util/staticpod/utils_test.go @@ -23,6 +23,7 @@ import ( "reflect" "sort" "strconv" + "strings" "testing" v1 "k8s.io/api/core/v1" @@ -652,6 +653,22 @@ spec: - image: gcr.io/google_containers/etcd-amd64:3.1.11 status: {} ` + invalidWithDefaultFields = ` +apiVersion: v1 +kind: Pod +metadata: + labels: + tier: control-plane + component: etcd + name: etcd + namespace: kube-system +spec: + containers: + - image: gcr.io/google_containers/etcd-amd64:3.1.11 + restartPolicy: "Always" +status: {} +` + validPod2 = ` apiVersion: v1 kind: Pod @@ -729,6 +746,7 @@ func TestManifestFilesAreEqual(t *testing.T) { description string podYamls []string expectedResult bool + expectedDiff string expectErr bool }{ { @@ -748,6 +766,19 @@ func TestManifestFilesAreEqual(t *testing.T) { podYamls: []string{validPod, validPod2}, expectedResult: false, expectErr: false, + expectedDiff: `@@ -12 +12 @@ +- - image: gcr.io/google_containers/etcd-amd64:3.1.11 ++ - image: gcr.io/google_containers/etcd-amd64:3.1.12 +`, + }, + { + description: "manifests are not equal for adding new defaults", + podYamls: []string{validPod, invalidWithDefaultFields}, + expectedResult: false, + expectErr: false, + expectedDiff: `@@ -14,0 +15 @@ ++ restartPolicy: Always +`, }, { description: "first manifest doesn't exist", @@ -780,7 +811,7 @@ func TestManifestFilesAreEqual(t *testing.T) { } // compare them - result, actualErr := ManifestFilesAreEqual(filepath.Join(tmpdir, "0.yaml"), filepath.Join(tmpdir, "1.yaml")) + result, diff, actualErr := ManifestFilesAreEqual(filepath.Join(tmpdir, "0.yaml"), filepath.Join(tmpdir, "1.yaml")) if result != rt.expectedResult { t.Errorf( "ManifestFilesAreEqual failed\n%s\nexpected result: %t\nactual result: %t", @@ -798,6 +829,14 @@ func TestManifestFilesAreEqual(t *testing.T) { actualErr, ) } + if !strings.Contains(diff, rt.expectedDiff) { + t.Errorf( + "ManifestFilesAreEqual diff doesn't expected\n%s\n\texpected diff: %s\n\tactual diff: %s", + rt.description, + rt.expectedDiff, + diff, + ) + } }) } } diff --git a/cmd/kubeadm/app/util/users/users_linux.go b/cmd/kubeadm/app/util/users/users_linux.go index 63fc544c8bf95..7d1e2f79514fc 100644 --- a/cmd/kubeadm/app/util/users/users_linux.go +++ b/cmd/kubeadm/app/util/users/users_linux.go @@ -129,7 +129,7 @@ func (u *EntryMap) String() string { return strings.Join(lines, "") } -// Is a public wrapper around addUsersAndGroupsImpl with default system file paths. +// AddUsersAndGroups is a public wrapper around addUsersAndGroupsImpl with default system file paths. func AddUsersAndGroups() (*UsersAndGroups, error) { return addUsersAndGroupsImpl(fileEtcLoginDefs, fileEtcPasswd, fileEtcGroup) } diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 71ea243361c63..aafd37a528385 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -300,6 +300,8 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.StringVar(&f.CertDirectory, "cert-dir", f.CertDirectory, "The directory where the TLS certs are located. 
"+ "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") + fs.StringVar(&f.CloudProvider, "cloud-provider", f.CloudProvider, "The provider for cloud services. Set to empty string for running with no cloud provider. Set to 'external' for running with an external cloud provider. If set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used).") + fs.StringVar(&f.RootDirectory, "root-dir", f.RootDirectory, "Directory path for managing kubelet files (volume mounts,etc).") fs.BoolVar(&f.SeccompDefault, "seccomp-default", f.SeccompDefault, "Enable the use of `RuntimeDefault` as the default seccomp profile for all workloads.") @@ -322,8 +324,6 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version") fs.StringVar(&f.ExperimentalMounterPath, "experimental-mounter-path", f.ExperimentalMounterPath, "[Experimental] Path of mounter binary. Leave empty to use the default mount.") fs.MarkDeprecated("experimental-mounter-path", "will be removed in 1.25 or later. in favor of using CSI.") - fs.StringVar(&f.CloudProvider, "cloud-provider", f.CloudProvider, "The provider for cloud services. Set to empty string for running with no cloud provider. If set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used).") - fs.MarkDeprecated("cloud-provider", "will be removed in 1.25 or later, in favor of removing cloud provider code from Kubelet.") fs.StringVar(&f.CloudConfigFile, "cloud-config", f.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") fs.MarkDeprecated("cloud-config", "will be removed in 1.25 or later, in favor of removing cloud provider code from Kubelet.") fs.BoolVar(&f.ExperimentalNodeAllocatableIgnoreEvictionThreshold, "experimental-allocatable-ignore-eviction", f.ExperimentalNodeAllocatableIgnoreEvictionThreshold, "When set to 'true', Hard Eviction Thresholds will be ignored while calculating Node Allocatable. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. 
[default=false]") diff --git a/cmd/kubelet/app/plugins_providers.go b/cmd/kubelet/app/plugins_providers.go index fd10d2464377b..98a0631496df7 100644 --- a/cmd/kubelet/app/plugins_providers.go +++ b/cmd/kubelet/app/plugins_providers.go @@ -33,7 +33,6 @@ import ( "k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/portworx" "k8s.io/kubernetes/pkg/volume/rbd" - "k8s.io/kubernetes/pkg/volume/vsphere_volume" ) type probeFn func() []volume.VolumePlugin @@ -66,7 +65,6 @@ type pluginInfo struct { func appendLegacyProviderVolumes(allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) { pluginMigrationStatus := make(map[string]pluginInfo) pluginMigrationStatus[plugins.AzureFileInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAzureFile, pluginUnregisterFeature: features.InTreePluginAzureFileUnregister, pluginProbeFunction: azure_file.ProbeVolumePlugins} - pluginMigrationStatus[plugins.VSphereInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationvSphere, pluginUnregisterFeature: features.InTreePluginvSphereUnregister, pluginProbeFunction: vsphere_volume.ProbeVolumePlugins} pluginMigrationStatus[plugins.PortworxVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationPortworx, pluginUnregisterFeature: features.InTreePluginPortworxUnregister, pluginProbeFunction: portworx.ProbeVolumePlugins} pluginMigrationStatus[plugins.RBDVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationRBD, pluginUnregisterFeature: features.InTreePluginRBDUnregister, pluginProbeFunction: rbd.ProbeVolumePlugins} var err error diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 78b2d2c139473..bb81ec6873822 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -201,6 +201,7 @@ is checked every 20 seconds (also configurable with a flag).`, if cleanFlagSet.Changed("pod-infra-container-image") { klog.InfoS("--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime") + _ = cmd.Flags().MarkDeprecated("pod-infra-container-image", "--pod-infra-container-image will be removed in 1.30. 
Image garbage collector will get sandbox image information from CRI.") } // load kubelet config file, if provided @@ -736,7 +737,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend } if reservedSystemCPUs.Size() > 0 { // at cmd option validation phase it is tested either --system-reserved-cgroup or --kube-reserved-cgroup is specified, so overwrite should be ok - klog.InfoS("Option --reserved-cpus is specified, it will overwrite the cpu setting in KubeReserved and SystemReserved", "kubeReservedCPUs", s.KubeReserved, "systemReservedCPUs", s.SystemReserved) + klog.InfoS("Option --reserved-cpus is specified, it will overwrite the cpu setting in KubeReserved and SystemReserved", "kubeReserved", s.KubeReserved, "systemReserved", s.SystemReserved) if s.KubeReserved != nil { delete(s.KubeReserved, "cpu") } @@ -744,7 +745,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend s.SystemReserved = make(map[string]string) } s.SystemReserved["cpu"] = strconv.Itoa(reservedSystemCPUs.Size()) - klog.InfoS("After cpu setting is overwritten", "kubeReservedCPUs", s.KubeReserved, "systemReservedCPUs", s.SystemReserved) + klog.InfoS("After cpu setting is overwritten", "kubeReserved", s.KubeReserved, "systemReserved", s.SystemReserved) } kubeReserved, err := parseResourceList(s.KubeReserved) @@ -800,7 +801,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend NodeAllocatableConfig: cm.NodeAllocatableConfig{ KubeReservedCgroupName: s.KubeReservedCgroup, SystemReservedCgroupName: s.SystemReservedCgroup, - EnforceNodeAllocatable: sets.NewString(s.EnforceNodeAllocatable...), + EnforceNodeAllocatable: sets.New(s.EnforceNodeAllocatable...), KubeReserved: kubeReserved, SystemReserved: systemReserved, ReservedSystemCPUs: reservedSystemCPUs, @@ -833,6 +834,10 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend kubeDeps.PodStartupLatencyTracker = kubeletutil.NewPodStartupLatencyTracker() } + if kubeDeps.NodeStartupLatencyTracker == nil { + kubeDeps.NodeStartupLatencyTracker = kubeletutil.NewNodeStartupLatencyTracker() + } + // TODO(vmarmol): Do this through container config. 
oomAdjuster := kubeDeps.OOMAdjuster if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil { diff --git a/go.mod b/go.mod index d87fda5dbc2d0..4a0ccfe74bc9d 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ module k8s.io/kubernetes -go 1.20 +go 1.21.3 require ( bitbucket.org/bertimus9/systemstat v0.5.0 @@ -26,14 +26,14 @@ require ( github.com/coreos/go-oidc v2.2.1+incompatible github.com/coreos/go-systemd/v22 v22.5.0 github.com/cpuguy83/go-md2man/v2 v2.0.2 - github.com/cyphar/filepath-securejoin v0.2.3 + github.com/cyphar/filepath-securejoin v0.2.4 github.com/distribution/reference v0.5.0 github.com/docker/go-units v0.5.0 - github.com/emicklei/go-restful/v3 v3.9.0 + github.com/emicklei/go-restful/v3 v3.11.0 github.com/evanphx/json-patch v4.12.0+incompatible - github.com/fsnotify/fsnotify v1.6.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/go-logr/logr v1.2.4 - github.com/godbus/dbus/v5 v5.0.6 + github.com/godbus/dbus/v5 v5.1.0 github.com/gogo/protobuf v1.3.2 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/mock v1.6.0 @@ -45,14 +45,14 @@ require ( github.com/google/gofuzz v1.2.0 github.com/google/uuid v1.3.0 github.com/imdario/mergo v0.3.6 - github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5 + github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 github.com/libopenstorage/openstorage v1.0.0 github.com/lithammer/dedent v1.1.0 github.com/moby/ipvs v1.1.0 github.com/mrunalp/fileutils v0.5.0 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 - github.com/onsi/ginkgo/v2 v2.9.4 - github.com/onsi/gomega v1.27.6 + github.com/onsi/ginkgo/v2 v2.13.0 + github.com/onsi/gomega v1.28.0 github.com/opencontainers/runc v1.1.9 github.com/opencontainers/selinux v1.11.0 github.com/pkg/errors v0.9.1 @@ -63,32 +63,32 @@ require ( github.com/robfig/cron/v3 v3.0.1 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 github.com/vishvananda/netlink v1.1.0 - github.com/vmware/govmomi v0.30.0 + github.com/vmware/govmomi v0.30.6 go.etcd.io/etcd/api/v3 v3.5.9 go.etcd.io/etcd/client/pkg/v3 v3.5.9 go.etcd.io/etcd/client/v3 v3.5.9 - go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 - go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/trace v1.10.0 - go.opentelemetry.io/proto/otlp v0.19.0 + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 + go.opentelemetry.io/otel v1.19.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 + go.opentelemetry.io/otel/sdk v1.19.0 + go.opentelemetry.io/otel/trace v1.19.0 + go.opentelemetry.io/proto/otlp v1.0.0 go.uber.org/goleak v1.2.1 go.uber.org/zap v1.19.0 - golang.org/x/crypto v0.11.0 - golang.org/x/net v0.13.0 - golang.org/x/oauth2 v0.8.0 - golang.org/x/sync v0.2.0 - golang.org/x/sys v0.10.0 - golang.org/x/term v0.10.0 + golang.org/x/crypto v0.14.0 + golang.org/x/net v0.17.0 + golang.org/x/oauth2 v0.10.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.13.0 + golang.org/x/term v0.13.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.8.0 - google.golang.org/api v0.114.0 - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230525234030-28d5490b6b19 - google.golang.org/grpc v1.54.0 + golang.org/x/tools v0.12.0 + google.golang.org/api v0.126.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d + google.golang.org/grpc v1.58.2 google.golang.org/protobuf v1.31.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/square/go-jose.v2 v2.6.0 @@ -110,12 +110,12 @@ require ( k8s.io/csi-translation-lib v0.0.0 k8s.io/dynamic-resource-allocation v0.0.0 k8s.io/endpointslice v0.0.0 - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 k8s.io/klog/v2 v2.100.1 k8s.io/kms v0.0.0 k8s.io/kube-aggregator v0.0.0 k8s.io/kube-controller-manager v0.0.0 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/kube-proxy v0.0.0 k8s.io/kube-scheduler v0.0.0 k8s.io/kubectl v0.0.0 @@ -132,7 +132,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -175,15 +175,16 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -224,23 +225,22 @@ require ( go.etcd.io/etcd/raft/v3 v3.5.9 // indirect go.etcd.io/etcd/server/v3 v3.5.9 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto 
v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/go.sum b/go.sum index 3da10057e1a51..c3a2329099d19 100644 --- a/go.sum +++ b/go.sum @@ -28,146 +28,143 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= 
+cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod 
h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains 
v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1/go.mod 
h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1/go.mod 
h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1/go.mod 
h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1/go.mod 
h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -271,11 +268,11 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= @@ -323,8 +320,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -349,8 +347,8 @@ github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02/go.mod h1:7NQ3kWOx2cZ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 
-github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -358,11 +356,11 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -379,8 +377,8 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -419,8 +417,9 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod 
h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= @@ -432,8 +431,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -524,6 +523,8 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -536,8 +537,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= 
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -553,8 +554,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -582,8 +583,8 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5 h1:qPmlgoeRS18y2dT+iAH5vEKZgIqgiPi2Y8UCu/b7Aq8= -github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= +github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 h1:i2fYnDurfLlJH8AyyMOnkLHnHeP8Ff/DDpuZA/D3bPo= +github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -678,10 +679,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= 
+github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -810,8 +811,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -826,8 +828,8 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/vmware/govmomi v0.30.0 h1:Fm8ugPnnlMSTSceDKY9goGvjmqc6eQLPUSUeNXdpeXA= -github.com/vmware/govmomi v0.30.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= +github.com/vmware/govmomi v0.30.6 h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U= +github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= @@ -868,31 +870,29 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 h1:KQjX0qQ8H21oBUAvFp4ZLKJMMLIluONvSPDAFIGmX58= -go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0/go.mod h1:DQYkU9srMFqLUTVA/7/WlRHdnYDB7wyMMlle2ktMjfI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/contrib/propagators/b3 v1.10.0 h1:6AD2VV8edRdEYNaD8cNckpzgdMLU2kbV9OYyxt2kvCg= -go.opentelemetry.io/contrib/propagators/b3 v1.10.0/go.mod h1:oxvamQ/mTDFQVugml/uFS59+aEUnFLhmd1wsG+n5MOE= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 h1:Z6SbqeRZAl2OczfkFOqLx1BeYBDYehNjEnqluD7581Y= +go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0/go.mod h1:XiglO+8SPMqM3Mqh5/rtxR1VHc63o8tb38QrU6tm4mU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/contrib/propagators/b3 v1.17.0 h1:ImOVvHnku8jijXqkwCSyYKRDt2YrnGXD4BbhcpfbfJo= +go.opentelemetry.io/contrib/propagators/b3 v1.17.0/go.mod h1:IkfUfMpKWmynvvE0264trz0sf32NRTZL4nuAN9AbWRc= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod 
h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -919,8 +919,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -959,8 +959,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1012,8 +1012,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1030,10 +1030,9 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1047,8 +1046,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1121,19 +1120,18 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1146,8 +1144,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1213,8 +1211,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1250,8 +1248,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1318,14 +1316,14 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc 
v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1352,11 +1350,10 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1417,15 +1414,15 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/system-validators v1.8.0 h1:tq05tdO9zdJZnNF3SXrq6LE7Knc/KfJm5wk68467JDg= k8s.io/system-validators v1.8.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1434,8 +1431,8 @@ k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= diff --git a/hack/conformance/check_conformance_test_requirements.go b/hack/conformance/check_conformance_test_requirements.go index 8f53d03c730b8..09252cecd164f 100644 --- a/hack/conformance/check_conformance_test_requirements.go +++ b/hack/conformance/check_conformance_test_requirements.go @@ -30,7 +30,7 @@ import ( ) const ( - //e.g. framework.ConformanceIt("should provide secure master service ", func(ctx context.Context) { + // e.g. framework.ConformanceIt("should provide secure master service", func(ctx context.Context) { patternStartConformance = `framework.ConformanceIt\(.*, func\(\) {$` patternEndConformance = `}\)$` patternSkip = `e2eskipper.Skip.*\(` diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 95b25678725fa..f3466ff76bddd 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -330,6 +330,7 @@ readonly KUBE_STATIC_LIBRARIES=( kubectl kubectl-convert kubemark + mounter ) # Fully-qualified package names that we want to instrument for coverage information. @@ -466,13 +467,23 @@ kube::golang::create_gopath_tree() { kube::golang::verify_go_version() { # default GO_VERSION to content of .go-version GO_VERSION="${GO_VERSION:-"$(cat "${KUBE_ROOT}/.go-version")"}" - # only setup go if we haven't set FORCE_HOST_GO, or `go version` doesn't match GO_VERSION - if ! ([ -n "${FORCE_HOST_GO:-}" ] || \ - (command -v go >/dev/null && [ "$(go version | cut -d' ' -f3)" = "go${GO_VERSION}" ])); then + if [ "${GOTOOLCHAIN:-auto}" != 'auto' ]; then + # no-op, just respect GOTOOLCHAIN + : + elif [ -n "${FORCE_HOST_GO:-}" ]; then + # ensure existing host version is used, like before GOTOOLCHAIN existed + export GOTOOLCHAIN='local' + else + # otherwise, we want to ensure the go version matches GO_VERSION + GOTOOLCHAIN="go${GO_VERSION}" + export GOTOOLCHAIN + # if go is either not installed or too old to respect GOTOOLCHAIN then use gimme + if ! (command -v go >/dev/null && [ "$(go version | cut -d' ' -f3)" = "${GOTOOLCHAIN}" ]); then export GIMME_ENV_PREFIX=${GIMME_ENV_PREFIX:-"${KUBE_OUTPUT}/.gimme/envs"} export GIMME_VERSION_PREFIX=${GIMME_VERSION_PREFIX:-"${KUBE_OUTPUT}/.gimme/versions"} # eval because the output of this is shell to set PATH etc. 
eval "$("${KUBE_ROOT}/third_party/gimme/gimme" "${GO_VERSION}")" + fi fi if [[ -z "$(command -v go)" ]]; then diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 210b7e00ad425..59550623b13c1 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -53,11 +53,11 @@ LIMITED_SWAP=${LIMITED_SWAP:-""} # required for cni installation CNI_CONFIG_DIR=${CNI_CONFIG_DIR:-/etc/cni/net.d} CNI_PLUGINS_VERSION=${CNI_PLUGINS_VERSION:-"v1.3.0"} -CNI_TARGETARCH=${CNI_TARGETARCH:-amd64} -CNI_PLUGINS_TARBALL="${CNI_PLUGINS_VERSION}/cni-plugins-linux-${CNI_TARGETARCH}-${CNI_PLUGINS_VERSION}.tgz" -CNI_PLUGINS_URL="https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_TARBALL}" +# The arch of the CNI binary, if not set, will be fetched based on the value of `uname -m` +CNI_TARGETARCH=${CNI_TARGETARCH:-""} +CNI_PLUGINS_URL="https://github.com/containernetworking/plugins/releases/download" CNI_PLUGINS_AMD64_SHA256SUM=${CNI_PLUGINS_AMD64_SHA256SUM:-"754a71ed60a4bd08726c3af705a7d55ee3df03122b12e389fdba4bea35d7dd7e"} -CNI_PLUGINS_ARM64_SHA256SUM=${CNI_PLUGINS_ARM64_SHA256SUM:-"86c4c866a01a8073ad14f6feec74de1fd63669786850c7be47521433f9570902"} +CNI_PLUGINS_ARM64_SHA256SUM=${CNI_PLUGINS_ARM64_SHA256SUM:-"de7a666fd6ad83a228086bd55756db62ef335a193d1b143d910b69f079e30598"} CNI_PLUGINS_PPC64LE_SHA256SUM=${CNI_PLUGINS_PPC64LE_SHA256SUM:-"8ceff026f4eccf33c261b4153af6911e10784ac169d08c1d86cf6887b9f4e99b"} CNI_PLUGINS_S390X_SHA256SUM=${CNI_PLUGINS_S390X_SHA256SUM:-"2f1f65ac33e961bcdc633e14c376656455824e22cc45d3ca7e31eb2750a7ebc4"} @@ -263,20 +263,8 @@ function test_apiserver_off { fi } -function detect_binary { - # Detect the OS name/arch so that we can find our binary - case "$(uname -s)" in - Darwin) - host_os=darwin - ;; - Linux) - host_os=linux - ;; - *) - echo "Unsupported host OS. Must be Linux or Mac OS X." >&2 - exit 1 - ;; - esac +function detect_arch { + local host_arch case "$(uname -m)" in x86_64*) @@ -312,7 +300,39 @@ function detect_binary { ;; esac - GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}" + if [[ -z "${host_arch}" ]]; then + return + fi + echo -n "${host_arch}" +} + +function detect_os { + local host_os + + case "$(uname -s)" in + Darwin) + host_os=darwin + ;; + Linux) + host_os=linux + ;; + *) + echo "Unsupported host OS. Must be Linux or Mac OS X." >&2 + exit 1 + ;; + esac + + if [[ -z "${host_os}" ]]; then + return + fi + echo -n "${host_os}" +} + +function detect_binary { + host_arch=$(detect_arch) + host_os=$(detect_os) + + GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}" } cleanup() @@ -1146,15 +1166,24 @@ function tolerate_cgroups_v2 { } function install_cni { - cni_plugin_sha=CNI_PLUGINS_${CNI_TARGETARCH^^}_SHA256SUM + if [[ -n "${CNI_TARGETARCH}" ]]; then + host_arch="${CNI_TARGETARCH}" + else + host_arch=$(detect_arch) + fi + + cni_plugin_sha=CNI_PLUGINS_${host_arch^^}_SHA256SUM + cni_plugin_tarball="${CNI_PLUGINS_VERSION}/cni-plugins-linux-${host_arch}-${CNI_PLUGINS_VERSION}.tgz" + cni_plugins_url="${CNI_PLUGINS_URL}/${cni_plugin_tarball}" + echo "Installing CNI plugin binaries ..." 
\ - && curl -sSL --retry 5 --output "${TMP_DIR}"/cni."${CNI_TARGETARCH}".tgz "${CNI_PLUGINS_URL}" \ - && echo "${!cni_plugin_sha} ${TMP_DIR}/cni.${CNI_TARGETARCH}.tgz" | tee "${TMP_DIR}"/cni.sha256 \ + && curl -sSL --retry 5 --output "${TMP_DIR}"/cni."${host_arch}".tgz "${cni_plugins_url}" \ + && echo "${!cni_plugin_sha} ${TMP_DIR}/cni.${host_arch}.tgz" | tee "${TMP_DIR}"/cni.sha256 \ && sha256sum --ignore-missing -c "${TMP_DIR}"/cni.sha256 \ && rm -f "${TMP_DIR}"/cni.sha256 \ && sudo mkdir -p /opt/cni/bin \ - && sudo tar -C /opt/cni/bin -xzvf "${TMP_DIR}"/cni."${CNI_TARGETARCH}".tgz \ - && rm -rf "${TMP_DIR}"/cni."${CNI_TARGETARCH}".tgz \ + && sudo tar -C /opt/cni/bin -xzvf "${TMP_DIR}"/cni."${host_arch}".tgz \ + && rm -rf "${TMP_DIR}"/cni."${host_arch}".tgz \ && sudo find /opt/cni/bin -type f -not \( \ -iname host-local \ -o -iname bridge \ diff --git a/hack/logcheck.conf b/hack/logcheck.conf index 9a44e6bcadc2a..31b17b42d531a 100644 --- a/hack/logcheck.conf +++ b/hack/logcheck.conf @@ -29,6 +29,8 @@ structured k8s.io/apiserver/pkg/server/options/encryptionconfig/.* # TODO next: contextual k8s.io/kubernetes/pkg/scheduler/.* # A few files involved in startup migrated already to contextual # We can't enable contextual logcheck until all are migrated +contextual k8s.io/client-go/tools/events/.* +contextual k8s.io/client-go/tools/record/.* contextual k8s.io/dynamic-resource-allocation/.* contextual k8s.io/kubernetes/cmd/kube-scheduler/.* contextual k8s.io/kubernetes/pkg/controller/.* diff --git a/hack/make-rules/verify.sh b/hack/make-rules/verify.sh index 30a9a5bf7db87..a322a165e71c0 100755 --- a/hack/make-rules/verify.sh +++ b/hack/make-rules/verify.sh @@ -33,7 +33,8 @@ source "${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh" EXCLUDED_PATTERNS=( "verify-all.sh" # this script calls the make rule and would cause a loop "verify-*-dockerized.sh" # Don't run any scripts that intended to be run dockerized - "verify-golangci-lint-pr.sh" # Don't run this as part of the block pull-kubernetes-verify yet. TODO(pohly): try this in a non-blocking job and then reconsider this. + "verify-golangci-lint-pr.sh" # Runs in a separate job for PRs. + "verify-golangci-lint-pr-hints.sh" # Runs in a separate job for PRs. "verify-licenses.sh" # runs in a separate job to monitor availability of the dependencies periodically "verify-openapi-docs-urls.sh" # Spams docs URLs, don't run in CI. 
) diff --git a/hack/unwanted-dependencies.json b/hack/unwanted-dependencies.json index 6701f41c24e40..62635aae420e4 100644 --- a/hack/unwanted-dependencies.json +++ b/hack/unwanted-dependencies.json @@ -47,6 +47,7 @@ "github.com/hashicorp/serf": "MPL license not in CNCF allowlist", "github.com/influxdata/influxdb1-client": "db/datastore clients should not be required", "github.com/json-iterator/go": "refer to #105030", + "github.com/mailru/easyjson": "unmaintained", "github.com/miekg/dns": "no dns client/server should be required", "github.com/mindprince/gonvml": "depends on nvml.h that does not appear to permit modification, redistribution", "github.com/mitchellh/cli": "MPL license not in CNCF allowlist", @@ -76,7 +77,6 @@ "cloud.google.com/go": [ "cloud.google.com/go/compute", "github.com/google/cadvisor", - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", "google.golang.org/genproto" ], "cloud.google.com/go/bigquery": [ @@ -85,6 +85,8 @@ ], "cloud.google.com/go/compute": [ "cloud.google.com/go/compute/metadata", + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", + "golang.org/x/oauth2", "google.golang.org/genproto", "google.golang.org/grpc" ], @@ -133,6 +135,10 @@ "k8s.io/kubernetes", "k8s.io/legacy-cloud-providers" ], + "github.com/google/s2a-go": [ + "cloud.google.com/go/compute", + "google.golang.org/api" + ], "github.com/google/shlex": [ "sigs.k8s.io/kustomize/api", "sigs.k8s.io/kustomize/kustomize/v5" @@ -167,6 +173,13 @@ "k8s.io/kube-openapi", "sigs.k8s.io/structured-merge-diff/v4" ], + "github.com/mailru/easyjson": [ + "github.com/go-openapi/swag", + "k8s.io/kube-openapi", + "sigs.k8s.io/kustomize/api", + "sigs.k8s.io/kustomize/kustomize/v5", + "sigs.k8s.io/kustomize/kyaml" + ], "github.com/pkg/errors": [ "github.com/Microsoft/hcsshim", "github.com/aws/aws-sdk-go", @@ -210,6 +223,7 @@ ], "google.golang.org/appengine": [ "cloud.google.com/go/compute", + "github.com/grpc-ecosystem/grpc-gateway/v2", "github.com/prometheus/client_golang", "github.com/prometheus/common", "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", @@ -234,12 +248,10 @@ "go.etcd.io/etcd/pkg/v3", "go.etcd.io/etcd/server/v3", "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", - "go.opentelemetry.io/otel/exporters/otlp/otlptrace", - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc", "google.golang.org/api", "google.golang.org/genproto/googleapis/api", - "google.golang.org/grpc", - "sigs.k8s.io/apiserver-network-proxy/konnectivity-client" + "google.golang.org/genproto/googleapis/rpc", + "google.golang.org/grpc" ] }, "unwantedVendored": [ @@ -247,6 +259,7 @@ "github.com/GoogleCloudPlatform/k8s-cloud-provider", "github.com/gogo/protobuf", "github.com/golang/mock", + "github.com/google/s2a-go", "github.com/google/shlex", "github.com/googleapis/enterprise-certificate-proxy", "github.com/googleapis/gax-go/v2", @@ -254,6 +267,7 @@ "github.com/grpc-ecosystem/go-grpc-prometheus", "github.com/grpc-ecosystem/grpc-gateway", "github.com/json-iterator/go", + "github.com/mailru/easyjson", "github.com/pkg/errors", "github.com/rubiojr/go-vhd", "go.opencensus.io", @@ -263,4 +277,4 @@ "google.golang.org/genproto" ] } -} \ No newline at end of file +} diff --git a/hack/update-internal-modules.sh b/hack/update-internal-modules.sh index 61b988c3e26d3..e623525ccb8a6 100755 --- a/hack/update-internal-modules.sh +++ b/hack/update-internal-modules.sh @@ -26,7 +26,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh" 
MODULES=( hack/tools staging/src/k8s.io/code-generator/examples - staging/src/k8s.io/kms/internal/plugins/mock + staging/src/k8s.io/kms/internal/plugins/_mock ) # Explicitly opt into go modules, even though we're inside a GOPATH directory diff --git a/hack/verify-e2e-suites.sh b/hack/verify-e2e-suites.sh new file mode 100755 index 0000000000000..1235a838c37e6 --- /dev/null +++ b/hack/verify-e2e-suites.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script checks that all E2E test suites are sane, i.e. can be +# started without an error. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" +source "${KUBE_ROOT}/hack/lib/util.sh" + +kube::golang::verify_go_version + +cd "${KUBE_ROOT}" + +kube::util::ensure-temp-dir + +for suite in $(git grep -l framework.AfterReadingAllFlags | grep -v -e ^test/e2e/framework -e ^hack | xargs -n 1 dirname | sort -u); do + # Build a binary and run it in the root directory to get paths that are + # relative to that instead of the package directory. + out="" + if (cd "$suite" && go test -c -o "${KUBE_TEMP}/e2e.bin" .) && out=$("${KUBE_TEMP}/e2e.bin" --list-tests); then + echo "E2E suite $suite passed." + else + echo >&2 "ERROR: E2E test suite invocation failed for $suite." + # shellcheck disable=SC2001 + echo "$out" | sed -e 's/^/ /' + fi +done diff --git a/hack/verify-golangci-lint-pr-hints.sh b/hack/verify-golangci-lint-pr-hints.sh new file mode 100755 index 0000000000000..7c51f3052a010 --- /dev/null +++ b/hack/verify-golangci-lint-pr-hints.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# Copyright 2022 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script checks a PR for the coding style for the Go language files using +# golangci-lint. It does nothing when invoked as part of a normal "make +# verify". + +set -o nounset +set -o pipefail + +if [ ! "${PULL_NUMBER:-}" ]; then + echo 'Not testing anything because this is not a pull request.' + exit 0 +fi + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. + +"${KUBE_ROOT}/hack/verify-golangci-lint.sh" -r "${PULL_BASE_SHA}" -n diff --git a/hack/verify-golangci-lint-pr.sh b/hack/verify-golangci-lint-pr.sh index 101cae1dedb0e..689028add309d 100755 --- a/hack/verify-golangci-lint-pr.sh +++ b/hack/verify-golangci-lint-pr.sh @@ -28,12 +28,4 @@ fi KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
-# include shell2junit library -source "${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh" - -# TODO (https://github.com/kubernetes/test-infra/issues/17056): -# take this additional artifact and convert it to GitHub annotations -# to make it easier to see these problems during a PR review. -# -# -g "${ARTIFACTS}/golangci-lint-githubactions.log" -juLog -output="${ARTIFACTS:-/tmp/results}" -class="golangci" -name="golangci-strict-pr" -fail="^ERROR: " "${KUBE_ROOT}/hack/verify-golangci-lint.sh" -r "${PULL_BASE_SHA}" -s +"${KUBE_ROOT}/hack/verify-golangci-lint.sh" -r "${PULL_BASE_SHA}" -s diff --git a/hack/verify-golangci-lint.sh b/hack/verify-golangci-lint.sh index 264280949e0ea..5cd9ea2259257 100755 --- a/hack/verify-golangci-lint.sh +++ b/hack/verify-golangci-lint.sh @@ -106,10 +106,6 @@ while getopts "ar:sng:c:" o; do esac done -if [ "${golangci_config}" ]; then - golangci+=(--config="${golangci_config}") -fi - # Below the output of golangci-lint is going to be piped into sed to add # a prefix to each output line. This helps make the output more visible # in the Prow log viewer ("error" is a key word there) and ensures that @@ -159,6 +155,22 @@ pushd "${KUBE_ROOT}/hack/tools" >/dev/null fi popd >/dev/null +if [ "${golangci_config}" ]; then + # The relative path to _output/local/bin only works if that actually is the + # GOBIN. If not, then we have to make a temporary copy of the config and + # replace the path with an absolute one. This could also be done + # unconditionally, but the invocation that is printed below is nicer if we + # don't do it when not required. + if grep -q 'path: ../_output/local/bin/' "${golangci_config}" && + [ "${GOBIN}" != "${KUBE_ROOT}/_output/local/bin" ]; then + kube::util::ensure-temp-dir + patched_golangci_config="${KUBE_TEMP}/$(basename "${golangci_config}")" + sed -e "s;path: ../_output/local/bin/;path: ${GOBIN}/;" "${golangci_config}" >"${patched_golangci_config}" + golangci_config="${patched_golangci_config}" + fi + golangci+=(--config="${golangci_config}") +fi + cd "${KUBE_ROOT}" res=0 diff --git a/hack/verify-govulncheck.sh b/hack/verify-govulncheck.sh new file mode 100755 index 0000000000000..e739285d4eef5 --- /dev/null +++ b/hack/verify-govulncheck.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Copyright 2023 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" +source "${KUBE_ROOT}/hack/lib/util.sh" + +# make sure everything is committed +kube::util::ensure_clean_working_dir + +# This sets up the environment, like GOCACHE, which keeps the worktree cleaner.
+kube::golang::setup_env +# Opt into using go modules +export GO111MODULE=on + +go install golang.org/x/vuln/cmd/govulncheck@v1.0.1 + +# KUBE_VERIFY_GIT_BRANCH is populated in verify CI jobs +BRANCH="${KUBE_VERIFY_GIT_BRANCH:-master}" + +kube::util::ensure-temp-dir +WORKTREE="${KUBE_TEMP}/worktree" + +# Create a copy of the repo with $BRANCH checked out +git worktree add -f "${WORKTREE}" "${BRANCH}" +# Clean up the copy on exit +kube::util::trap_add "git worktree remove -f ${WORKTREE}" EXIT + +govulncheck -scan module ./... > "${KUBE_TEMP}/head.txt" +pushd "${WORKTREE}" >/dev/null + govulncheck -scan module ./... > "${KUBE_TEMP}/pr-base.txt" +popd >/dev/null + +echo -e "\n HEAD: $(cat "${KUBE_TEMP}"/head.txt)" +echo -e "\n PR_BASE: $(cat "${KUBE_TEMP}/pr-base.txt")" + +diff -s -u --ignore-all-space "${KUBE_TEMP}"/pr-base.txt "${KUBE_TEMP}"/head.txt || true diff --git a/hack/verify-pkg-names.sh b/hack/verify-pkg-names.sh index 37b7750355db4..c9904ff980bde 100755 --- a/hack/verify-pkg-names.sh +++ b/hack/verify-pkg-names.sh @@ -24,8 +24,6 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" -kube::golang::verify_go_version - cd "${KUBE_ROOT}" if git --no-pager grep -E $'^(import |\t)[a-z]+[A-Z_][a-zA-Z]* "[^"]+"$' -- '**/*.go' ':(exclude)vendor/*' ':(exclude)**/*.pb.go'; then echo "!!! Some package aliases break go conventions." diff --git a/hack/verify-prerelease-lifecycle-tags.sh b/hack/verify-prerelease-lifecycle-tags.sh index 965f4f03f6cfb..369823bf34504 100755 --- a/hack/verify-prerelease-lifecycle-tags.sh +++ b/hack/verify-prerelease-lifecycle-tags.sh @@ -24,8 +24,6 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" -kube::golang::verify_go_version - cd "${KUBE_ROOT}" if git --no-pager grep -L '// +k8s:prerelease-lifecycle-gen=true' -- 'staging/src/k8s.io/api/**/*beta*/doc.go'; then echo "!!! Some beta packages doc.go do not include prerelease-lifecycle tags." 
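Note on the govulncheck script added above: it is essentially a "compare one report between HEAD and a base ref" check built on a disposable git worktree. Below is a minimal standalone sketch of that pattern, assuming only git and a govulncheck v1.x binary on PATH; the BASE_REF default, the temp-file names, and the `|| true` guards are illustrative choices for the sketch, not part of the patch.

#!/usr/bin/env bash
# Sketch: compare govulncheck findings between the current checkout (HEAD)
# and a base ref, using a throwaway git worktree so HEAD is never modified.
set -o errexit -o nounset -o pipefail

BASE_REF="${1:-master}"   # illustrative default; the hack/ script reads KUBE_VERIFY_GIT_BRANCH
TMP="$(mktemp -d)"
trap 'git worktree remove -f "${TMP}/base" >/dev/null 2>&1 || true; rm -rf "${TMP}"' EXIT

# Report for the current checkout. govulncheck exits non-zero when it finds
# vulnerabilities, so guard it to keep errexit from aborting the comparison.
govulncheck -scan module ./... > "${TMP}/head.txt" || true

# Report for the base ref, produced inside an isolated worktree.
git worktree add -f "${TMP}/base" "${BASE_REF}"
(cd "${TMP}/base" && govulncheck -scan module ./... > "${TMP}/base.txt") || true

# Informational diff, mirroring the non-fatal diff at the end of the script above.
diff -s -u --ignore-all-space "${TMP}/base.txt" "${TMP}/head.txt" || true

Building the base report in a separate worktree rather than checking the base ref out in place keeps the primary checkout (and its GOCACHE) untouched, which is also why the script insists on a clean working dir before starting.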
diff --git a/hack/verify-publishing-bot.py b/hack/verify-publishing-bot.py index 1e9f6c69c2b34..ee6b884bca880 100755 --- a/hack/verify-publishing-bot.py +++ b/hack/verify-publishing-bot.py @@ -80,7 +80,13 @@ def main(): continue for item in rule["branches"]: - if not item["source"]["dir"].endswith(rule["destination"]): + if "dir" in item["source"]: + raise Exception("use of deprecated `dir` field in rules for `%s`" % (rule["destination"])) + if len(item["source"]["dirs"]) > 1: + raise Exception("cannot have more than one directory (`%s`) per source branch `%s` of `%s`" % + (item["source"]["dirs"], item["source"]["branch"], rule["destination"]) + ) + if not item["source"]["dirs"][0].endswith(rule["destination"]): raise Exception("copy/paste error `%s` refers to `%s`" % (rule["destination"],item["source"]["dir"])) if branch["name"] != "master": diff --git a/pkg/api/pod/util.go b/pkg/api/pod/util.go index 6b7394fe8eb32..e9ca7c271d5f3 100644 --- a/pkg/api/pod/util.go +++ b/pkg/api/pod/util.go @@ -534,7 +534,8 @@ func dropDisabledFields( dropDisabledTopologySpreadConstraintsFields(podSpec, oldPodSpec) dropDisabledNodeInclusionPolicyFields(podSpec, oldPodSpec) - dropDisabledMatchLabelKeysField(podSpec, oldPodSpec) + dropDisabledMatchLabelKeysFieldInTopologySpread(podSpec, oldPodSpec) + dropDisabledMatchLabelKeysFieldInPodAffinity(podSpec, oldPodSpec) dropDisabledDynamicResourceAllocationFields(podSpec, oldPodSpec) if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) { @@ -558,6 +559,64 @@ func dropDisabledFields( } // For other types of containers, validateContainers will handle them. } + + if !utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepAction) && !podLifecycleSleepActionInUse(oldPodSpec) { + for i := range podSpec.Containers { + if podSpec.Containers[i].Lifecycle == nil { + continue + } + if podSpec.Containers[i].Lifecycle.PreStop != nil { + podSpec.Containers[i].Lifecycle.PreStop.Sleep = nil + } + if podSpec.Containers[i].Lifecycle.PostStart != nil { + podSpec.Containers[i].Lifecycle.PostStart.Sleep = nil + } + } + for i := range podSpec.InitContainers { + if podSpec.InitContainers[i].Lifecycle == nil { + continue + } + if podSpec.InitContainers[i].Lifecycle.PreStop != nil { + podSpec.InitContainers[i].Lifecycle.PreStop.Sleep = nil + } + if podSpec.InitContainers[i].Lifecycle.PostStart != nil { + podSpec.InitContainers[i].Lifecycle.PostStart.Sleep = nil + } + } + for i := range podSpec.EphemeralContainers { + if podSpec.EphemeralContainers[i].Lifecycle == nil { + continue + } + if podSpec.EphemeralContainers[i].Lifecycle.PreStop != nil { + podSpec.EphemeralContainers[i].Lifecycle.PreStop.Sleep = nil + } + if podSpec.EphemeralContainers[i].Lifecycle.PostStart != nil { + podSpec.EphemeralContainers[i].Lifecycle.PostStart.Sleep = nil + } + } + } +} + +func podLifecycleSleepActionInUse(podSpec *api.PodSpec) bool { + if podSpec == nil { + return false + } + var inUse bool + VisitContainers(podSpec, AllContainers, func(c *api.Container, containerType ContainerType) bool { + if c.Lifecycle == nil { + return true + } + if c.Lifecycle.PreStop != nil && c.Lifecycle.PreStop.Sleep != nil { + inUse = true + return false + } + if c.Lifecycle.PostStart != nil && c.Lifecycle.PostStart.Sleep != nil { + inUse = true + return false + } + return true + }) + return inUse } // dropDisabledPodStatusFields removes disabled fields from the pod status @@ -694,19 +753,89 @@ func dropDisabledNodeInclusionPolicyFields(podSpec, 
oldPodSpec *api.PodSpec) { } } -// dropDisabledMatchLabelKeysField removes disabled fields from PodSpec related -// to MatchLabelKeys only if it is not already used by the old spec. -func dropDisabledMatchLabelKeysField(podSpec, oldPodSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodTopologySpread) && !matchLabelKeysInUse(oldPodSpec) { +// dropDisabledMatchLabelKeysFieldInPodAffinity removes disabled fields from PodSpec related +// to MatchLabelKeys in required/preferred PodAffinity/PodAntiAffinity only if it is not already used by the old spec. +func dropDisabledMatchLabelKeysFieldInPodAffinity(podSpec, oldPodSpec *api.PodSpec) { + if podSpec == nil || podSpec.Affinity == nil || utilfeature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodAffinity) || matchLabelKeysFieldInPodAffinityInUse(oldPodSpec) { + return + } + + if affinity := podSpec.Affinity.PodAffinity; affinity != nil { + dropMatchLabelKeysFieldInPodAffnityTerm(affinity.RequiredDuringSchedulingIgnoredDuringExecution) + dropMatchLabelKeysFieldInWeightedPodAffnityTerm(affinity.PreferredDuringSchedulingIgnoredDuringExecution) + } + if antiaffinity := podSpec.Affinity.PodAntiAffinity; antiaffinity != nil { + dropMatchLabelKeysFieldInPodAffnityTerm(antiaffinity.RequiredDuringSchedulingIgnoredDuringExecution) + dropMatchLabelKeysFieldInWeightedPodAffnityTerm(antiaffinity.PreferredDuringSchedulingIgnoredDuringExecution) + } +} + +// dropDisabledMatchLabelKeysFieldInTopologySpread removes disabled fields from PodSpec related +// to MatchLabelKeys in TopologySpread only if it is not already used by the old spec. +func dropDisabledMatchLabelKeysFieldInTopologySpread(podSpec, oldPodSpec *api.PodSpec) { + if !utilfeature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodTopologySpread) && !matchLabelKeysInTopologySpreadInUse(oldPodSpec) { for i := range podSpec.TopologySpreadConstraints { podSpec.TopologySpreadConstraints[i].MatchLabelKeys = nil } } } -// matchLabelKeysInUse returns true if the pod spec is non-nil +// dropMatchLabelKeysFieldInWeightedPodAffnityTerm removes MatchLabelKeys and MismatchLabelKeys fields from WeightedPodAffinityTerm +func dropMatchLabelKeysFieldInWeightedPodAffnityTerm(terms []api.WeightedPodAffinityTerm) { + for i := range terms { + terms[i].PodAffinityTerm.MatchLabelKeys = nil + terms[i].PodAffinityTerm.MismatchLabelKeys = nil + } +} + +// dropMatchLabelKeysFieldInPodAffnityTerm removes MatchLabelKeys and MismatchLabelKeys fields from PodAffinityTerm +func dropMatchLabelKeysFieldInPodAffnityTerm(terms []api.PodAffinityTerm) { + for i := range terms { + terms[i].MatchLabelKeys = nil + terms[i].MismatchLabelKeys = nil + } +} + +// matchLabelKeysFieldInPodAffinityInUse returns true if given affinityTerms have MatchLabelKeys field set. 
+func matchLabelKeysFieldInPodAffinityInUse(podSpec *api.PodSpec) bool { + if podSpec == nil || podSpec.Affinity == nil { + return false + } + + if affinity := podSpec.Affinity.PodAffinity; affinity != nil { + for _, c := range affinity.RequiredDuringSchedulingIgnoredDuringExecution { + if len(c.MatchLabelKeys) > 0 || len(c.MismatchLabelKeys) > 0 { + return true + } + } + + for _, c := range affinity.PreferredDuringSchedulingIgnoredDuringExecution { + if len(c.PodAffinityTerm.MatchLabelKeys) > 0 || len(c.PodAffinityTerm.MismatchLabelKeys) > 0 { + return true + } + } + } + + if antiAffinity := podSpec.Affinity.PodAntiAffinity; antiAffinity != nil { + for _, c := range antiAffinity.RequiredDuringSchedulingIgnoredDuringExecution { + if len(c.MatchLabelKeys) > 0 || len(c.MismatchLabelKeys) > 0 { + return true + } + } + + for _, c := range antiAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + if len(c.PodAffinityTerm.MatchLabelKeys) > 0 || len(c.PodAffinityTerm.MismatchLabelKeys) > 0 { + return true + } + } + } + + return false +} + +// matchLabelKeysInTopologySpreadInUse returns true if the pod spec is non-nil // and has MatchLabelKeys field set in TopologySpreadConstraints. -func matchLabelKeysInUse(podSpec *api.PodSpec) bool { +func matchLabelKeysInTopologySpreadInUse(podSpec *api.PodSpec) bool { if podSpec == nil { return false } diff --git a/pkg/api/pod/util_test.go b/pkg/api/pod/util_test.go index b43e52e1595ea..ec33d2009b5c1 100644 --- a/pkg/api/pod/util_test.go +++ b/pkg/api/pod/util_test.go @@ -585,8 +585,8 @@ func TestDropFSGroupFields(t *testing.T) { t.Errorf("for %s, expected fsGroupChangepolicy found none", podInfo.description) } } else { - secConext := newPod.Spec.SecurityContext - if secConext != nil && secConext.FSGroupChangePolicy != nil { + secContext := newPod.Spec.SecurityContext + if secContext != nil && secContext.FSGroupChangePolicy != nil { t.Errorf("for %s, unexpected fsGroupChangepolicy set", podInfo.description) } } @@ -1515,7 +1515,862 @@ func TestDropNodeInclusionPolicyFields(t *testing.T) { } } -func TestDropDisabledMatchLabelKeysField(t *testing.T) { +func Test_dropDisabledMatchLabelKeysFieldInPodAffinity(t *testing.T) { + tests := []struct { + name string + enabled bool + podSpec *api.PodSpec + oldPodSpec *api.PodSpec + wantPodSpec *api.PodSpec + }{ + { + name: "[PodAffinity/required] feature disabled, both pods don't use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAffinity/required] feature disabled, only old pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, 
+ }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAffinity/required] feature disabled, only current pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{}}, + }, + }, + }, + }, + { + name: "[PodAffinity/required] feature disabled, both pods use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + { + name: "[PodAffinity/required] feature enabled, only old pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: true, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAffinity/required] feature enabled, only current pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: true, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + { + name: "[PodAffinity/required] feature enabled, both pods use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + 
Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + { + name: "[PodAffinity/preferred] feature disabled, both pods don't use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAffinity/preferred] feature disabled, only old pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAffinity/preferred] feature disabled, only current pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{{}}, + }, + }, + }, + }, + { + name: "[PodAffinity/preferred] feature disabled, both pods use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: 
&api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + }, + { + name: "[PodAffinity/preferred] feature enabled, only old pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: true, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAffinity/preferred] feature enabled, only current pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: true, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + }, + { + name: "[PodAffinity/preferred] feature enabled, both pods use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/required] feature disabled, both pods don't use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: 
&api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/required] feature disabled, only old pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/required] feature disabled, only current pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{}}, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/required] feature disabled, both pods use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/required] feature enabled, only old pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: true, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + 
RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/required] feature enabled, only current pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: true, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/required] feature enabled, both pods use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + {MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + + { + name: "[PodAntiAffinity/preferred] feature disabled, both pods don't use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/preferred] feature disabled, only old pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + 
PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/preferred] feature disabled, only current pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{{}}, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/preferred] feature disabled, both pods use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/preferred] feature enabled, only old pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: true, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/preferred] feature enabled, only current pod uses MatchLabelKeys/MismatchLabelKeys field", + enabled: true, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{}, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ 
+ Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + }, + { + name: "[PodAntiAffinity/preferred] feature enabled, both pods use MatchLabelKeys/MismatchLabelKeys fields", + enabled: false, + oldPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + podSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + wantPodSpec: &api.PodSpec{ + Affinity: &api.Affinity{ + PodAntiAffinity: &api.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{MatchLabelKeys: []string{"foo"}, MismatchLabelKeys: []string{"foo"}}, + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodAffinity, test.enabled)() + + dropDisabledFields(test.podSpec, nil, test.oldPodSpec, nil) + if diff := cmp.Diff(test.wantPodSpec, test.podSpec); diff != "" { + t.Errorf("unexpected pod spec (-want, +got):\n%s", diff) + } + }) + } +} + +func Test_dropDisabledMatchLabelKeysFieldInTopologySpread(t *testing.T) { tests := []struct { name string enabled bool diff --git a/pkg/api/service/util_test.go b/pkg/api/service/util_test.go index a0fc76861b5a6..d562a4d24811a 100644 --- a/pkg/api/service/util_test.go +++ b/pkg/api/service/util_test.go @@ -26,6 +26,7 @@ import ( func TestGetLoadBalancerSourceRanges(t *testing.T) { checkError := func(v string) { + t.Helper() annotations := make(map[string]string) annotations[api.AnnotationLoadBalancerSourceRangesKey] = v svc := api.Service{} @@ -49,6 +50,7 @@ func TestGetLoadBalancerSourceRanges(t *testing.T) { checkError("10.0.0.1") checkOK := func(v string) utilnet.IPNetSet { + t.Helper() annotations := make(map[string]string) annotations[api.AnnotationLoadBalancerSourceRangesKey] = v svc := api.Service{} @@ -112,6 +114,7 @@ func TestGetLoadBalancerSourceRanges(t *testing.T) { func TestAllowAll(t *testing.T) { checkAllowAll := func(allowAll bool, cidrs ...string) { + t.Helper() ipnets, err := utilnet.ParseIPNets(cidrs...) 
if err != nil { t.Errorf("Unexpected error parsing cidrs: %v", cidrs) @@ -131,6 +134,7 @@ func TestAllowAll(t *testing.T) { func TestExternallyAccessible(t *testing.T) { checkExternallyAccessible := func(expect bool, service *api.Service) { + t.Helper() res := ExternallyAccessible(service) if res != expect { t.Errorf("Expected ExternallyAccessible = %v, got %v", expect, res) @@ -174,6 +178,7 @@ func TestExternallyAccessible(t *testing.T) { func TestRequestsOnlyLocalTraffic(t *testing.T) { checkRequestsOnlyLocalTraffic := func(requestsOnlyLocalTraffic bool, service *api.Service) { + t.Helper() res := RequestsOnlyLocalTraffic(service) if res != requestsOnlyLocalTraffic { t.Errorf("Expected requests OnlyLocal traffic = %v, got %v", @@ -220,6 +225,7 @@ func TestRequestsOnlyLocalTraffic(t *testing.T) { func TestNeedsHealthCheck(t *testing.T) { checkNeedsHealthCheck := func(needsHealthCheck bool, service *api.Service) { + t.Helper() res := NeedsHealthCheck(service) if res != needsHealthCheck { t.Errorf("Expected needs health check = %v, got %v", diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go index 0e894791d14c2..fa958884df973 100644 --- a/pkg/api/v1/resource/helpers.go +++ b/pkg/api/v1/resource/helpers.go @@ -53,7 +53,7 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { var containerStatuses map[string]*v1.ContainerStatus if opts.InPlacePodVerticalScalingEnabled { - containerStatuses = map[string]*v1.ContainerStatus{} + containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) for i := range pod.Status.ContainerStatuses { containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] } diff --git a/pkg/api/v1/service/util_test.go b/pkg/api/v1/service/util_test.go index 5d9f02bc5de57..988b69a244bc6 100644 --- a/pkg/api/v1/service/util_test.go +++ b/pkg/api/v1/service/util_test.go @@ -26,6 +26,7 @@ import ( func TestGetLoadBalancerSourceRanges(t *testing.T) { checkError := func(v string) { + t.Helper() annotations := make(map[string]string) annotations[v1.AnnotationLoadBalancerSourceRangesKey] = v svc := v1.Service{} @@ -49,6 +50,7 @@ func TestGetLoadBalancerSourceRanges(t *testing.T) { checkError("10.0.0.1") checkOK := func(v string) utilnet.IPNetSet { + t.Helper() annotations := make(map[string]string) annotations[v1.AnnotationLoadBalancerSourceRangesKey] = v svc := v1.Service{} @@ -112,6 +114,7 @@ func TestGetLoadBalancerSourceRanges(t *testing.T) { func TestAllowAll(t *testing.T) { checkAllowAll := func(allowAll bool, cidrs ...string) { + t.Helper() ipnets, err := utilnet.ParseIPNets(cidrs...) 
if err != nil { t.Errorf("Unexpected error parsing cidrs: %v", cidrs) @@ -131,6 +134,7 @@ func TestAllowAll(t *testing.T) { func TestExternallyAccessible(t *testing.T) { checkExternallyAccessible := func(expect bool, service *v1.Service) { + t.Helper() res := ExternallyAccessible(service) if res != expect { t.Errorf("Expected ExternallyAccessible = %v, got %v", expect, res) @@ -174,6 +178,7 @@ func TestExternallyAccessible(t *testing.T) { func TestExternalPolicyLocal(t *testing.T) { checkExternalPolicyLocal := func(requestsOnlyLocalTraffic bool, service *v1.Service) { + t.Helper() res := ExternalPolicyLocal(service) if res != requestsOnlyLocalTraffic { t.Errorf("Expected requests OnlyLocal traffic = %v, got %v", @@ -240,6 +245,7 @@ func TestExternalPolicyLocal(t *testing.T) { func TestNeedsHealthCheck(t *testing.T) { checkNeedsHealthCheck := func(needsHealthCheck bool, service *v1.Service) { + t.Helper() res := NeedsHealthCheck(service) if res != needsHealthCheck { t.Errorf("Expected needs health check = %v, got %v", @@ -280,6 +286,7 @@ func TestNeedsHealthCheck(t *testing.T) { func TestInternalPolicyLocal(t *testing.T) { checkInternalPolicyLocal := func(expected bool, service *v1.Service) { + t.Helper() res := InternalPolicyLocal(service) if res != expected { t.Errorf("Expected internal local traffic = %v, got %v", diff --git a/pkg/apis/admissionregistration/validation/validation.go b/pkg/apis/admissionregistration/validation/validation.go index 4ea4ffe78ce59..b7cf40281c3a4 100644 --- a/pkg/apis/admissionregistration/validation/validation.go +++ b/pkg/apis/admissionregistration/validation/validation.go @@ -854,10 +854,10 @@ func validateMatchResources(mc *admissionregistration.MatchResources, fldPath *f } if mc.ObjectSelector == nil { - allErrors = append(allErrors, field.Required(fldPath.Child("labelSelector"), "")) + allErrors = append(allErrors, field.Required(fldPath.Child("objectSelector"), "")) } else { // validate selector strictly, this type was released after issue #99139 was resolved - allErrors = append(allErrors, metav1validation.ValidateLabelSelector(mc.ObjectSelector, metav1validation.LabelSelectorValidationOptions{}, fldPath.Child("labelSelector"))...) + allErrors = append(allErrors, metav1validation.ValidateLabelSelector(mc.ObjectSelector, metav1validation.LabelSelectorValidationOptions{}, fldPath.Child("objectSelector"))...) } for i, namedRuleWithOperations := range mc.ResourceRules { diff --git a/pkg/apis/batch/fuzzer/fuzzer.go b/pkg/apis/batch/fuzzer/fuzzer.go index 367ddf59c9428..832de7d2f6650 100644 --- a/pkg/apis/batch/fuzzer/fuzzer.go +++ b/pkg/apis/batch/fuzzer/fuzzer.go @@ -45,11 +45,7 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} { j.Completions = &completions j.Parallelism = ¶llelism j.BackoffLimit = &backoffLimit - if c.Rand.Int31()%2 == 0 { - j.ManualSelector = pointer.Bool(true) - } else { - j.ManualSelector = nil - } + j.ManualSelector = pointer.Bool(c.RandBool()) mode := batch.NonIndexedCompletion if c.RandBool() { mode = batch.IndexedCompletion diff --git a/pkg/apis/batch/types.go b/pkg/apis/batch/types.go index cb5e6eb22e70d..266ea73de90d0 100644 --- a/pkg/apis/batch/types.go +++ b/pkg/apis/batch/types.go @@ -306,8 +306,8 @@ type JobSpec struct { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is alpha-level. 
It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional BackoffLimitPerIndex *int32 @@ -319,8 +319,8 @@ type JobSpec struct { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional MaxFailedIndexes *int32 @@ -481,8 +481,8 @@ type JobStatus struct { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional FailedIndexes *string diff --git a/pkg/apis/batch/v1/defaults.go b/pkg/apis/batch/v1/defaults.go index 7b6d585cd7105..3a0033fb38ddd 100644 --- a/pkg/apis/batch/v1/defaults.go +++ b/pkg/apis/batch/v1/defaults.go @@ -79,6 +79,9 @@ func SetDefaults_Job(obj *batchv1.Job) { } } } + if obj.Spec.ManualSelector == nil { + obj.Spec.ManualSelector = utilpointer.Bool(false) + } } func SetDefaults_CronJob(obj *batchv1.CronJob) { diff --git a/pkg/apis/batch/v1/defaults_test.go b/pkg/apis/batch/v1/defaults_test.go index 1da8c4eaeb8e3..6c5fe47269acc 100644 --- a/pkg/apis/batch/v1/defaults_test.go +++ b/pkg/apis/batch/v1/defaults_test.go @@ -98,6 +98,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(6), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), + ManualSelector: pointer.Bool(false), PodFailurePolicy: &batchv1.PodFailurePolicy{ Rules: []batchv1.PodFailurePolicyRule{ { @@ -166,6 +167,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), PodReplacementPolicy: podReplacementPtr(batchv1.Failed), + ManualSelector: pointer.Bool(false), PodFailurePolicy: &batchv1.PodFailurePolicy{ Rules: []batchv1.PodFailurePolicyRule{ { @@ -198,6 +200,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), PodReplacementPolicy: podReplacementPtr(batchv1.TerminatingOrFailed), + ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -218,6 +221,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(6), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), + ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -237,6 +241,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(6), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), + ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -257,6 +262,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(6), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(true), + 
ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -279,6 +285,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(6), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), + ManualSelector: pointer.Bool(false), }, }, }, @@ -297,6 +304,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(6), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), + ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -316,6 +324,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(6), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), + ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -336,6 +345,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(6), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), + ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -356,6 +366,7 @@ func TestSetDefaultJob(t *testing.T) { BackoffLimit: pointer.Int32(5), CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), + ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -369,6 +380,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), PodReplacementPolicy: podReplacementPtr(batchv1.TerminatingOrFailed), + ManualSelector: pointer.Bool(false), Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, }, @@ -382,6 +394,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.NonIndexedCompletion), Suspend: pointer.Bool(false), PodReplacementPolicy: podReplacementPtr(batchv1.TerminatingOrFailed), + ManualSelector: pointer.Bool(false), Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, }, @@ -398,6 +411,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.IndexedCompletion), Suspend: pointer.Bool(true), PodReplacementPolicy: podReplacementPtr(batchv1.Failed), + ManualSelector: pointer.Bool(true), Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, }, @@ -411,6 +425,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.IndexedCompletion), Suspend: pointer.Bool(true), PodReplacementPolicy: podReplacementPtr(batchv1.Failed), + ManualSelector: pointer.Bool(true), }, }, expectLabels: true, @@ -424,6 +439,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.IndexedCompletion), Template: validPodTemplateSpec, Suspend: pointer.Bool(true), + ManualSelector: pointer.Bool(false), }, }, expected: &batchv1.Job{ @@ -435,6 +451,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.IndexedCompletion), Template: validPodTemplateSpec, Suspend: pointer.Bool(true), + ManualSelector: pointer.Bool(false), }, }, expectLabels: true, @@ -449,6 +466,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.IndexedCompletion), Template: validPodTemplateSpec, Suspend: pointer.Bool(true), + ManualSelector: pointer.Bool(true), }, }, expected: &batchv1.Job{ @@ -460,6 +478,7 @@ func TestSetDefaultJob(t *testing.T) { CompletionMode: completionModePtr(batchv1.IndexedCompletion), Template: validPodTemplateSpec, Suspend: pointer.Bool(true), + ManualSelector: 
pointer.Bool(true), }, }, expectLabels: true, @@ -500,6 +519,9 @@ func TestSetDefaultJob(t *testing.T) { if diff := cmp.Diff(expected.Spec.PodReplacementPolicy, actual.Spec.PodReplacementPolicy); diff != "" { t.Errorf("Unexpected PodReplacementPolicy (-want,+got):\n%s", diff) } + if diff := cmp.Diff(expected.Spec.ManualSelector, actual.Spec.ManualSelector); diff != "" { + t.Errorf("Unexpected ManualSelector (-want,+got):\n%s", diff) + } }) } } diff --git a/pkg/apis/batch/validation/validation.go b/pkg/apis/batch/validation/validation.go index 23650fb748108..e8c22ba7be680 100644 --- a/pkg/apis/batch/validation/validation.go +++ b/pkg/apis/batch/validation/validation.go @@ -523,7 +523,11 @@ func validateCronJobSpec(spec, oldSpec *batch.CronJobSpec, fldPath *field.Path, if len(spec.Schedule) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("schedule"), "")) } else { - allErrs = append(allErrs, validateScheduleFormat(spec.Schedule, spec.TimeZone, fldPath.Child("schedule"))...) + allowTZInSchedule := false + if oldSpec != nil { + allowTZInSchedule = strings.Contains(oldSpec.Schedule, "TZ") + } + allErrs = append(allErrs, validateScheduleFormat(spec.Schedule, allowTZInSchedule, spec.TimeZone, fldPath.Child("schedule"))...) } if spec.StartingDeadlineSeconds != nil { @@ -564,13 +568,16 @@ func validateConcurrencyPolicy(concurrencyPolicy *batch.ConcurrencyPolicy, fldPa return allErrs } -func validateScheduleFormat(schedule string, timeZone *string, fldPath *field.Path) field.ErrorList { +func validateScheduleFormat(schedule string, allowTZInSchedule bool, timeZone *string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if _, err := cron.ParseStandard(schedule); err != nil { allErrs = append(allErrs, field.Invalid(fldPath, schedule, err.Error())) } - if strings.Contains(schedule, "TZ") && timeZone != nil { + switch { + case allowTZInSchedule && strings.Contains(schedule, "TZ") && timeZone != nil: allErrs = append(allErrs, field.Invalid(fldPath, schedule, "cannot use both timeZone field and TZ or CRON_TZ in schedule")) + case !allowTZInSchedule && strings.Contains(schedule, "TZ"): + allErrs = append(allErrs, field.Invalid(fldPath, schedule, "cannot use TZ or CRON_TZ in schedule, use timeZone field instead")) } return allErrs diff --git a/pkg/apis/batch/validation/validation_test.go b/pkg/apis/batch/validation/validation_test.go index 084f06567bc2c..5288219551214 100644 --- a/pkg/apis/batch/validation/validation_test.go +++ b/pkg/apis/batch/validation/validation_test.go @@ -2284,23 +2284,6 @@ func TestValidateCronJob(t *testing.T) { }, }, }, - "spec.schedule: cannot use both timeZone field and TZ or CRON_TZ in schedule": { - ObjectMeta: metav1.ObjectMeta{ - Name: "mycronjob", - Namespace: metav1.NamespaceDefault, - UID: types.UID("1a2b3c"), - }, - Spec: batch.CronJobSpec{ - Schedule: "TZ=UTC 0 * * * *", - TimeZone: &timeZoneUTC, - ConcurrencyPolicy: batch.AllowConcurrent, - JobTemplate: batch.JobTemplateSpec{ - Spec: batch.JobSpec{ - Template: validPodTemplateSpec, - }, - }, - }, - }, "spec.timeZone: timeZone must be nil or non-empty string": { ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", @@ -2673,6 +2656,125 @@ func TestValidateCronJob(t *testing.T) { } } +func TestValidateCronJobScheduleTZ(t *testing.T) { + validPodTemplateSpec := getValidPodTemplateSpecForGenerated(getValidGeneratedSelector()) + validPodTemplateSpec.Labels = map[string]string{} + validSchedule := "0 * * * *" + invalidSchedule := "TZ=UTC 0 * * * *" + invalidCronJob := &batch.CronJob{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "mycronjob", + Namespace: metav1.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.CronJobSpec{ + Schedule: invalidSchedule, + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Template: validPodTemplateSpec, + }, + }, + }, + } + validCronJob := &batch.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mycronjob", + Namespace: metav1.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.CronJobSpec{ + Schedule: validSchedule, + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Template: validPodTemplateSpec, + }, + }, + }, + } + + testCases := map[string]struct { + cronJob *batch.CronJob + createErr string + update func(*batch.CronJob) + updateErr string + }{ + "update removing TZ should work": { + cronJob: invalidCronJob, + createErr: "cannot use TZ or CRON_TZ in schedule", + update: func(cj *batch.CronJob) { + cj.Spec.Schedule = validSchedule + }, + }, + "update not modifying TZ should work": { + cronJob: invalidCronJob, + createErr: "cannot use TZ or CRON_TZ in schedule, use timeZone field instead", + update: func(cj *batch.CronJob) { + cj.Spec.Schedule = invalidSchedule + }, + }, + "update not modifying TZ but adding .spec.timeZone should fail": { + cronJob: invalidCronJob, + createErr: "cannot use TZ or CRON_TZ in schedule, use timeZone field instead", + update: func(cj *batch.CronJob) { + cj.Spec.TimeZone = &timeZoneUTC + }, + updateErr: "cannot use both timeZone field and TZ or CRON_TZ in schedule", + }, + "update adding TZ should fail": { + cronJob: validCronJob, + update: func(cj *batch.CronJob) { + cj.Spec.Schedule = invalidSchedule + }, + updateErr: "cannot use TZ or CRON_TZ in schedule", + }, + } + + for k, v := range testCases { + t.Run(k, func(t *testing.T) { + errs := ValidateCronJobCreate(v.cronJob, corevalidation.PodValidationOptions{}) + if len(errs) > 0 { + err := errs[0] + if len(v.createErr) == 0 { + t.Errorf("unexpected error: %#v, none expected", err) + return + } + if !strings.Contains(err.Error(), v.createErr) { + t.Errorf("unexpected error: %v, expected: %s", err, v.createErr) + } + } else if len(v.createErr) != 0 { + t.Errorf("no error, expected %v", v.createErr) + return + } + + oldSpec := v.cronJob.DeepCopy() + oldSpec.ResourceVersion = "1" + + newSpec := v.cronJob.DeepCopy() + newSpec.ResourceVersion = "2" + if v.update != nil { + v.update(newSpec) + } + + errs = ValidateCronJobUpdate(newSpec, oldSpec, corevalidation.PodValidationOptions{}) + if len(errs) > 0 { + err := errs[0] + if len(v.updateErr) == 0 { + t.Errorf("unexpected error: %#v, none expected", err) + return + } + if !strings.Contains(err.Error(), v.updateErr) { + t.Errorf("unexpected error: %v, expected: %s", err, v.updateErr) + } + } else if len(v.updateErr) != 0 { + t.Errorf("no error, expected %v", v.updateErr) + return + } + }) + } +} + func TestValidateCronJobSpec(t *testing.T) { validPodTemplateSpec := getValidPodTemplateSpecForGenerated(getValidGeneratedSelector()) validPodTemplateSpec.Labels = map[string]string{} diff --git a/pkg/apis/core/helper/qos/qos.go b/pkg/apis/core/helper/qos/qos.go index 8401cb6c3004a..b32fffa0e3fa3 100644 --- a/pkg/apis/core/helper/qos/qos.go +++ b/pkg/apis/core/helper/qos/qos.go @@ -30,12 +30,22 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool { return supportedQoSComputeResources.Has(string(name)) } -// GetPodQOS returns the QoS class of a pod. 
+// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field. +// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class. +func GetPodQOS(pod *core.Pod) core.PodQOSClass { + if pod.Status.QOSClass != "" { + return pod.Status.QOSClass + } + return ComputePodQOS(pod) +} + +// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more +// expensive than GetPodQOS which should be used for pods having a non-empty .Status.QOSClass. // A pod is besteffort if none of its containers have specified any requests or limits. // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. // A pod is burstable if limits and requests do not match across all containers. // When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go -func GetPodQOS(pod *core.Pod) core.PodQOSClass { +func ComputePodQOS(pod *core.Pod) core.PodQOSClass { requests := core.ResourceList{} limits := core.ResourceList{} zeroQuantity := resource.MustParse("0") diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index e40b8bfa10470..f29952f637bbd 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -2150,6 +2150,12 @@ type ExecAction struct { Command []string } +// SleepAction describes a "sleep" action. +type SleepAction struct { + // Seconds is the number of seconds to sleep. + Seconds int64 +} + // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { @@ -2432,6 +2438,10 @@ type LifecycleHandler struct { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional TCPSocket *TCPSocketAction + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + Sleep *SleepAction } type GRPCAction struct { @@ -2892,6 +2902,7 @@ type WeightedPodAffinityTerm struct { // a pod of the set of pods is running. type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional LabelSelector *metav1.LabelSelector // namespaces specifies a static list of namespace names that the term applies to. @@ -2913,6 +2924,24 @@ type PodAffinityTerm struct { // An empty selector ({}) matches all namespaces. // +optional NamespaceSelector *metav1.LabelSelector + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // +listType=atomic + // +optional + MatchLabelKeys []string + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // +listType=atomic + // +optional + MismatchLabelKeys []string } // NodeAffinity is a group of node affinity scheduling rules. @@ -4325,7 +4354,7 @@ type ServicePort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -4490,7 +4519,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/pkg/apis/core/v1/helper/qos/qos.go b/pkg/apis/core/v1/helper/qos/qos.go index 10a6cb07911a5..79e2eb2abd2ef 100644 --- a/pkg/apis/core/v1/helper/qos/qos.go +++ b/pkg/apis/core/v1/helper/qos/qos.go @@ -32,11 +32,21 @@ func isSupportedQoSComputeResource(name v1.ResourceName) bool { return supportedQoSComputeResources.Has(string(name)) } -// GetPodQOS returns the QoS class of a pod. +// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field. +// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class. +func GetPodQOS(pod *v1.Pod) v1.PodQOSClass { + if pod.Status.QOSClass != "" { + return pod.Status.QOSClass + } + return ComputePodQOS(pod) +} + +// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more +// expensive than GetPodQOS which should be used for pods having a non-empty .Status.QOSClass. // A pod is besteffort if none of its containers have specified any requests or limits. // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. // A pod is burstable if limits and requests do not match across all containers. 
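The doc comments above describe the classification rules that ComputePodQOS still implements; GetPodQOS now simply prefers the class already persisted in .status.qosClass. A rough sketch of the three request/limit shapes and of the persisted-status shortcut, using the public core/v1 types (values are illustrative and the in-tree helpers are not called here):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Guaranteed: requests == limits for every compute resource in every container.
	guaranteed := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("256Mi")},
		Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("256Mi")},
	}
	// Burstable: something is requested or limited, but not everything matches.
	burstable := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
	}
	// BestEffort: no requests and no limits on any container.
	bestEffort := v1.ResourceRequirements{}

	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{Name: "app", Resources: guaranteed}}}}
	// Once the class has been persisted, GetPodQOS-style callers read it from
	// status instead of recomputing it from the containers.
	pod.Status.QOSClass = v1.PodQOSGuaranteed

	fmt.Println(pod.Status.QOSClass, len(burstable.Requests), len(bestEffort.Limits))
}
```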
-func GetPodQOS(pod *v1.Pod) v1.PodQOSClass { +func ComputePodQOS(pod *v1.Pod) v1.PodQOSClass { requests := v1.ResourceList{} limits := v1.ResourceList{} zeroQuantity := resource.MustParse("0") diff --git a/pkg/apis/core/v1/helper/qos/qos_test.go b/pkg/apis/core/v1/helper/qos/qos_test.go index 6dd45f614386d..d16c17a14e737 100644 --- a/pkg/apis/core/v1/helper/qos/qos_test.go +++ b/pkg/apis/core/v1/helper/qos/qos_test.go @@ -27,7 +27,7 @@ import ( corev1 "k8s.io/kubernetes/pkg/apis/core/v1" ) -func TestGetPodQOS(t *testing.T) { +func TestComputePodQOS(t *testing.T) { testCases := []struct { pod *v1.Pod expected v1.PodQOSClass @@ -128,15 +128,15 @@ func TestGetPodQOS(t *testing.T) { }, } for id, testCase := range testCases { - if actual := GetPodQOS(testCase.pod); testCase.expected != actual { + if actual := ComputePodQOS(testCase.pod); testCase.expected != actual { t.Errorf("[%d]: invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual) } - // Convert v1.Pod to core.Pod, and then check against `core.helper.GetPodQOS`. + // Convert v1.Pod to core.Pod, and then check against `core.helper.ComputePodQOS`. pod := core.Pod{} corev1.Convert_v1_Pod_To_core_Pod(testCase.pod, &pod, nil) - if actual := qos.GetPodQOS(&pod); core.PodQOSClass(testCase.expected) != actual { + if actual := qos.ComputePodQOS(&pod); core.PodQOSClass(testCase.expected) != actual { t.Errorf("[%d]: conversion invalid qos pod %s, expected: %s, actual: %s", id, testCase.pod.Name, testCase.expected, actual) } } diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index e69a632041cab..f5eeabb4e6711 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -1937,6 +1937,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.SleepAction)(nil), (*core.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SleepAction_To_core_SleepAction(a.(*v1.SleepAction), b.(*core.SleepAction), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SleepAction)(nil), (*v1.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SleepAction_To_v1_SleepAction(a.(*core.SleepAction), b.(*v1.SleepAction), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1.StorageOSPersistentVolumeSource)(nil), (*core.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(a.(*v1.StorageOSPersistentVolumeSource), b.(*core.StorageOSPersistentVolumeSource), scope) }); err != nil { @@ -4325,6 +4335,7 @@ func autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in *v1.LifecycleHa out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec)) out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + out.Sleep = (*core.SleepAction)(unsafe.Pointer(in.Sleep)) return nil } @@ -4337,6 +4348,7 @@ func autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.Lifecycle out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + out.Sleep = (*v1.SleepAction)(unsafe.Pointer(in.Sleep)) return 
nil } @@ -5677,6 +5689,8 @@ func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTe out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.TopologyKey = in.TopologyKey out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys)) + out.MismatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MismatchLabelKeys)) return nil } @@ -5690,6 +5704,8 @@ func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinity out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) out.TopologyKey = in.TopologyKey out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys)) + out.MismatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MismatchLabelKeys)) return nil } @@ -8067,6 +8083,26 @@ func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.Ses return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) } +func autoConvert_v1_SleepAction_To_core_SleepAction(in *v1.SleepAction, out *core.SleepAction, s conversion.Scope) error { + out.Seconds = in.Seconds + return nil +} + +// Convert_v1_SleepAction_To_core_SleepAction is an autogenerated conversion function. +func Convert_v1_SleepAction_To_core_SleepAction(in *v1.SleepAction, out *core.SleepAction, s conversion.Scope) error { + return autoConvert_v1_SleepAction_To_core_SleepAction(in, out, s) +} + +func autoConvert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *v1.SleepAction, s conversion.Scope) error { + out.Seconds = in.Seconds + return nil +} + +// Convert_core_SleepAction_To_v1_SleepAction is an autogenerated conversion function. +func Convert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *v1.SleepAction, s conversion.Scope) error { + return autoConvert_core_SleepAction_To_v1_SleepAction(in, out, s) +} + func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { out.VolumeName = in.VolumeName out.VolumeNamespace = in.VolumeNamespace diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index d66823eeb8563..3a56f256b83b1 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -1653,33 +1653,20 @@ var allowedTemplateObjectMetaFields = map[string]bool{ // PersistentVolumeSpecValidationOptions contains the different settings for PeristentVolume validation type PersistentVolumeSpecValidationOptions struct { - // Allow spec to contain the "ReadWiteOncePod" access mode - AllowReadWriteOncePod bool } // ValidatePersistentVolumeName checks that a name is appropriate for a // PersistentVolumeName object. 
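In the validation hunks that follow, ReadWriteOncePod joins supportedAccessModes unconditionally and the AllowReadWriteOncePod plumbing is removed, so a claim like the sketch below no longer depends on the ReadWriteOncePod feature gate. It still must not be combined with the other access modes. Names are illustrative, and the sketch assumes an api module where the claim's resources field is already VolumeResourceRequirements, as in this diff:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
		Spec: v1.PersistentVolumeClaimSpec{
			// ReadWriteOncePod limits the volume to a single pod; listing it together
			// with ReadWriteOnce/ReadOnlyMany/ReadWriteMany is still rejected.
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod},
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("10Gi")},
			},
		},
	}
	fmt.Println(pvc.Name, pvc.Spec.AccessModes)
}
```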
var ValidatePersistentVolumeName = apimachineryvalidation.NameIsDNSSubdomain -var supportedAccessModes = sets.NewString(string(core.ReadWriteOnce), string(core.ReadOnlyMany), string(core.ReadWriteMany)) +var supportedAccessModes = sets.NewString(string(core.ReadWriteOnce), string(core.ReadOnlyMany), string(core.ReadWriteMany), string(core.ReadWriteOncePod)) var supportedReclaimPolicy = sets.NewString(string(core.PersistentVolumeReclaimDelete), string(core.PersistentVolumeReclaimRecycle), string(core.PersistentVolumeReclaimRetain)) var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), string(core.PersistentVolumeFilesystem)) func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) PersistentVolumeSpecValidationOptions { - opts := PersistentVolumeSpecValidationOptions{ - AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), - } - if oldPv == nil { - // If there's no old PV, use the options based solely on feature enablement - return opts - } - if helper.ContainsAccessMode(oldPv.Spec.AccessModes, core.ReadWriteOncePod) { - // If the old object allowed "ReadWriteOncePod", continue to allow it in the new object - opts.AllowReadWriteOncePod = true - } - return opts + return PersistentVolumeSpecValidationOptions{} } func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName string, validateInlinePersistentVolumeSpec bool, fldPath *field.Path, opts PersistentVolumeSpecValidationOptions) field.ErrorList { @@ -1701,15 +1688,10 @@ func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName stri allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "")) } - expandedSupportedAccessModes := sets.StringKeySet(supportedAccessModes) - if opts.AllowReadWriteOncePod { - expandedSupportedAccessModes.Insert(string(core.ReadWriteOncePod)) - } - foundReadWriteOncePod, foundNonReadWriteOncePod := false, false for _, mode := range pvSpec.AccessModes { - if !expandedSupportedAccessModes.Has(string(mode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, expandedSupportedAccessModes.List())) + if !supportedAccessModes.Has(string(mode)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List())) } if mode == core.ReadWriteOncePod { @@ -2016,17 +1998,16 @@ func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) f } type PersistentVolumeClaimSpecValidationOptions struct { - // Allow spec to contain the "ReadWiteOncePod" access mode - AllowReadWriteOncePod bool // Allow users to recover from previously failing expansion operation EnableRecoverFromExpansionFailure bool // Allow to validate the label value of the label selector AllowInvalidLabelValueInSelector bool + // Allow to validate the API group of the data source and data source reference + AllowInvalidAPIGroupInDataSourceOrRef bool } func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolumeClaim) PersistentVolumeClaimSpecValidationOptions { opts := PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), EnableRecoverFromExpansionFailure: utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure), AllowInvalidLabelValueInSelector: false, } @@ -2034,6 +2015,10 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum // If there's no old PVC, use the options based solely on feature 
enablement return opts } + + // If the old object had an invalid API group in the data source or data source reference, continue to allow it in the new object + opts.AllowInvalidAPIGroupInDataSourceOrRef = allowInvalidAPIGroupInDataSourceOrRef(&oldPvc.Spec) + labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{ AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector, } @@ -2042,11 +2027,6 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum opts.AllowInvalidLabelValueInSelector = true } - if helper.ContainsAccessMode(oldPvc.Spec.AccessModes, core.ReadWriteOncePod) { - // If the old object allowed "ReadWriteOncePod", continue to allow it in the new object - opts.AllowReadWriteOncePod = true - } - if helper.ClaimContainsAllocatedResources(oldPvc) || helper.ClaimContainsAllocatedResourceStatus(oldPvc) { opts.EnableRecoverFromExpansionFailure = true @@ -2056,7 +2036,6 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum func ValidationOptionsForPersistentVolumeClaimTemplate(claimTemplate, oldClaimTemplate *core.PersistentVolumeClaimTemplate) PersistentVolumeClaimSpecValidationOptions { opts := PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), AllowInvalidLabelValueInSelector: false, } if oldClaimTemplate == nil { @@ -2070,13 +2049,20 @@ func ValidationOptionsForPersistentVolumeClaimTemplate(claimTemplate, oldClaimTe // If the old object had an invalid label selector, continue to allow it in the new object opts.AllowInvalidLabelValueInSelector = true } - if helper.ContainsAccessMode(oldClaimTemplate.Spec.AccessModes, core.ReadWriteOncePod) { - // If the old object allowed "ReadWriteOncePod", continue to allow it in the new object - opts.AllowReadWriteOncePod = true - } return opts } +// allowInvalidAPIGroupInDataSourceOrRef returns true if the spec contains a data source or data source reference with an API group +func allowInvalidAPIGroupInDataSourceOrRef(spec *core.PersistentVolumeClaimSpec) bool { + if spec.DataSource != nil && spec.DataSource.APIGroup != nil { + return true + } + if spec.DataSourceRef != nil && spec.DataSourceRef.APIGroup != nil { + return true + } + return false +} + // ValidatePersistentVolumeClaim validates a PersistentVolumeClaim func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList { allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) @@ -2085,7 +2071,7 @@ func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, opts Persist } // validateDataSource validates a DataSource/DataSourceRef in a PersistentVolumeClaimSpec -func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *field.Path) field.ErrorList { +func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *field.Path, allowInvalidAPIGroupInDataSourceOrRef bool) field.ErrorList { allErrs := field.ErrorList{} if len(dataSource.Name) == 0 { @@ -2101,12 +2087,17 @@ func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *fie if len(apiGroup) == 0 && dataSource.Kind != "PersistentVolumeClaim" { allErrs = append(allErrs, field.Invalid(fldPath, dataSource.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup")) } + if len(apiGroup) > 0 && !allowInvalidAPIGroupInDataSourceOrRef { + for _, errString := 
range validation.IsDNS1123Subdomain(apiGroup) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiGroup"), apiGroup, errString)) + } + } return allErrs } // validateDataSourceRef validates a DataSourceRef in a PersistentVolumeClaimSpec -func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *field.Path) field.ErrorList { +func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *field.Path, allowInvalidAPIGroupInDataSourceOrRef bool) field.ErrorList { allErrs := field.ErrorList{} if len(dataSourceRef.Name) == 0 { @@ -2122,6 +2113,11 @@ func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *fi if len(apiGroup) == 0 && dataSourceRef.Kind != "PersistentVolumeClaim" { allErrs = append(allErrs, field.Invalid(fldPath, dataSourceRef.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup")) } + if len(apiGroup) > 0 && !allowInvalidAPIGroupInDataSourceOrRef { + for _, errString := range validation.IsDNS1123Subdomain(apiGroup) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiGroup"), apiGroup, errString)) + } + } if dataSourceRef.Namespace != nil && len(*dataSourceRef.Namespace) > 0 { for _, msg := range ValidateNameFunc(ValidateNamespaceName)(*dataSourceRef.Namespace, false) { @@ -2145,15 +2141,10 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, labelSelectorValidationOpts, fldPath.Child("selector"))...) } - expandedSupportedAccessModes := sets.StringKeySet(supportedAccessModes) - if opts.AllowReadWriteOncePod { - expandedSupportedAccessModes.Insert(string(core.ReadWriteOncePod)) - } - foundReadWriteOncePod, foundNonReadWriteOncePod := false, false for _, mode := range spec.AccessModes { - if !expandedSupportedAccessModes.Has(string(mode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, expandedSupportedAccessModes.List())) + if !supportedAccessModes.Has(string(mode)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List())) } if mode == core.ReadWriteOncePod { @@ -2185,10 +2176,10 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld } if spec.DataSource != nil { - allErrs = append(allErrs, validateDataSource(spec.DataSource, fldPath.Child("dataSource"))...) + allErrs = append(allErrs, validateDataSource(spec.DataSource, fldPath.Child("dataSource"), opts.AllowInvalidAPIGroupInDataSourceOrRef)...) } if spec.DataSourceRef != nil { - allErrs = append(allErrs, validateDataSourceRef(spec.DataSourceRef, fldPath.Child("dataSourceRef"))...) + allErrs = append(allErrs, validateDataSourceRef(spec.DataSourceRef, fldPath.Child("dataSourceRef"), opts.AllowInvalidAPIGroupInDataSourceOrRef)...) } if spec.DataSourceRef != nil && spec.DataSourceRef.Namespace != nil && len(*spec.DataSourceRef.Namespace) > 0 { if spec.DataSource != nil { @@ -2859,52 +2850,52 @@ func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field return allErrs } -func validateLivenessProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { +func validateLivenessProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, fldPath)...) + allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) 
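The apiGroup checks added above use IsDNS1123Subdomain, and ValidationOptionsForPersistentVolumeClaim ratchets the rule: objects that already carry a malformed group keep validating on update via AllowInvalidAPIGroupInDataSourceOrRef, while new objects are rejected. A small sketch of what passes and fails that check (the group names are just examples; "^invalid" is the value the new tests use):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// A well-formed DNS-1123 subdomain passes; "^invalid" fails, so a create with
	// that apiGroup in dataSource/dataSourceRef is rejected unless the old object
	// already had it (the ratchet case).
	for _, apiGroup := range []string{"snapshot.storage.k8s.io", "^invalid"} {
		errs := validation.IsDNS1123Subdomain(apiGroup)
		fmt.Printf("%-25q -> %d error(s): %v\n", apiGroup, len(errs), errs)
	}
}
```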
if probe.SuccessThreshold != 1 { allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1")) } return allErrs } -func validateReadinessProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { +func validateReadinessProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, fldPath)...) + allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) if probe.TerminationGracePeriodSeconds != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("terminationGracePeriodSeconds"), probe.TerminationGracePeriodSeconds, "must not be set for readinessProbes")) } return allErrs } -func validateStartupProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { +func validateStartupProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateProbe(probe, fldPath)...) + allErrs = append(allErrs, validateProbe(probe, gracePeriod, fldPath)...) if probe.SuccessThreshold != 1 { allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1")) } return allErrs } -func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { +func validateProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { return allErrs } - allErrs = append(allErrs, validateHandler(handlerFromProbe(&probe.ProbeHandler), fldPath)...) + allErrs = append(allErrs, validateHandler(handlerFromProbe(&probe.ProbeHandler), gracePeriod, fldPath)...) allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...) allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...) 
@@ -2939,6 +2930,7 @@ type commonHandler struct { HTTPGet *core.HTTPGetAction TCPSocket *core.TCPSocketAction GRPC *core.GRPCAction + Sleep *core.SleepAction } func handlerFromProbe(ph *core.ProbeHandler) commonHandler { @@ -2955,7 +2947,17 @@ func handlerFromLifecycle(lh *core.LifecycleHandler) commonHandler { Exec: lh.Exec, HTTPGet: lh.HTTPGet, TCPSocket: lh.TCPSocket, + Sleep: lh.Sleep, + } +} + +func validateSleepAction(sleep *core.SleepAction, gracePeriod int64, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if sleep.Seconds <= 0 || sleep.Seconds > gracePeriod { + invalidStr := fmt.Sprintf("must be greater than 0 and less than terminationGracePeriodSeconds (%d)", gracePeriod) + allErrors = append(allErrors, field.Invalid(fldPath, sleep.Seconds, invalidStr)) } + return allErrors } func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { @@ -3066,7 +3068,7 @@ func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) fie func validateGRPCAction(grpc *core.GRPCAction, fldPath *field.Path) field.ErrorList { return ValidatePortNumOrName(intstr.FromInt32(grpc.Port), fldPath.Child("port")) } -func validateHandler(handler commonHandler, fldPath *field.Path) field.ErrorList { +func validateHandler(handler commonHandler, gracePeriod int64, fldPath *field.Path) field.ErrorList { numHandlers := 0 allErrors := field.ErrorList{} if handler.Exec != nil { @@ -3101,19 +3103,27 @@ func validateHandler(handler commonHandler, fldPath *field.Path) field.ErrorList allErrors = append(allErrors, validateGRPCAction(handler.GRPC, fldPath.Child("grpc"))...) } } + if handler.Sleep != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("sleep"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateSleepAction(handler.Sleep, gracePeriod, fldPath.Child("sleep"))...) + } + } if numHandlers == 0 { allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type")) } return allErrors } -func validateLifecycle(lifecycle *core.Lifecycle, fldPath *field.Path) field.ErrorList { +func validateLifecycle(lifecycle *core.Lifecycle, gracePeriod int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if lifecycle.PostStart != nil { - allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), fldPath.Child("postStart"))...) + allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), gracePeriod, fldPath.Child("postStart"))...) } if lifecycle.PreStop != nil { - allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PreStop), fldPath.Child("preStop"))...) + allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PreStop), gracePeriod, fldPath.Child("preStop"))...) 
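validateSleepAction above is the only handler check that needs the pod's grace period: the sleep must be greater than zero and no longer than terminationGracePeriodSeconds, and, like the other actions, it may not be combined with exec/httpGet/tcpSocket in the same handler. A minimal sketch with the public core/v1 types (assumes an api module that has the SleepAction field and the PodLifecycleSleepAction gate enabled; names are placeholders):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	grace := int64(30)

	// Accepted: a 10s pre-stop sleep fits inside the 30s termination grace period.
	ok := v1.Container{
		Name:  "app",
		Image: "example.com/app:latest",
		Lifecycle: &v1.Lifecycle{
			PreStop: &v1.LifecycleHandler{Sleep: &v1.SleepAction{Seconds: 10}},
		},
	}

	// Rejected: 60 > terminationGracePeriodSeconds, so validateSleepAction fails.
	tooLong := &v1.LifecycleHandler{Sleep: &v1.SleepAction{Seconds: 60}}

	spec := v1.PodSpec{
		TerminationGracePeriodSeconds: &grace,
		Containers:                    []v1.Container{ok},
	}
	fmt.Println(*spec.TerminationGracePeriodSeconds, tooLong.Sleep.Seconds)
}
```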
} return allErrs } @@ -3138,7 +3148,7 @@ func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.Error var supportedResizeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) var supportedResizePolicies = sets.NewString(string(core.NotRequired), string(core.RestartContainer)) -func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *field.Path) field.ErrorList { +func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *field.Path, podRestartPolicy *core.RestartPolicy) field.ErrorList { allErrors := field.ErrorList{} // validate that resource name is not repeated, supported resource names and policy values are specified @@ -3162,13 +3172,17 @@ func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *fiel default: allErrors = append(allErrors, field.NotSupported(fldPath, p.RestartPolicy, supportedResizePolicies.List())) } + + if *podRestartPolicy == core.RestartPolicyNever && p.RestartPolicy != core.NotRequired { + allErrors = append(allErrors, field.Invalid(fldPath, p.RestartPolicy, "must be 'NotRequired' when `restartPolicy` is 'Never'")) + } } return allErrors } // validateEphemeralContainers is called by pod spec and template validation to validate the list of ephemeral containers. // Note that this is called for pod template even though ephemeral containers aren't allowed in pod templates. -func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy) field.ErrorList { var allErrs field.ErrorList if len(ephemeralContainers) == 0 { @@ -3189,7 +3203,7 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, idxPath := fldPath.Index(i) c := (*core.Container)(&ec.EphemeralContainerCommon) - allErrs = append(allErrs, validateContainerCommon(c, volumes, podClaimNames, idxPath, opts)...) + allErrs = append(allErrs, validateContainerCommon(c, volumes, podClaimNames, idxPath, opts, podRestartPolicy)...) // Ephemeral containers don't need looser constraints for pod templates, so it's convenient to apply both validations // here where we've already converted EphemeralContainerCommon to Container. allErrs = append(allErrs, validateContainerOnlyForPod(c, idxPath)...) 
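The resize-policy change above adds a cross-field rule: when the pod's restartPolicy is Never, every container resizePolicy entry must use NotRequired, since RestartContainer could never be honored. A small sketch with the public core/v1 types (assumes InPlacePodVerticalScaling is enabled; names are illustrative):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Accepted: restartPolicy Never paired with a NotRequired resize policy.
	ok := v1.PodSpec{
		RestartPolicy: v1.RestartPolicyNever,
		Containers: []v1.Container{{
			Name: "job",
			ResizePolicy: []v1.ContainerResizePolicy{
				{ResourceName: v1.ResourceCPU, RestartPolicy: v1.NotRequired},
			},
		}},
	}

	// Rejected: RestartContainer cannot be honored when the pod never restarts.
	bad := v1.PodSpec{
		RestartPolicy: v1.RestartPolicyNever,
		Containers: []v1.Container{{
			Name: "job",
			ResizePolicy: []v1.ContainerResizePolicy{
				{ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer},
			},
		}},
	}
	fmt.Println(ok.RestartPolicy, bad.Containers[0].ResizePolicy[0].RestartPolicy)
}
```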
@@ -3251,7 +3265,7 @@ func validateFieldAllowList(value interface{}, allowedFields map[string]bool, er } // validateInitContainers is called by pod spec and template validation to validate the list of init containers -func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, gracePeriod int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy) field.ErrorList { var allErrs field.ErrorList allNames := sets.String{} @@ -3262,7 +3276,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor idxPath := fldPath.Index(i) // Apply the validation common to all container types - allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts)...) + allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts, podRestartPolicy)...) restartAlways := false // Apply the validation specific to init containers @@ -3285,11 +3299,11 @@ func validateInitContainers(containers []core.Container, regularContainers []cor switch { case restartAlways: if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...) + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, idxPath.Child("lifecycle"))...) } - allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...) - allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...) - allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, idxPath.Child("startupProbe"))...) + allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, idxPath.Child("livenessProbe"))...) + allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, idxPath.Child("readinessProbe"))...) + allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, idxPath.Child("startupProbe"))...) default: // These fields are disallowed for init containers. @@ -3317,7 +3331,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor // validateContainerCommon applies validation common to all container types. It's called by regular, init, and ephemeral // container list validation to require a properly formatted name, image, etc. -func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, path *field.Path, opts PodValidationOptions) field.ErrorList { +func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, path *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy) field.ErrorList { var allErrs field.ErrorList namePath := path.Child("name") @@ -3355,7 +3369,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...) allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...) allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...) 
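The restartAlways branch above is what lets restartable (sidecar) init containers carry probes and lifecycle hooks, which remain forbidden for ordinary init containers. A sketch of such a sidecar using the public core/v1 types (image names and the port are placeholders; assumes the SidecarContainers feature):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	always := v1.ContainerRestartPolicyAlways

	sidecar := v1.Container{
		Name:          "log-shipper",
		Image:         "example.com/log-shipper:latest",
		RestartPolicy: &always, // makes this init container a restartable sidecar
		// Allowed only because RestartPolicy is Always; plain init containers
		// must not set probes or lifecycle handlers.
		ReadinessProbe: &v1.Probe{
			ProbeHandler: v1.ProbeHandler{
				HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt32(8080)},
			},
		},
	}

	spec := v1.PodSpec{
		InitContainers: []v1.Container{sidecar},
		Containers:     []v1.Container{{Name: "app", Image: "example.com/app:latest"}},
	}
	fmt.Println(len(spec.InitContainers), *spec.InitContainers[0].RestartPolicy)
}
```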
- allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"))...) + allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"), podRestartPolicy)...) allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"))...) return allErrs } @@ -3365,7 +3379,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList // Only make the following checks if hostUsers is false (otherwise, the container uses the // same userns as the host, and so there isn't anything to check). - if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers == true { + if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers { return allErrs } @@ -3389,7 +3403,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList } // validateContainers is called by pod spec and template validation to validate the list of regular containers. -func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { +func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, gracePeriod int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy) field.ErrorList { allErrs := field.ErrorList{} if len(containers) == 0 { @@ -3401,7 +3415,7 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol path := fldPath.Index(i) // Apply validation common to all containers - allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, path, opts)...) + allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, path, opts, podRestartPolicy)...) // Container names must be unique within the list of regular containers. // Collisions with init or ephemeral container names will be detected by the init or ephemeral @@ -3417,11 +3431,11 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol // Regular init container and ephemeral container validation will return // field.Forbidden() for these paths. if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, path.Child("lifecycle"))...) + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, gracePeriod, path.Child("lifecycle"))...) } - allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, path.Child("livenessProbe"))...) - allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, path.Child("readinessProbe"))...) - allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, path.Child("startupProbe"))...) + allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, gracePeriod, path.Child("livenessProbe"))...) + allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, gracePeriod, path.Child("readinessProbe"))...) + allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, gracePeriod, path.Child("startupProbe"))...) 
// These fields are disallowed for regular containers if ctr.RestartPolicy != nil { @@ -3943,13 +3957,19 @@ func validateHostIPs(pod *core.Pod) field.ErrorList { func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *field.Path, opts PodValidationOptions) field.ErrorList { allErrs := field.ErrorList{} + var gracePeriod int64 + if spec.TerminationGracePeriodSeconds != nil { + // this could happen in tests + gracePeriod = *spec.TerminationGracePeriodSeconds + } + vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts) allErrs = append(allErrs, vErrs...) podClaimNames := gatherPodResourceClaimNames(spec.ResourceClaims) allErrs = append(allErrs, validatePodResourceClaims(podMeta, spec.ResourceClaims, fldPath.Child("resourceClaims"))...) - allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...) - allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...) - allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...) + allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("containers"), opts, &spec.RestartPolicy)...) + allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, gracePeriod, fldPath.Child("initContainers"), opts, &spec.RestartPolicy)...) + allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts, &spec.RestartPolicy)...) allErrs = append(allErrs, validatePodHostNetworkDeps(spec, fldPath, opts)...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) @@ -4329,6 +4349,7 @@ func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, allowInvalidL allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) } } + allErrs = append(allErrs, validateMatchLabelKeysAndMismatchLabelKeys(fldPath, podAffinityTerm.MatchLabelKeys, podAffinityTerm.MismatchLabelKeys, podAffinityTerm.LabelSelector)...) if len(podAffinityTerm.TopologyKey) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty")) } @@ -4785,19 +4806,8 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel return allErrs } - //TODO(vinaykul,InPlacePodVerticalScaling): With KEP 2527, we can rely on persistence of PodStatus.QOSClass - // We can use PodStatus.QOSClass instead of GetPodQOS here, in kubelet, and elsewhere, as PodStatus.QOSClass - // does not change once it is bootstrapped in podCreate. This needs to be addressed before beta as a - // separate PR covering all uses of GetPodQOS. With that change, we can drop the below block. 
- // Ref: https://github.com/kubernetes/kubernetes/pull/102884#discussion_r1093790446 - // Ref: https://github.com/kubernetes/kubernetes/pull/102884/#discussion_r663280487 - if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - // reject attempts to change pod qos - oldQoS := qos.GetPodQOS(oldPod) - newQoS := qos.GetPodQOS(newPod) - if newQoS != oldQoS { - allErrs = append(allErrs, field.Invalid(fldPath, newQoS, "Pod QoS is immutable")) - } + if qos.GetPodQOS(oldPod) != qos.ComputePodQOS(newPod) { + allErrs = append(allErrs, field.Invalid(fldPath, newPod.Status.QOSClass, "Pod QoS is immutable")) } // handle updateable fields by munging those fields prior to deep equal comparison. @@ -4900,6 +4910,11 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel mungedPodSpec.Affinity.NodeAffinity = oldNodeAffinity // +k8s:verify-mutation:reason=clone } } + + // Note: Unlike NodeAffinity and NodeSelector, we cannot make PodAffinity/PodAntiAffinity mutable due to the presence of the matchLabelKeys/mismatchLabelKeys feature. + // Those features automatically generate the matchExpressions in labelSelector for PodAffinity/PodAntiAffinity when the Pod is created. + // When we make them mutable, we need to make sure things like how to handle/validate matchLabelKeys, + // and what if the fieldManager/A sets matchexpressions and fieldManager/B sets matchLabelKeys later. (could it lead the understandable conflict, etc) } if !apiequality.Semantic.DeepEqual(mungedPodSpec, oldPod.Spec) { @@ -7165,7 +7180,7 @@ func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstrai if err := validateNodeInclusionPolicy(subFldPath.Child("nodeTaintsPolicy"), constraint.NodeTaintsPolicy); err != nil { allErrs = append(allErrs, err) } - allErrs = append(allErrs, validateMatchLabelKeys(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...) + allErrs = append(allErrs, validateMatchLabelKeysInTopologySpread(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...) if !opts.AllowInvalidTopologySpreadConstraintLabelSelector { allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(constraint.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, subFldPath.Child("labelSelector"))...) } @@ -7242,8 +7257,60 @@ func validateNodeInclusionPolicy(fldPath *field.Path, policy *core.NodeInclusion return nil } -// validateMatchLabelKeys tests that the elements are a valid label name and are not already included in labelSelector. -func validateMatchLabelKeys(fldPath *field.Path, matchLabelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList { +// validateMatchLabelKeysAndMismatchLabelKeys checks if both matchLabelKeys and mismatchLabelKeys are valid. +// - validate that all matchLabelKeys and mismatchLabelKeys are valid label names. +// - validate that the user doens't specify the same key in both matchLabelKeys and labelSelector. +// - validate that any matchLabelKeys are not duplicated with mismatchLabelKeys. +func validateMatchLabelKeysAndMismatchLabelKeys(fldPath *field.Path, matchLabelKeys, mismatchLabelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList { + var allErrs field.ErrorList + // 1. validate that all matchLabelKeys and mismatchLabelKeys are valid label names. + allErrs = append(allErrs, validateLabelKeys(fldPath.Child("matchLabelKeys"), matchLabelKeys, labelSelector)...) 
+ allErrs = append(allErrs, validateLabelKeys(fldPath.Child("mismatchLabelKeys"), mismatchLabelKeys, labelSelector)...) + + // 2. validate that the user doens't specify the same key in both matchLabelKeys and labelSelector. + // It doesn't make sense to have the labelselector with the key specified in matchLabelKeys + // because the matchLabelKeys will be `In` labelSelector which matches with only one value in the key + // and we cannot make any further filtering with that key. + // On the other hand, we may want to have labelSelector with the key specified in mismatchLabelKeys. + // because the mismatchLabelKeys will be `NotIn` labelSelector + // and we may want to filter Pods further with other labelSelector with that key. + + // labelKeysMap is keyed by label key and valued by the index of label key in labelKeys. + if labelSelector != nil { + labelKeysMap := map[string]int{} + for i, key := range matchLabelKeys { + labelKeysMap[key] = i + } + labelSelectorKeys := sets.New[string]() + for key := range labelSelector.MatchLabels { + labelSelectorKeys.Insert(key) + } + for _, matchExpression := range labelSelector.MatchExpressions { + key := matchExpression.Key + if i, ok := labelKeysMap[key]; ok && labelSelectorKeys.Has(key) { + // Before validateLabelKeysWithSelector is called, the labelSelector has already got the selector created from matchLabelKeys. + // Here, we found the duplicate key in labelSelector and the key is specified in labelKeys. + // Meaning that the same key is specified in both labelSelector and matchLabelKeys/mismatchLabelKeys. + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), key, "exists in both matchLabelKeys and labelSelector")) + } + + labelSelectorKeys.Insert(key) + } + } + + // 3. validate that any matchLabelKeys are not duplicated with mismatchLabelKeys. + mismatchLabelKeysSet := sets.New(mismatchLabelKeys...) + for i, k := range matchLabelKeys { + if mismatchLabelKeysSet.Has(k) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("matchLabelKeys").Index(i), k, "exists in both matchLabelKeys and mismatchLabelKeys")) + } + } + + return allErrs +} + +// validateMatchLabelKeysInTopologySpread tests that the elements are a valid label name and are not already included in labelSelector. +func validateMatchLabelKeysInTopologySpread(fldPath *field.Path, matchLabelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList { if len(matchLabelKeys) == 0 { return nil } @@ -7272,6 +7339,25 @@ func validateMatchLabelKeys(fldPath *field.Path, matchLabelKeys []string, labelS return allErrs } +// validateLabelKeys tests that the label keys are a valid label name. +// It's intended to be used for matchLabelKeys or mismatchLabelKeys. +func validateLabelKeys(fldPath *field.Path, labelKeys []string, labelSelector *metav1.LabelSelector) field.ErrorList { + if len(labelKeys) == 0 { + return nil + } + + if labelSelector == nil { + return field.ErrorList{field.Forbidden(fldPath, "must not be specified when labelSelector is not set")} + } + + var allErrs field.ErrorList + for i, key := range labelKeys { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(key, fldPath.Index(i))...) + } + + return allErrs +} + // ValidateServiceClusterIPsRelatedFields validates .spec.ClusterIPs,, // .spec.IPFamilies, .spec.ipFamilyPolicy. This is exported because it is used // during IP init and allocation. 
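To make the new PodAffinityTerm fields concrete: each key in matchLabelKeys is merged into the term's labelSelector as `key in (<incoming pod's value>)`, each key in mismatchLabelKeys as `key notin (<value>)`, and validateMatchLabelKeysAndMismatchLabelKeys above rejects a key that also appears in the selector or that is listed in both fields. A rough anti-affinity sketch with the public core/v1 types (labels are illustrative; assumes an api module that already carries these alpha fields):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	term := v1.PodAffinityTerm{
		TopologyKey: "kubernetes.io/hostname",
		// Keep matching against pods of the same app...
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		// ...but only those from the same rollout: the incoming pod's value of
		// pod-template-hash is merged into the selector as
		// `pod-template-hash in (<value>)`.
		MatchLabelKeys: []string{"pod-template-hash"},
		// MismatchLabelKeys would instead merge `key notin (<value>)`. Repeating a
		// key across matchLabelKeys/mismatchLabelKeys, or using a key that is
		// already in the selector, fails validation.
	}

	antiAffinity := v1.PodAntiAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{term},
	}
	fmt.Println(antiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].MatchLabelKeys)
}
```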
diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 647cbae6a5631..c9f4cb8af4e77 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -52,6 +52,7 @@ const ( dnsLabelErrMsg = "a lowercase RFC 1123 label must consist of" dnsSubdomainLabelErrMsg = "a lowercase RFC 1123 subdomain" envVarNameErrMsg = "a valid environment variable name must consist of" + defaultGracePeriod = int64(30) ) var ( @@ -108,9 +109,8 @@ func TestValidatePersistentVolumes(t *testing.T) { validMode := core.PersistentVolumeFilesystem invalidMode := core.PersistentVolumeMode("fakeVolumeMode") scenarios := map[string]struct { - isExpectedFailure bool - enableReadWriteOncePod bool - volume *core.PersistentVolume + isExpectedFailure bool + volume *core.PersistentVolume }{ "good-volume": { isExpectedFailure: false, @@ -252,25 +252,8 @@ func TestValidatePersistentVolumes(t *testing.T) { VolumeMode: &invalidMode, }), }, - "with-read-write-once-pod-feature-gate-enabled": { - isExpectedFailure: false, - enableReadWriteOncePod: true, - volume: testVolume("foo", "", core.PersistentVolumeSpec{ - Capacity: core.ResourceList{ - core.ResourceName(core.ResourceStorage): resource.MustParse("10G"), - }, - AccessModes: []core.PersistentVolumeAccessMode{"ReadWriteOncePod"}, - PersistentVolumeSource: core.PersistentVolumeSource{ - HostPath: &core.HostPathVolumeSource{ - Path: "/foo", - Type: newHostPathType(string(core.HostPathDirectory)), - }, - }, - }), - }, - "with-read-write-once-pod-feature-gate-disabled": { - isExpectedFailure: true, - enableReadWriteOncePod: false, + "with-read-write-once-pod": { + isExpectedFailure: false, volume: testVolume("foo", "", core.PersistentVolumeSpec{ Capacity: core.ResourceList{ core.ResourceName(core.ResourceStorage): resource.MustParse("10G"), @@ -284,9 +267,8 @@ func TestValidatePersistentVolumes(t *testing.T) { }, }), }, - "with-read-write-once-pod-and-others-feature-gate-enabled": { - isExpectedFailure: true, - enableReadWriteOncePod: true, + "with-read-write-once-pod-and-others": { + isExpectedFailure: true, volume: testVolume("foo", "", core.PersistentVolumeSpec{ Capacity: core.ResourceList{ core.ResourceName(core.ResourceStorage): resource.MustParse("10G"), @@ -500,8 +482,6 @@ func TestValidatePersistentVolumes(t *testing.T) { for name, scenario := range scenarios { t.Run(name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, scenario.enableReadWriteOncePod)() - opts := ValidationOptionsForPersistentVolume(scenario.volume, nil) errs := ValidatePersistentVolume(scenario.volume, opts) if len(errs) == 0 && scenario.isExpectedFailure { @@ -902,51 +882,17 @@ func TestValidatePersistentVolumeSourceUpdate(t *testing.T) { func TestValidationOptionsForPersistentVolume(t *testing.T) { tests := map[string]struct { - oldPv *core.PersistentVolume - enableReadWriteOncePod bool - expectValidationOpts PersistentVolumeSpecValidationOptions + oldPv *core.PersistentVolume + expectValidationOpts PersistentVolumeSpecValidationOptions }{ "nil old pv": { - oldPv: nil, - enableReadWriteOncePod: true, - expectValidationOpts: PersistentVolumeSpecValidationOptions{ - AllowReadWriteOncePod: true, - }, - }, - "rwop allowed because feature enabled": { - oldPv: pvWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOnce}), - enableReadWriteOncePod: true, - expectValidationOpts: PersistentVolumeSpecValidationOptions{ - 
AllowReadWriteOncePod: true, - }, - }, - "rwop not allowed because not used and feature disabled": { - oldPv: pvWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOnce}), - enableReadWriteOncePod: false, - expectValidationOpts: PersistentVolumeSpecValidationOptions{ - AllowReadWriteOncePod: false, - }, - }, - "rwop allowed because used and feature enabled": { - oldPv: pvWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOncePod}), - enableReadWriteOncePod: true, - expectValidationOpts: PersistentVolumeSpecValidationOptions{ - AllowReadWriteOncePod: true, - }, - }, - "rwop allowed because used and feature disabled": { - oldPv: pvWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOncePod}), - enableReadWriteOncePod: false, - expectValidationOpts: PersistentVolumeSpecValidationOptions{ - AllowReadWriteOncePod: true, - }, + oldPv: nil, + expectValidationOpts: PersistentVolumeSpecValidationOptions{}, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tc.enableReadWriteOncePod)() - opts := ValidationOptionsForPersistentVolume(nil, tc.oldPv) if opts != tc.expectValidationOpts { t.Errorf("Expected opts: %+v, received: %+v", opts, tc.expectValidationOpts) @@ -972,26 +918,18 @@ func getCSIVolumeWithSecret(pv *core.PersistentVolume, secret *core.SecretRefere return pvCopy } -func pvWithAccessModes(accessModes []core.PersistentVolumeAccessMode) *core.PersistentVolume { - return &core.PersistentVolume{ - Spec: core.PersistentVolumeSpec{ - AccessModes: accessModes, - }, - } -} -func pvcWithAccessModes(accessModes []core.PersistentVolumeAccessMode) *core.PersistentVolumeClaim { +func pvcWithDataSource(dataSource *core.TypedLocalObjectReference) *core.PersistentVolumeClaim { return &core.PersistentVolumeClaim{ Spec: core.PersistentVolumeClaimSpec{ - AccessModes: accessModes, + DataSource: dataSource, }, } } - -func pvcTemplateWithAccessModes(accessModes []core.PersistentVolumeAccessMode) *core.PersistentVolumeClaimTemplate { - return &core.PersistentVolumeClaimTemplate{ +func pvcWithDataSourceRef(ref *core.TypedObjectReference) *core.PersistentVolumeClaim { + return &core.PersistentVolumeClaim{ Spec: core.PersistentVolumeClaimSpec{ - AccessModes: accessModes, + DataSourceRef: ref, }, } } @@ -1545,6 +1483,7 @@ func testVolumeClaimStorageClassInAnnotationAndNilInSpec(name, namespace, scName func testValidatePVC(t *testing.T, ephemeral bool) { invalidClassName := "-invalid-" validClassName := "valid" + invalidAPIGroup := "^invalid" invalidMode := core.PersistentVolumeMode("fakeVolumeMode") validMode := core.PersistentVolumeFilesystem goodName := "foo" @@ -1577,9 +1516,8 @@ func testValidatePVC(t *testing.T, ephemeral bool) { ten := int64(10) scenarios := map[string]struct { - isExpectedFailure bool - enableReadWriteOncePod bool - claim *core.PersistentVolumeClaim + isExpectedFailure bool + claim *core.PersistentVolumeClaim }{ "good-claim": { isExpectedFailure: false, @@ -1717,21 +1655,8 @@ func testValidatePVC(t *testing.T, ephemeral bool) { return claim }(), }, - "with-read-write-once-pod-feature-gate-enabled": { - isExpectedFailure: false, - enableReadWriteOncePod: true, - claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{ - AccessModes: []core.PersistentVolumeAccessMode{"ReadWriteOncePod"}, - Resources: core.VolumeResourceRequirements{ - Requests: core.ResourceList{ - core.ResourceName(core.ResourceStorage): 
resource.MustParse("10G"), - }, - }, - }), - }, - "with-read-write-once-pod-feature-gate-disabled": { - isExpectedFailure: true, - enableReadWriteOncePod: false, + "with-read-write-once-pod": { + isExpectedFailure: false, claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{ AccessModes: []core.PersistentVolumeAccessMode{"ReadWriteOncePod"}, Resources: core.VolumeResourceRequirements{ @@ -1741,9 +1666,8 @@ func testValidatePVC(t *testing.T, ephemeral bool) { }, }), }, - "with-read-write-once-pod-and-others-feature-gate-enabled": { - isExpectedFailure: true, - enableReadWriteOncePod: true, + "with-read-write-once-pod-and-others": { + isExpectedFailure: true, claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{ AccessModes: []core.PersistentVolumeAccessMode{"ReadWriteOncePod", "ReadWriteMany"}, Resources: core.VolumeResourceRequirements{ @@ -1934,12 +1858,46 @@ func testValidatePVC(t *testing.T, ephemeral bool) { }, }), }, + "invaild-apigroup-in-data-source": { + isExpectedFailure: true, + claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{ + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadWriteOnce, + }, + Resources: core.VolumeResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceName(core.ResourceStorage): resource.MustParse("10G"), + }, + }, + DataSource: &core.TypedLocalObjectReference{ + APIGroup: &invalidAPIGroup, + Kind: "Foo", + Name: "foo1", + }, + }), + }, + "invaild-apigroup-in-data-source-ref": { + isExpectedFailure: true, + claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{ + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadWriteOnce, + }, + Resources: core.VolumeResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceName(core.ResourceStorage): resource.MustParse("10G"), + }, + }, + DataSourceRef: &core.TypedObjectReference{ + APIGroup: &invalidAPIGroup, + Kind: "Foo", + Name: "foo1", + }, + }), + }, } for name, scenario := range scenarios { t.Run(name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, scenario.enableReadWriteOncePod)() - var errs field.ErrorList if ephemeral { volumes := []core.Volume{{ @@ -2057,6 +2015,7 @@ func TestAlphaPVVolumeModeUpdate(t *testing.T) { func TestValidatePersistentVolumeClaimUpdate(t *testing.T) { block := core.PersistentVolumeBlock file := core.PersistentVolumeFilesystem + invaildAPIGroup := "^invalid" validClaim := testVolumeClaimWithStatus("foo", "ns", core.PersistentVolumeClaimSpec{ AccessModes: []core.PersistentVolumeAccessMode{ @@ -2427,6 +2386,42 @@ func TestValidatePersistentVolumeClaimUpdate(t *testing.T) { }, }) + invalidClaimDataSourceAPIGroup := testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{ + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadWriteOnce, + }, + VolumeMode: &file, + Resources: core.VolumeResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceName(core.ResourceStorage): resource.MustParse("10G"), + }, + }, + VolumeName: "volume", + DataSource: &core.TypedLocalObjectReference{ + APIGroup: &invaildAPIGroup, + Kind: "Foo", + Name: "foo", + }, + }) + + invalidClaimDataSourceRefAPIGroup := testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{ + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadWriteOnce, + }, + VolumeMode: &file, + Resources: core.VolumeResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceName(core.ResourceStorage): 
resource.MustParse("10G"), + }, + }, + VolumeName: "volume", + DataSourceRef: &core.TypedObjectReference{ + APIGroup: &invaildAPIGroup, + Kind: "Foo", + Name: "foo", + }, + }) + scenarios := map[string]struct { isExpectedFailure bool oldClaim *core.PersistentVolumeClaim @@ -2631,6 +2626,16 @@ func TestValidatePersistentVolumeClaimUpdate(t *testing.T) { enableRecoverFromExpansion: true, isExpectedFailure: true, }, + "allow-update-pvc-when-data-source-used": { + oldClaim: invalidClaimDataSourceAPIGroup, + newClaim: invalidClaimDataSourceAPIGroup, + isExpectedFailure: false, + }, + "allow-update-pvc-when-data-source-ref-used": { + oldClaim: invalidClaimDataSourceRefAPIGroup, + newClaim: invalidClaimDataSourceRefAPIGroup, + isExpectedFailure: false, + }, } for name, scenario := range scenarios { @@ -2651,57 +2656,34 @@ func TestValidatePersistentVolumeClaimUpdate(t *testing.T) { } func TestValidationOptionsForPersistentVolumeClaim(t *testing.T) { + invaildAPIGroup := "^invalid" + tests := map[string]struct { - oldPvc *core.PersistentVolumeClaim - enableReadWriteOncePod bool - expectValidationOpts PersistentVolumeClaimSpecValidationOptions + oldPvc *core.PersistentVolumeClaim + expectValidationOpts PersistentVolumeClaimSpecValidationOptions }{ "nil pv": { - oldPvc: nil, - enableReadWriteOncePod: true, - expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: true, - EnableRecoverFromExpansionFailure: false, - }, - }, - "rwop allowed because feature enabled": { - oldPvc: pvcWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOnce}), - enableReadWriteOncePod: true, - expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: true, - EnableRecoverFromExpansionFailure: false, - }, - }, - "rwop not allowed because not used and feature disabled": { - oldPvc: pvcWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOnce}), - enableReadWriteOncePod: false, + oldPvc: nil, expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: false, EnableRecoverFromExpansionFailure: false, }, }, - "rwop allowed because used and feature enabled": { - oldPvc: pvcWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOncePod}), - enableReadWriteOncePod: true, + "invaild apiGroup in dataSource allowed because the old pvc is used": { + oldPvc: pvcWithDataSource(&core.TypedLocalObjectReference{APIGroup: &invaildAPIGroup}), expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: true, - EnableRecoverFromExpansionFailure: false, + AllowInvalidAPIGroupInDataSourceOrRef: true, }, }, - "rwop allowed because used and feature disabled": { - oldPvc: pvcWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOncePod}), - enableReadWriteOncePod: false, + "invaild apiGroup in dataSourceRef allowed because the old pvc is used": { + oldPvc: pvcWithDataSourceRef(&core.TypedObjectReference{APIGroup: &invaildAPIGroup}), expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: true, - EnableRecoverFromExpansionFailure: false, + AllowInvalidAPIGroupInDataSourceOrRef: true, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tc.enableReadWriteOncePod)() - opts := ValidationOptionsForPersistentVolumeClaim(nil, tc.oldPvc) if opts != tc.expectValidationOpts { t.Errorf("Expected opts: %+v, received: %+v", 
tc.expectValidationOpts, opts) @@ -2712,51 +2694,17 @@ func TestValidationOptionsForPersistentVolumeClaim(t *testing.T) { func TestValidationOptionsForPersistentVolumeClaimTemplate(t *testing.T) { tests := map[string]struct { - oldPvcTemplate *core.PersistentVolumeClaimTemplate - enableReadWriteOncePod bool - expectValidationOpts PersistentVolumeClaimSpecValidationOptions + oldPvcTemplate *core.PersistentVolumeClaimTemplate + expectValidationOpts PersistentVolumeClaimSpecValidationOptions }{ "nil pv": { - oldPvcTemplate: nil, - enableReadWriteOncePod: true, - expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: true, - }, - }, - "rwop allowed because feature enabled": { - oldPvcTemplate: pvcTemplateWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOnce}), - enableReadWriteOncePod: true, - expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: true, - }, - }, - "rwop not allowed because not used and feature disabled": { - oldPvcTemplate: pvcTemplateWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOnce}), - enableReadWriteOncePod: false, - expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: false, - }, - }, - "rwop allowed because used and feature enabled": { - oldPvcTemplate: pvcTemplateWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOncePod}), - enableReadWriteOncePod: true, - expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: true, - }, - }, - "rwop allowed because used and feature disabled": { - oldPvcTemplate: pvcTemplateWithAccessModes([]core.PersistentVolumeAccessMode{core.ReadWriteOncePod}), - enableReadWriteOncePod: false, - expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{ - AllowReadWriteOncePod: true, - }, + oldPvcTemplate: nil, + expectValidationOpts: PersistentVolumeClaimSpecValidationOptions{}, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tc.enableReadWriteOncePod)() - opts := ValidationOptionsForPersistentVolumeClaimTemplate(nil, tc.oldPvcTemplate) if opts != tc.expectValidationOpts { t.Errorf("Expected opts: %+v, received: %+v", opts, tc.expectValidationOpts) @@ -5613,7 +5561,6 @@ func TestHugePagesEnv(t *testing.T) { // enable gate for _, testCase := range testCases { t.Run(testCase.Name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DownwardAPIHugePages, true)() opts := PodValidationOptions{} if errs := validateEnvVarValueFrom(testCase, field.NewPath("field"), opts); len(errs) != 0 { t.Errorf("expected success, got: %v", errs) @@ -6503,7 +6450,7 @@ func TestValidateProbe(t *testing.T) { } for _, p := range successCases { - if errs := validateProbe(p, field.NewPath("field")); len(errs) != 0 { + if errs := validateProbe(p, defaultGracePeriod, field.NewPath("field")); len(errs) != 0 { t.Errorf("expected success: %v", errs) } } @@ -6515,7 +6462,7 @@ func TestValidateProbe(t *testing.T) { errorCases = append(errorCases, probe) } for _, p := range errorCases { - if errs := validateProbe(p, field.NewPath("field")); len(errs) == 0 { + if errs := validateProbe(p, defaultGracePeriod, field.NewPath("field")); len(errs) == 0 { t.Errorf("expected failure for %v", p) } } @@ -6621,7 +6568,7 @@ func Test_validateProbe(t *testing.T) { } for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { - got := validateProbe(tt.args.probe, tt.args.fldPath) + got := validateProbe(tt.args.probe, defaultGracePeriod, tt.args.fldPath) if len(got) != len(tt.want) { t.Errorf("validateProbe() = %v, want %v", got, tt.want) return @@ -6646,7 +6593,7 @@ func TestValidateHandler(t *testing.T) { {HTTPGet: &core.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []core.HTTPHeader{{Name: "X-Forwarded-For", Value: "1.2.3.4"}, {Name: "X-Forwarded-For", Value: "5.6.7.8"}}}}, } for _, h := range successCases { - if errs := validateHandler(handlerFromProbe(&h), field.NewPath("field")); len(errs) != 0 { + if errs := validateHandler(handlerFromProbe(&h), defaultGracePeriod, field.NewPath("field")); len(errs) != 0 { t.Errorf("expected success: %v", errs) } } @@ -6661,7 +6608,7 @@ func TestValidateHandler(t *testing.T) { {HTTPGet: &core.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []core.HTTPHeader{{Name: "X_Forwarded_For", Value: "foo.example.com"}}}}, } for _, h := range errorCases { - if errs := validateHandler(handlerFromProbe(&h), field.NewPath("field")); len(errs) == 0 { + if errs := validateHandler(handlerFromProbe(&h), defaultGracePeriod, field.NewPath("field")); len(errs) == 0 { t.Errorf("expected failure for %#v", h) } } @@ -6715,80 +6662,127 @@ func TestValidateResizePolicy(t *testing.T) { tSupportedResizeResources := sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) tSupportedResizePolicies := sets.NewString(string(core.NotRequired), string(core.RestartContainer)) type T struct { - PolicyList []core.ContainerResizePolicy - ExpectError bool - Errors field.ErrorList + PolicyList []core.ContainerResizePolicy + ExpectError bool + Errors field.ErrorList + PodRestartPolicy core.RestartPolicy } + testCases := map[string]T{ "ValidCPUandMemoryPolicies": { - []core.ContainerResizePolicy{ + PolicyList: []core.ContainerResizePolicy{ {ResourceName: "cpu", RestartPolicy: "NotRequired"}, {ResourceName: "memory", RestartPolicy: "RestartContainer"}, }, - false, - nil, + ExpectError: false, + Errors: nil, + PodRestartPolicy: "Always", }, "ValidCPUPolicy": { - []core.ContainerResizePolicy{ + PolicyList: []core.ContainerResizePolicy{ {ResourceName: "cpu", RestartPolicy: "RestartContainer"}, }, - false, - nil, + ExpectError: false, + Errors: nil, + PodRestartPolicy: "Always", }, "ValidMemoryPolicy": { - []core.ContainerResizePolicy{ + PolicyList: []core.ContainerResizePolicy{ {ResourceName: "memory", RestartPolicy: "NotRequired"}, }, - false, - nil, + ExpectError: false, + Errors: nil, + PodRestartPolicy: "Always", }, "NoPolicy": { - []core.ContainerResizePolicy{}, - false, - nil, + PolicyList: []core.ContainerResizePolicy{}, + ExpectError: false, + Errors: nil, + PodRestartPolicy: "Always", }, "ValidCPUandInvalidMemoryPolicy": { - []core.ContainerResizePolicy{ + PolicyList: []core.ContainerResizePolicy{ {ResourceName: "cpu", RestartPolicy: "NotRequired"}, {ResourceName: "memory", RestartPolicy: "Restarrrt"}, }, - true, - field.ErrorList{field.NotSupported(field.NewPath("field"), core.ResourceResizeRestartPolicy("Restarrrt"), tSupportedResizePolicies.List())}, + ExpectError: true, + Errors: field.ErrorList{field.NotSupported(field.NewPath("field"), core.ResourceResizeRestartPolicy("Restarrrt"), tSupportedResizePolicies.List())}, + PodRestartPolicy: "Always", }, "ValidMemoryandInvalidCPUPolicy": { - []core.ContainerResizePolicy{ + PolicyList: 
[]core.ContainerResizePolicy{ {ResourceName: "cpu", RestartPolicy: "RestartNotRequirrred"}, {ResourceName: "memory", RestartPolicy: "RestartContainer"}, }, - true, - field.ErrorList{field.NotSupported(field.NewPath("field"), core.ResourceResizeRestartPolicy("RestartNotRequirrred"), tSupportedResizePolicies.List())}, + ExpectError: true, + Errors: field.ErrorList{field.NotSupported(field.NewPath("field"), core.ResourceResizeRestartPolicy("RestartNotRequirrred"), tSupportedResizePolicies.List())}, + PodRestartPolicy: "Always", }, "InvalidResourceNameValidPolicy": { - []core.ContainerResizePolicy{ + PolicyList: []core.ContainerResizePolicy{ {ResourceName: "cpuuu", RestartPolicy: "NotRequired"}, }, - true, - field.ErrorList{field.NotSupported(field.NewPath("field"), core.ResourceName("cpuuu"), tSupportedResizeResources.List())}, + ExpectError: true, + Errors: field.ErrorList{field.NotSupported(field.NewPath("field"), core.ResourceName("cpuuu"), tSupportedResizeResources.List())}, + PodRestartPolicy: "Always", }, "ValidResourceNameMissingPolicy": { - []core.ContainerResizePolicy{ + PolicyList: []core.ContainerResizePolicy{ {ResourceName: "memory", RestartPolicy: ""}, }, - true, - field.ErrorList{field.Required(field.NewPath("field"), "")}, + ExpectError: true, + Errors: field.ErrorList{field.Required(field.NewPath("field"), "")}, + PodRestartPolicy: "Always", }, "RepeatedPolicies": { - []core.ContainerResizePolicy{ + PolicyList: []core.ContainerResizePolicy{ {ResourceName: "cpu", RestartPolicy: "NotRequired"}, {ResourceName: "memory", RestartPolicy: "RestartContainer"}, {ResourceName: "cpu", RestartPolicy: "RestartContainer"}, }, - true, - field.ErrorList{field.Duplicate(field.NewPath("field").Index(2), core.ResourceCPU)}, + ExpectError: true, + Errors: field.ErrorList{field.Duplicate(field.NewPath("field").Index(2), core.ResourceCPU)}, + PodRestartPolicy: "Always", + }, + "InvalidCPUPolicyWithPodRestartPolicy": { + PolicyList: []core.ContainerResizePolicy{ + {ResourceName: "cpu", RestartPolicy: "NotRequired"}, + {ResourceName: "memory", RestartPolicy: "RestartContainer"}, + }, + ExpectError: true, + Errors: field.ErrorList{field.Invalid(field.NewPath("field"), core.ResourceResizeRestartPolicy("RestartContainer"), "must be 'NotRequired' when `restartPolicy` is 'Never'")}, + PodRestartPolicy: "Never", + }, + "InvalidMemoryPolicyWithPodRestartPolicy": { + PolicyList: []core.ContainerResizePolicy{ + {ResourceName: "cpu", RestartPolicy: "RestartContainer"}, + {ResourceName: "memory", RestartPolicy: "NotRequired"}, + }, + ExpectError: true, + Errors: field.ErrorList{field.Invalid(field.NewPath("field"), core.ResourceResizeRestartPolicy("RestartContainer"), "must be 'NotRequired' when `restartPolicy` is 'Never'")}, + PodRestartPolicy: "Never", + }, + "InvalidMemoryCPUPolicyWithPodRestartPolicy": { + PolicyList: []core.ContainerResizePolicy{ + {ResourceName: "cpu", RestartPolicy: "RestartContainer"}, + {ResourceName: "memory", RestartPolicy: "RestartContainer"}, + }, + ExpectError: true, + Errors: field.ErrorList{field.Invalid(field.NewPath("field"), core.ResourceResizeRestartPolicy("RestartContainer"), "must be 'NotRequired' when `restartPolicy` is 'Never'"), field.Invalid(field.NewPath("field"), core.ResourceResizeRestartPolicy("RestartContainer"), "must be 'NotRequired' when `restartPolicy` is 'Never'")}, + PodRestartPolicy: "Never", + }, + "ValidMemoryCPUPolicyWithPodRestartPolicy": { + PolicyList: []core.ContainerResizePolicy{ + {ResourceName: "cpu", RestartPolicy: "NotRequired"}, + 
{ResourceName: "memory", RestartPolicy: "NotRequired"}, + }, + ExpectError: false, + Errors: nil, + PodRestartPolicy: "Never", }, } for k, v := range testCases { - errs := validateResizePolicy(v.PolicyList, field.NewPath("field")) + errs := validateResizePolicy(v.PolicyList, field.NewPath("field"), &v.PodRestartPolicy) if !v.ExpectError && len(errs) > 0 { t.Errorf("Testcase %s - expected success, got error: %+v", k, errs) } @@ -6884,7 +6878,19 @@ func TestValidateEphemeralContainers(t *testing.T) { }, }}, } { - if errs := validateEphemeralContainers(ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}); len(errs) != 0 { + var PodRestartPolicy core.RestartPolicy + PodRestartPolicy = "Never" + if errs := validateEphemeralContainers(ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}, &PodRestartPolicy); len(errs) != 0 { + t.Errorf("expected success for '%s' but got errors: %v", title, errs) + } + + PodRestartPolicy = "Always" + if errs := validateEphemeralContainers(ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}, &PodRestartPolicy); len(errs) != 0 { + t.Errorf("expected success for '%s' but got errors: %v", title, errs) + } + + PodRestartPolicy = "OnFailure" + if errs := validateEphemeralContainers(ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}, &PodRestartPolicy); len(errs) != 0 { t.Errorf("expected success for '%s' but got errors: %v", title, errs) } } @@ -7203,9 +7209,25 @@ func TestValidateEphemeralContainers(t *testing.T) { }, } + var PodRestartPolicy core.RestartPolicy + for _, tc := range tcs { t.Run(tc.title+"__@L"+tc.line, func(t *testing.T) { - errs := validateEphemeralContainers(tc.ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}) + + PodRestartPolicy = "Never" + errs := validateEphemeralContainers(tc.ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}, &PodRestartPolicy) + if len(errs) == 0 { + t.Fatal("expected error but received none") + } + + PodRestartPolicy = "Always" + errs = validateEphemeralContainers(tc.ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}, &PodRestartPolicy) + if len(errs) == 0 { + t.Fatal("expected error but received none") + } + + PodRestartPolicy = "OnFailure" + errs = validateEphemeralContainers(tc.ephemeralContainers, containers, initContainers, vols, nil, field.NewPath("ephemeralContainers"), PodValidationOptions{}, &PodRestartPolicy) if len(errs) == 0 { t.Fatal("expected error but received none") } @@ -7503,7 +7525,10 @@ func TestValidateContainers(t *testing.T) { }, }, } - if errs := validateContainers(successCase, volumeDevices, nil, field.NewPath("field"), PodValidationOptions{}); len(errs) != 0 { + + var PodRestartPolicy core.RestartPolicy + PodRestartPolicy = "Always" + if errs := validateContainers(successCase, volumeDevices, nil, defaultGracePeriod, field.NewPath("field"), PodValidationOptions{}, &PodRestartPolicy); len(errs) != 0 { t.Errorf("expected success: %v", errs) } @@ -8114,9 +8139,10 @@ func TestValidateContainers(t *testing.T) { field.ErrorList{{Type: field.ErrorTypeForbidden, Field: "containers[0].restartPolicy"}}, }, } + for _, tc := range 
errorCases { t.Run(tc.title+"__@L"+tc.line, func(t *testing.T) { - errs := validateContainers(tc.containers, volumeDevices, nil, field.NewPath("containers"), PodValidationOptions{}) + errs := validateContainers(tc.containers, volumeDevices, nil, defaultGracePeriod, field.NewPath("containers"), PodValidationOptions{}, &PodRestartPolicy) if len(errs) == 0 { t.Fatal("expected error but received none") } @@ -8204,7 +8230,9 @@ func TestValidateInitContainers(t *testing.T) { }, }, } - if errs := validateInitContainers(successCase, containers, volumeDevices, nil, field.NewPath("field"), PodValidationOptions{}); len(errs) != 0 { + var PodRestartPolicy core.RestartPolicy + PodRestartPolicy = "Never" + if errs := validateInitContainers(successCase, containers, volumeDevices, nil, defaultGracePeriod, field.NewPath("field"), PodValidationOptions{}, &PodRestartPolicy); len(errs) != 0 { t.Errorf("expected success: %v", errs) } @@ -8580,9 +8608,10 @@ func TestValidateInitContainers(t *testing.T) { field.ErrorList{{Type: field.ErrorTypeRequired, Field: "initContainers[0].lifecycle.preStop", BadValue: ""}}, }, } + for _, tc := range errorCases { t.Run(tc.title+"__@L"+tc.line, func(t *testing.T) { - errs := validateInitContainers(tc.initContainers, containers, volumeDevices, nil, field.NewPath("initContainers"), PodValidationOptions{}) + errs := validateInitContainers(tc.initContainers, containers, volumeDevices, nil, defaultGracePeriod, field.NewPath("initContainers"), PodValidationOptions{}, &PodRestartPolicy) if len(errs) == 0 { t.Fatal("expected error but received none") } @@ -10003,49 +10032,248 @@ func TestValidatePod(t *testing.T) { DNSPolicy: core.DNSClusterFirst, }, }, - } - for k, v := range successCases { - t.Run(k, func(t *testing.T) { - if errs := ValidatePodCreate(&v, PodValidationOptions{}); len(errs) != 0 { - t.Errorf("expected success: %v", errs) - } - }) - } - - errorCases := map[string]struct { - spec core.Pod - expectedError string - }{ - "bad name": { - expectedError: "metadata.name", - spec: core.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: "ns"}, - Spec: core.PodSpec{ - RestartPolicy: core.RestartPolicyAlways, - DNSPolicy: core.DNSClusterFirst, - Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + "MatchLabelKeys/MismatchLabelKeys in required PodAffinity": { + ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"}, + Spec: core.PodSpec{ + Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + Affinity: &core.Affinity{ + PodAffinity: &core.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + { + Key: "key2", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key3", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"key2"}, + MismatchLabelKeys: []string{"key3"}, + }, + }, + }, }, }, }, - "image whitespace": { - expectedError: "spec.containers[0].image", - spec: core.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: "ns"}, - Spec: core.PodSpec{ - RestartPolicy: 
core.RestartPolicyAlways, - DNSPolicy: core.DNSClusterFirst, - Containers: []core.Container{{Name: "ctr", Image: " ", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + "MatchLabelKeys/MismatchLabelKeys in preferred PodAffinity": { + ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"}, + Spec: core.PodSpec{ + Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + Affinity: &core.Affinity{ + PodAffinity: &core.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + { + Key: "key2", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key3", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"key2"}, + MismatchLabelKeys: []string{"key3"}, + }, + }, + }, + }, }, }, }, - "image leading and trailing whitespace": { - expectedError: "spec.containers[0].image", - spec: core.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: "ns"}, - Spec: core.PodSpec{ - RestartPolicy: core.RestartPolicyAlways, - DNSPolicy: core.DNSClusterFirst, - Containers: []core.Container{{Name: "ctr", Image: " something ", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + "MatchLabelKeys/MismatchLabelKeys in required PodAntiAffinity": { + ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"}, + Spec: core.PodSpec{ + Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + Affinity: &core.Affinity{ + PodAntiAffinity: &core.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + { + Key: "key2", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key3", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"key2"}, + MismatchLabelKeys: []string{"key3"}, + }, + }, + }, + }, + }, + }, + "MatchLabelKeys/MismatchLabelKeys in preferred PodAntiAffinity": { + ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"}, + Spec: core.PodSpec{ + Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + Affinity: &core.Affinity{ + PodAntiAffinity: &core.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + { + Key: "key2", + Operator: 
metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key3", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"key2"}, + MismatchLabelKeys: []string{"key3"}, + }, + }, + }, + }, + }, + }, + }, + "LabelSelector can have the same key as MismatchLabelKeys": { + // Note: On the contrary, in case of matchLabelKeys, keys in matchLabelKeys are not allowed to be specified in labelSelector by users. + ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"}, + Spec: core.PodSpec{ + Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + Affinity: &core.Affinity{ + PodAffinity: &core.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + { + // This is the same key as in MismatchLabelKeys + // but it's allowed. + Key: "key2", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key2", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MismatchLabelKeys: []string{"key2"}, + }, + }, + }, + }, + }, + }, + } + + for k, v := range successCases { + t.Run(k, func(t *testing.T) { + if errs := ValidatePodCreate(&v, PodValidationOptions{}); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + }) + } + + errorCases := map[string]struct { + spec core.Pod + expectedError string + }{ + "bad name": { + expectedError: "metadata.name", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "", Namespace: "ns"}, + Spec: core.PodSpec{ + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + }, + }, + }, + "image whitespace": { + expectedError: "spec.containers[0].image", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: "ns"}, + Spec: core.PodSpec{ + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + Containers: []core.Container{{Name: "ctr", Image: " ", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, + }, + }, + }, + "image leading and trailing whitespace": { + expectedError: "spec.containers[0].image", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: "ns"}, + Spec: core.PodSpec{ + RestartPolicy: core.RestartPolicyAlways, + DNSPolicy: core.DNSClusterFirst, + Containers: []core.Container{{Name: "ctr", Image: " something ", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}}, }, }, }, @@ -10149,223 +10377,703 @@ func TestValidatePod(t *testing.T) { }), }, }, - "invalid node field selector requirement in node affinity, invalid operator": { - expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchFields[0].operator", + "invalid node field selector requirement in node affinity, invalid operator": { + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchFields[0].operator", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + NodeAffinity: &core.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{ + NodeSelectorTerms: []core.NodeSelectorTerm{{ + MatchFields: []core.NodeSelectorRequirement{{ + Key: "metadata.name", + Operator: core.NodeSelectorOpExists, + }}, + }}, + }, + }, + }), + }, + }, + "invalid node field selector requirement in node affinity, invalid key": { + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchFields[0].key", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + NodeAffinity: &core.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{ + NodeSelectorTerms: []core.NodeSelectorTerm{{ + MatchFields: []core.NodeSelectorRequirement{{ + Key: "metadata.namespace", + Operator: core.NodeSelectorOpIn, + Values: []string{"ns1"}, + }}, + }}, + }, + }, + }), + }, + }, + "invalid preferredSchedulingTerm in node affinity, weight should be in range 1-100": { + expectedError: "must be in the range 1-100", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + NodeAffinity: &core.NodeAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.PreferredSchedulingTerm{{ + Weight: 199, + Preference: core.NodeSelectorTerm{ + MatchExpressions: []core.NodeSelectorRequirement{{ + Key: "foo", + Operator: core.NodeSelectorOpIn, + Values: []string{"bar"}, + }}, + }, + }}, + }, + }), + }, + }, + "invalid requiredDuringSchedulingIgnoredDuringExecution node selector, nodeSelectorTerms must have at least one term": { + expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + NodeAffinity: &core.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{ + NodeSelectorTerms: []core.NodeSelectorTerm{}, + }, + }, + }), + }, + }, + "invalid weight in preferredDuringSchedulingIgnoredDuringExecution in pod affinity annotations, weight should be in range 1-100": { + expectedError: "must be in the range 1-100", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAffinity: &core.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ + Weight: 109, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }}, + }, + Namespaces: []string{"ns"}, + TopologyKey: "region", + }, + }}, + }, + }), + }, + }, + "invalid labelSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, values should be empty if the operator is Exists": { + expectedError: "spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAntiAffinity: &core.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ + Weight: 10, + PodAffinityTerm: 
core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpExists, + Values: []string{"value1", "value2"}, + }}, + }, + Namespaces: []string{"ns"}, + TopologyKey: "region", + }, + }}, + }, + }), + }, + }, + "invalid namespaceSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity, In operator must include Values": { + expectedError: "spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.namespaceSelector.matchExpressions[0].values", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAntiAffinity: &core.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + NamespaceSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpIn, + }}, + }, + Namespaces: []string{"ns"}, + TopologyKey: "region", + }, + }}, + }, + }), + }, + }, + "invalid namespaceSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity, Exists operator can not have values": { + expectedError: "spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.namespaceSelector.matchExpressions[0].values", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAntiAffinity: &core.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + NamespaceSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpExists, + Values: []string{"value1", "value2"}, + }}, + }, + Namespaces: []string{"ns"}, + TopologyKey: "region", + }, + }}, + }, + }), + }, + }, + "invalid name space in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, namespace should be valid": { + expectedError: "spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.namespace", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAffinity: &core.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpExists, + }}, + }, + Namespaces: []string{"INVALID_NAMESPACE"}, + TopologyKey: "region", + }, + }}, + }, + }), + }, + }, + "invalid hard pod affinity, empty topologyKey is not allowed for hard pod affinity": { + expectedError: "can not be empty", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAffinity: &core.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1", "value2"}, + }}, + }, + Namespaces: []string{"ns"}, + }}, + }, + }), + }, + }, + "invalid hard pod anti-affinity, empty 
topologyKey is not allowed for hard pod anti-affinity": { + expectedError: "can not be empty", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAntiAffinity: &core.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1", "value2"}, + }}, + }, + Namespaces: []string{"ns"}, + }}, + }, + }), + }, + }, + "invalid soft pod affinity, empty topologyKey is not allowed for soft pod affinity": { + expectedError: "can not be empty", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAffinity: &core.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }}, + }, + Namespaces: []string{"ns"}, + }, + }}, + }, + }), + }, + }, + "invalid soft pod anti-affinity, empty topologyKey is not allowed for soft pod anti-affinity": { + expectedError: "can not be empty", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAntiAffinity: &core.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "key2", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }}, + }, + Namespaces: []string{"ns"}, + }, + }}, + }, + }), + }, + }, + "invalid soft pod affinity, key in MatchLabelKeys isn't correctly defined": { + expectedError: "prefix part must be non-empty", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAffinity: &core.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"/simple"}, + }, + }, + }, + }, + }), + }, + }, + "invalid hard pod affinity, key in MatchLabelKeys isn't correctly defined": { + expectedError: "prefix part must be non-empty", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAffinity: &core.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"/simple"}, + }, + }, + }, + }), + }, + }, + "invalid soft pod anti-affinity, key in MatchLabelKeys isn't correctly defined": { + expectedError: "prefix part must be 
non-empty", + spec: core.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "123", + Namespace: "ns", + }, + Spec: validPodSpec(&core.Affinity{ + PodAntiAffinity: &core.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"/simple"}, + }, + }, + }, + }, + }), + }, + }, + "invalid hard pod anti-affinity, key in MatchLabelKeys isn't correctly defined": { + expectedError: "prefix part must be non-empty", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", }, Spec: validPodSpec(&core.Affinity{ - NodeAffinity: &core.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{ - NodeSelectorTerms: []core.NodeSelectorTerm{{ - MatchFields: []core.NodeSelectorRequirement{{ - Key: "metadata.name", - Operator: core.NodeSelectorOpExists, - }}, - }}, + PodAntiAffinity: &core.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"/simple"}, + }, }, }, }), }, }, - "invalid node field selector requirement in node affinity, invalid key": { - expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchFields[0].key", + "invalid soft pod affinity, key in MismatchLabelKeys isn't correctly defined": { + expectedError: "prefix part must be non-empty", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", }, Spec: validPodSpec(&core.Affinity{ - NodeAffinity: &core.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{ - NodeSelectorTerms: []core.NodeSelectorTerm{{ - MatchFields: []core.NodeSelectorRequirement{{ - Key: "metadata.namespace", - Operator: core.NodeSelectorOpIn, - Values: []string{"ns1"}, - }}, - }}, + PodAffinity: &core.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MismatchLabelKeys: []string{"/simple"}, + }, + }, }, }, }), }, }, - "invalid preferredSchedulingTerm in node affinity, weight should be in range 1-100": { - expectedError: "must be in the range 1-100", + "invalid hard pod affinity, key in MismatchLabelKeys isn't correctly defined": { + expectedError: "prefix part must be non-empty", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", }, Spec: validPodSpec(&core.Affinity{ - NodeAffinity: &core.NodeAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []core.PreferredSchedulingTerm{{ - Weight: 199, - Preference: core.NodeSelectorTerm{ - MatchExpressions: []core.NodeSelectorRequirement{{ - Key: "foo", - Operator: core.NodeSelectorOpIn, - Values: []string{"bar"}, - }}, + PodAffinity: &core.PodAffinity{ + 
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MismatchLabelKeys: []string{"/simple"}, }, - }}, + }, }, }), }, }, - "invalid requiredDuringSchedulingIgnoredDuringExecution node selector, nodeSelectorTerms must have at least one term": { - expectedError: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", + "invalid soft pod anti-affinity, key in MismatchLabelKeys isn't correctly defined": { + expectedError: "prefix part must be non-empty", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", }, Spec: validPodSpec(&core.Affinity{ - NodeAffinity: &core.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{ - NodeSelectorTerms: []core.NodeSelectorTerm{}, + PodAntiAffinity: &core.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MismatchLabelKeys: []string{"/simple"}, + }, + }, }, }, }), }, }, - "invalid weight in preferredDuringSchedulingIgnoredDuringExecution in pod affinity annotations, weight should be in range 1-100": { - expectedError: "must be in the range 1-100", + "invalid hard pod anti-affinity, key in MismatchLabelKeys isn't correctly defined": { + expectedError: "prefix part must be non-empty", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", }, Spec: validPodSpec(&core.Affinity{ - PodAffinity: &core.PodAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ - Weight: 109, - PodAffinityTerm: core.PodAffinityTerm{ + PodAntiAffinity: &core.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: metav1.LabelSelectorOpNotIn, - Values: []string{"value1", "value2"}, - }}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, }, - Namespaces: []string{"ns"}, - TopologyKey: "region", + TopologyKey: "k8s.io/zone", + MismatchLabelKeys: []string{"/simple"}, }, - }}, + }, }, }), }, }, - "invalid labelSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, values should be empty if the operator is Exists": { - expectedError: "spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values", + "invalid soft pod affinity, key exists in both matchLabelKeys and labelSelector": { + expectedError: "exists in both matchLabelKeys and labelSelector", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", + Labels: map[string]string{"key": "value1"}, }, Spec: validPodSpec(&core.Affinity{ - PodAntiAffinity: &core.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ - Weight: 10, - PodAffinityTerm: 
core.PodAffinityTerm{ - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: metav1.LabelSelectorOpExists, - Values: []string{"value1", "value2"}, - }}, + PodAffinity: &core.PodAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + // This one should be created from MatchLabelKeys. + { + Key: "key", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"key"}, }, - Namespaces: []string{"ns"}, - TopologyKey: "region", }, - }}, + }, }, }), }, }, - "invalid namespaceSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity, In operator must include Values": { - expectedError: "spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.namespaceSelector.matchExpressions[0].values", + "invalid hard pod affinity, key exists in both matchLabelKeys and labelSelector": { + expectedError: "exists in both matchLabelKeys and labelSelector", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", + Labels: map[string]string{"key": "value1"}, }, Spec: validPodSpec(&core.Affinity{ - PodAntiAffinity: &core.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ - Weight: 10, - PodAffinityTerm: core.PodAffinityTerm{ - NamespaceSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: metav1.LabelSelectorOpIn, - }}, + PodAffinity: &core.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + // This one should be created from MatchLabelKeys. 
+ { + Key: "key", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value2"}, + }, + }, }, - Namespaces: []string{"ns"}, - TopologyKey: "region", + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"key"}, }, - }}, + }, }, }), }, }, - "invalid namespaceSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity, Exists operator can not have values": { - expectedError: "spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.namespaceSelector.matchExpressions[0].values", + "invalid soft pod anti-affinity, key exists in both matchLabelKeys and labelSelector": { + expectedError: "exists in both matchLabelKeys and labelSelector", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", + Labels: map[string]string{"key": "value1"}, }, Spec: validPodSpec(&core.Affinity{ PodAntiAffinity: &core.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ - Weight: 10, - PodAffinityTerm: core.PodAffinityTerm{ - NamespaceSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: metav1.LabelSelectorOpExists, - Values: []string{"value1", "value2"}, - }}, + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + // This one should be created from MatchLabelKeys. + { + Key: "key", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"key"}, }, - Namespaces: []string{"ns"}, - TopologyKey: "region", }, - }}, + }, }, }), }, }, - "invalid name space in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, namespace should be valid": { - expectedError: "spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.namespace", + "invalid hard pod anti-affinity, key exists in both matchLabelKeys and labelSelector": { + expectedError: "exists in both matchLabelKeys and labelSelector", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", + Labels: map[string]string{"key": "value1"}, }, Spec: validPodSpec(&core.Affinity{ - PodAffinity: &core.PodAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ - Weight: 10, - PodAffinityTerm: core.PodAffinityTerm{ + PodAntiAffinity: &core.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: metav1.LabelSelectorOpExists, - }}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + // This one should be created from MatchLabelKeys. 
+ { + Key: "key", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"value1"}, + }, + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value2"}, + }, + }, }, - Namespaces: []string{"INVALID_NAMESPACE"}, - TopologyKey: "region", + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"key"}, }, - }}, + }, }, }), }, }, - "invalid hard pod affinity, empty topologyKey is not allowed for hard pod affinity": { - expectedError: "can not be empty", + "invalid soft pod affinity, key exists in both MatchLabelKeys and MismatchLabelKeys": { + expectedError: "exists in both matchLabelKeys and mismatchLabelKeys", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", @@ -10373,71 +11081,92 @@ func TestValidatePod(t *testing.T) { }, Spec: validPodSpec(&core.Affinity{ PodAffinity: &core.PodAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{{ - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"value1", "value2"}, - }}, + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"samekey"}, + MismatchLabelKeys: []string{"samekey"}, + }, }, - Namespaces: []string{"ns"}, - }}, + }, }, }), }, }, - "invalid hard pod anti-affinity, empty topologyKey is not allowed for hard pod anti-affinity": { - expectedError: "can not be empty", + "invalid hard pod affinity, key exists in both MatchLabelKeys and MismatchLabelKeys": { + expectedError: "exists in both matchLabelKeys and mismatchLabelKeys", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", }, Spec: validPodSpec(&core.Affinity{ - PodAntiAffinity: &core.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{{ - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"value1", "value2"}, - }}, + PodAffinity: &core.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"samekey"}, + MismatchLabelKeys: []string{"samekey"}, }, - Namespaces: []string{"ns"}, - }}, + }, }, }), }, }, - "invalid soft pod affinity, empty topologyKey is not allowed for soft pod affinity": { - expectedError: "can not be empty", + "invalid soft pod anti-affinity, key exists in both MatchLabelKeys and MismatchLabelKeys": { + expectedError: "exists in both matchLabelKeys and mismatchLabelKeys", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", Namespace: "ns", }, Spec: validPodSpec(&core.Affinity{ - PodAffinity: &core.PodAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ - Weight: 10, - PodAffinityTerm: core.PodAffinityTerm{ - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: 
metav1.LabelSelectorOpNotIn, - Values: []string{"value1", "value2"}, - }}, + PodAntiAffinity: &core.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{ + { + Weight: 10, + PodAffinityTerm: core.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, + }, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"samekey"}, + MismatchLabelKeys: []string{"samekey"}, }, - Namespaces: []string{"ns"}, }, - }}, + }, }, }), }, }, - "invalid soft pod anti-affinity, empty topologyKey is not allowed for soft pod anti-affinity": { - expectedError: "can not be empty", + "invalid hard pod anti-affinity, key exists in both MatchLabelKeys and MismatchLabelKeys": { + expectedError: "exists in both matchLabelKeys and mismatchLabelKeys", spec: core.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "123", @@ -10445,19 +11174,22 @@ func TestValidatePod(t *testing.T) { }, Spec: validPodSpec(&core.Affinity{ PodAntiAffinity: &core.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{ - Weight: 10, - PodAffinityTerm: core.PodAffinityTerm{ + RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{ + { LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "key2", - Operator: metav1.LabelSelectorOpNotIn, - Values: []string{"value1", "value2"}, - }}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, }, - Namespaces: []string{"ns"}, + TopologyKey: "k8s.io/zone", + MatchLabelKeys: []string{"samekey"}, + MismatchLabelKeys: []string{"samekey"}, }, - }}, + }, }, }), }, @@ -10948,7 +11680,7 @@ func TestValidatePod(t *testing.T) { } for k, v := range errorCases { t.Run(k, func(t *testing.T) { - if errs := ValidatePodCreate(&v.spec, PodValidationOptions{AllowInvalidPodDeletionCost: false}); len(errs) == 0 { + if errs := ValidatePodCreate(&v.spec, PodValidationOptions{}); len(errs) == 0 { t.Errorf("expected failure") } else if v.expectedError == "" { t.Errorf("missing expectedError, got %q", errs.ToAggregate().Error()) @@ -21561,11 +22293,13 @@ func TestValidateTopologySpreadConstraints(t *testing.T) { WhenUnsatisfiable: core.DoNotSchedule, MatchLabelKeys: []string{"foo"}, LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{{ - Key: "foo", - Operator: metav1.LabelSelectorOpNotIn, - Values: []string{"value1", "value2"}, - }}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"value1", "value2"}, + }, + }, }, }}, wantFieldErrors: field.ErrorList{field.Invalid(fieldPathMatchLabelKeys.Index(0), "foo", "exists in both matchLabelKeys and labelSelector")}, @@ -23507,3 +24241,57 @@ func TestValidateLoadBalancerStatus(t *testing.T) { }) } } + +func TestValidateSleepAction(t *testing.T) { + fldPath := field.NewPath("root") + getInvalidStr := func(gracePeriod int64) string { + return fmt.Sprintf("must be greater than 0 and less than terminationGracePeriodSeconds (%d)", gracePeriod) + } + + testCases := []struct { + name string + action *core.SleepAction + gracePeriod int64 + expectErr field.ErrorList + }{ + { + name: "valid setting", + action: &core.SleepAction{ + Seconds: 5, + }, + gracePeriod: 
30, + }, + { + name: "negative seconds", + action: &core.SleepAction{ + Seconds: -1, + }, + gracePeriod: 30, + expectErr: field.ErrorList{field.Invalid(fldPath, -1, getInvalidStr(30))}, + }, + { + name: "longer than gracePeriod", + action: &core.SleepAction{ + Seconds: 5, + }, + gracePeriod: 3, + expectErr: field.ErrorList{field.Invalid(fldPath, 5, getInvalidStr(3))}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + errs := validateSleepAction(tc.action, tc.gracePeriod, fldPath) + + if len(tc.expectErr) > 0 && len(errs) == 0 { + t.Errorf("Unexpected success") + } else if len(tc.expectErr) == 0 && len(errs) != 0 { + t.Errorf("Unexpected error(s): %v", errs) + } else if len(tc.expectErr) > 0 { + if tc.expectErr[0].Error() != errs[0].Error() { + t.Errorf("Unexpected error(s): %v", errs) + } + } + }) + } +} diff --git a/pkg/apis/core/zz_generated.deepcopy.go b/pkg/apis/core/zz_generated.deepcopy.go index cf8ba45cc950d..01b597f46e2ff 100644 --- a/pkg/apis/core/zz_generated.deepcopy.go +++ b/pkg/apis/core/zz_generated.deepcopy.go @@ -2045,6 +2045,11 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { *out = new(TCPSocketAction) **out = **in } + if in.Sleep != nil { + in, out := &in.Sleep, &out.Sleep + *out = new(SleepAction) + **out = **in + } return } @@ -3479,6 +3484,16 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -5671,6 +5686,22 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SleepAction) DeepCopyInto(out *SleepAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction. +func (in *SleepAction) DeepCopy() *SleepAction { + if in == nil { + return nil + } + out := new(SleepAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { *out = *in diff --git a/pkg/apis/discovery/types.go b/pkg/apis/discovery/types.go index 5838407e1a107..4ae1693347a66 100644 --- a/pkg/apis/discovery/types.go +++ b/pkg/apis/discovery/types.go @@ -170,7 +170,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). 
// // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/pkg/apis/networking/register.go b/pkg/apis/networking/register.go index 570a6a4db3bc5..0e1a01af46fae 100644 --- a/pkg/apis/networking/register.go +++ b/pkg/apis/networking/register.go @@ -52,8 +52,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &IngressClass{}, &IngressClassList{}, - &ClusterCIDR{}, - &ClusterCIDRList{}, &IPAddress{}, &IPAddressList{}, ) diff --git a/pkg/apis/networking/types.go b/pkg/apis/networking/types.go index 9ec17540baefc..83a7a55f38ec7 100644 --- a/pkg/apis/networking/types.go +++ b/pkg/apis/networking/types.go @@ -18,7 +18,6 @@ package networking import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/kubernetes/pkg/apis/core" ) @@ -599,71 +598,6 @@ type ServiceBackendPort struct { // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -type ClusterCIDR struct { - metav1.TypeMeta - - metav1.ObjectMeta - - Spec ClusterCIDRSpec -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -type ClusterCIDRSpec struct { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - NodeSelector *api.NodeSelector - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - PerNodeHostBits int32 - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv4 string - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv6 string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterCIDRList contains a list of ClusterCIDRs. 
-type ClusterCIDRList struct { - metav1.TypeMeta - - // +optional - metav1.ListMeta - - // items is the list of ClusterCIDRs. - Items []ClusterCIDR -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs // that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. // An IP address can be represented in different formats, to guarantee the uniqueness of the IP, @@ -695,9 +629,6 @@ type ParentReference struct { Namespace string // Name is the name of the object being referenced. Name string - // UID is the uid of the object being referenced. - // +optional - UID types.UID } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/networking/v1alpha1/zz_generated.conversion.go b/pkg/apis/networking/v1alpha1/zz_generated.conversion.go index 43e5c004eeeea..28a388c7d589f 100644 --- a/pkg/apis/networking/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/networking/v1alpha1/zz_generated.conversion.go @@ -24,12 +24,9 @@ package v1alpha1 import ( unsafe "unsafe" - v1 "k8s.io/api/core/v1" v1alpha1 "k8s.io/api/networking/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - core "k8s.io/kubernetes/pkg/apis/core" networking "k8s.io/kubernetes/pkg/apis/networking" ) @@ -40,36 +37,6 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDR)(nil), (*networking.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(a.(*v1alpha1.ClusterCIDR), b.(*networking.ClusterCIDR), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDR)(nil), (*v1alpha1.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(a.(*networking.ClusterCIDR), b.(*v1alpha1.ClusterCIDR), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRList)(nil), (*networking.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(a.(*v1alpha1.ClusterCIDRList), b.(*networking.ClusterCIDRList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRList)(nil), (*v1alpha1.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(a.(*networking.ClusterCIDRList), b.(*v1alpha1.ClusterCIDRList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRSpec)(nil), (*networking.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(a.(*v1alpha1.ClusterCIDRSpec), b.(*networking.ClusterCIDRSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRSpec)(nil), (*v1alpha1.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { 
- return Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(a.(*networking.ClusterCIDRSpec), b.(*v1alpha1.ClusterCIDRSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1alpha1.IPAddress)(nil), (*networking.IPAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_IPAddress_To_networking_IPAddress(a.(*v1alpha1.IPAddress), b.(*networking.IPAddress), scope) }); err != nil { @@ -113,80 +80,6 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } -func autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR is an autogenerated conversion function. -func Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in, out, s) -} - -func autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR is an autogenerated conversion function. -func Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error { - return autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in, out, s) -} - -func autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]networking.ClusterCIDR)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList is an autogenerated conversion function. -func Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in, out, s) -} - -func autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha1.ClusterCIDR)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList is an autogenerated conversion function. 
-func Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error { - return autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in, out, s) -} - -func autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error { - out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector)) - out.PerNodeHostBits = in.PerNodeHostBits - out.IPv4 = in.IPv4 - out.IPv6 = in.IPv6 - return nil -} - -// Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec is an autogenerated conversion function. -func Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in, out, s) -} - -func autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error { - out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector)) - out.PerNodeHostBits = in.PerNodeHostBits - out.IPv4 = in.IPv4 - out.IPv6 = in.IPv6 - return nil -} - -// Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec is an autogenerated conversion function. -func Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error { - return autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in, out, s) -} - func autoConvert_v1alpha1_IPAddress_To_networking_IPAddress(in *v1alpha1.IPAddress, out *networking.IPAddress, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha1_IPAddressSpec_To_networking_IPAddressSpec(&in.Spec, &out.Spec, s); err != nil { @@ -260,7 +153,6 @@ func autoConvert_v1alpha1_ParentReference_To_networking_ParentReference(in *v1al out.Resource = in.Resource out.Namespace = in.Namespace out.Name = in.Name - out.UID = types.UID(in.UID) return nil } @@ -274,7 +166,6 @@ func autoConvert_networking_ParentReference_To_v1alpha1_ParentReference(in *netw out.Resource = in.Resource out.Namespace = in.Namespace out.Name = in.Name - out.UID = types.UID(in.UID) return nil } diff --git a/pkg/apis/networking/validation/validation.go b/pkg/apis/networking/validation/validation.go index 8492891471855..4f577f51c0fb3 100644 --- a/pkg/apis/networking/validation/validation.go +++ b/pkg/apis/networking/validation/validation.go @@ -21,7 +21,6 @@ import ( "net/netip" "strings" - v1 "k8s.io/api/core/v1" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" pathvalidation "k8s.io/apimachinery/pkg/api/validation/path" unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" @@ -649,92 +648,6 @@ func allowInvalidWildcardHostRule(oldIngress *networking.Ingress) bool { return false } -// ValidateClusterCIDRName validates that the given name can be used as an -// ClusterCIDR name. -var ValidateClusterCIDRName = apimachineryvalidation.NameIsDNSLabel - -// ValidateClusterCIDR validates a ClusterCIDR. -func ValidateClusterCIDR(cc *networking.ClusterCIDR) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&cc.ObjectMeta, false, ValidateClusterCIDRName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateClusterCIDRSpec(&cc.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -// ValidateClusterCIDRSpec validates ClusterCIDR Spec. -func ValidateClusterCIDRSpec(spec *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - if spec.NodeSelector != nil { - allErrs = append(allErrs, apivalidation.ValidateNodeSelector(spec.NodeSelector, fldPath.Child("nodeSelector"))...) - } - - // Validate if CIDR is specified for at least one IP Family(IPv4/IPv6). - if spec.IPv4 == "" && spec.IPv6 == "" { - allErrs = append(allErrs, field.Required(fldPath, "one or both of `ipv4` and `ipv6` must be specified")) - return allErrs - } - - // Validate specified IPv4 CIDR and PerNodeHostBits. - if spec.IPv4 != "" { - allErrs = append(allErrs, validateCIDRConfig(spec.IPv4, spec.PerNodeHostBits, 32, v1.IPv4Protocol, fldPath)...) - } - - // Validate specified IPv6 CIDR and PerNodeHostBits. - if spec.IPv6 != "" { - allErrs = append(allErrs, validateCIDRConfig(spec.IPv6, spec.PerNodeHostBits, 128, v1.IPv6Protocol, fldPath)...) - } - - return allErrs -} - -func validateCIDRConfig(configCIDR string, perNodeHostBits, maxMaskSize int32, ipFamily v1.IPFamily, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - minPerNodeHostBits := int32(4) - - ip, ipNet, err := netutils.ParseCIDRSloppy(configCIDR) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, fmt.Sprintf("must be a valid CIDR: %s", configCIDR))) - return allErrs - } - - if ipFamily == v1.IPv4Protocol && !netutils.IsIPv4(ip) { - allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv4 CIDR")) - } - if ipFamily == v1.IPv6Protocol && !netutils.IsIPv6(ip) { - allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv6 CIDR")) - } - - // Validate PerNodeHostBits - maskSize, _ := ipNet.Mask.Size() - maxPerNodeHostBits := maxMaskSize - int32(maskSize) - - if perNodeHostBits < minPerNodeHostBits { - allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be greater than or equal to %d", minPerNodeHostBits))) - } - if perNodeHostBits > maxPerNodeHostBits { - allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be less than or equal to %d", maxPerNodeHostBits))) - } - return allErrs -} - -// ValidateClusterCIDRUpdate tests if an update to a ClusterCIDR is valid. -func ValidateClusterCIDRUpdate(update, old *networking.ClusterCIDR) field.ErrorList { - var allErrs field.ErrorList - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, validateClusterCIDRUpdateSpec(&update.Spec, &old.Spec, field.NewPath("spec"))...) - return allErrs -} - -func validateClusterCIDRUpdateSpec(update, old *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.NodeSelector, old.NodeSelector, fldPath.Child("nodeSelector"))...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.PerNodeHostBits, old.PerNodeHostBits, fldPath.Child("perNodeHostBits"))...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv4, old.IPv4, fldPath.Child("ipv4"))...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv6, old.IPv6, fldPath.Child("ipv6"))...) 
- - return allErrs -} - // ValidateIPAddressName validates that the name is the decimal representation of an IP address. // IPAddress does not support generating names, prefix is not considered. func ValidateIPAddressName(name string, prefix bool) []string { diff --git a/pkg/apis/networking/validation/validation_test.go b/pkg/apis/networking/validation/validation_test.go index 94b5ed738797a..b73935e44c959 100644 --- a/pkg/apis/networking/validation/validation_test.go +++ b/pkg/apis/networking/validation/validation_test.go @@ -1837,191 +1837,6 @@ func TestValidateIngressStatusUpdate(t *testing.T) { } } -func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector { - return &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{{ - MatchExpressions: []api.NodeSelectorRequirement{{ - Key: key, - Operator: op, - Values: values, - }}, - }}, - } -} - -func makeClusterCIDR(perNodeHostBits int32, ipv4, ipv6 string, nodeSelector *api.NodeSelector) *networking.ClusterCIDR { - return &networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - ResourceVersion: "9", - }, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4, - IPv6: ipv6, - NodeSelector: nodeSelector, - }, - } -} - -func TestValidateClusterCIDR(t *testing.T) { - testCases := []struct { - name string - cc *networking.ClusterCIDR - expectErr bool - }{{ - name: "valid SingleStack IPv4 ClusterCIDR", - cc: makeClusterCIDR(8, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits = maxPerNodeHostBits", - cc: makeClusterCIDR(16, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits > minPerNodeHostBits", - cc: makeClusterCIDR(4, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv6 ClusterCIDR", - cc: makeClusterCIDR(8, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits = maxPerNodeHostBit", - cc: makeClusterCIDR(64, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits > minPerNodeHostBit", - cc: makeClusterCIDR(4, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv6 ClusterCIDR perNodeHostBits=100", - cc: makeClusterCIDR(100, "", "fd00:1:1::/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid DualStack ClusterCIDR", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid DualStack ClusterCIDR, no NodeSelector", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", nil), - expectErr: false, - }, - // Failure cases. 
- { - name: "invalid ClusterCIDR, no IPv4 or IPv6 CIDR", - cc: makeClusterCIDR(8, "", "", nil), - expectErr: true, - }, { - name: "invalid ClusterCIDR, invalid nodeSelector", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("NoUppercaseOrSpecialCharsLike=Equals", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, - // IPv4 tests. - { - name: "invalid SingleStack IPv4 ClusterCIDR, invalid spec.IPv4", - cc: makeClusterCIDR(8, "test", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid Singlestack IPv4 ClusterCIDR, perNodeHostBits > maxPerNodeHostBits", - cc: makeClusterCIDR(100, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid SingleStack IPv4 ClusterCIDR, perNodeHostBits < minPerNodeHostBits", - cc: makeClusterCIDR(2, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, - // IPv6 tests. - { - name: "invalid SingleStack IPv6 ClusterCIDR, invalid spec.IPv6", - cc: makeClusterCIDR(8, "", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid SingleStack IPv6 ClusterCIDR, valid IPv4 CIDR in spec.IPv6", - cc: makeClusterCIDR(8, "", "10.2.0.0/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits", - cc: makeClusterCIDR(12, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits < minPerNodeHostBits", - cc: makeClusterCIDR(3, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, - // DualStack tests - { - name: "invalid DualStack ClusterCIDR, valid spec.IPv4, invalid spec.IPv6", - cc: makeClusterCIDR(8, "10.1.0.0/16", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid DualStack ClusterCIDR, valid spec.IPv6, invalid spec.IPv4", - cc: makeClusterCIDR(8, "testv4", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid DualStack ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits", - cc: makeClusterCIDR(24, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid DualStack ClusterCIDR, valid IPv6 CIDR in spec.IPv4", - cc: makeClusterCIDR(8, "fd00::/120", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - err := ValidateClusterCIDR(testCase.cc) - if !testCase.expectErr && err != nil { - t.Errorf("ValidateClusterCIDR(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err) - } - if testCase.expectErr && err == nil { - t.Errorf("ValidateClusterCIDR(%+v) must return an error for test: %s, but got nil", testCase.cc, testCase.name) - } - }) - } -} - -func TestValidateClusterConfigUpdate(t *testing.T) { - oldCCC := makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})) - - testCases := []struct { - name string - cc *networking.ClusterCIDR - expectErr bool - }{{ - name: 
"Successful update, no changes to ClusterCIDR.Spec", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "Failed update, update spec.PerNodeHostBits", - cc: makeClusterCIDR(12, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "Failed update, update spec.IPv4", - cc: makeClusterCIDR(8, "10.2.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "Failed update, update spec.IPv6", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:2:/112", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "Failed update, update spec.NodeSelector", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar2"})), - expectErr: true, - }} - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - err := ValidateClusterCIDRUpdate(testCase.cc, oldCCC) - if !testCase.expectErr && err != nil { - t.Errorf("ValidateClusterCIDRUpdate(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err) - } - if testCase.expectErr && err == nil { - t.Errorf("ValidateClusterCIDRUpdate(%+v) must return error for test: %s, but got nil", testCase.cc, testCase.name) - } - }) - } -} - func TestValidateIPAddress(t *testing.T) { testCases := map[string]struct { expectedErrors int diff --git a/pkg/apis/networking/zz_generated.deepcopy.go b/pkg/apis/networking/zz_generated.deepcopy.go index 3a39c6cac4088..5752aa40ce868 100644 --- a/pkg/apis/networking/zz_generated.deepcopy.go +++ b/pkg/apis/networking/zz_generated.deepcopy.go @@ -28,87 +28,6 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. -func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { - if in == nil { - return nil - } - out := new(ClusterCIDR) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDR) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterCIDR, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. -func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { - if in == nil { - return nil - } - out := new(ClusterCIDRList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { - *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = new(core.NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. -func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { - if in == nil { - return nil - } - out := new(ClusterCIDRSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in diff --git a/pkg/apis/storage/types.go b/pkg/apis/storage/types.go index 8e2778991aff5..a700544c47c56 100644 --- a/pkg/apis/storage/types.go +++ b/pkg/apis/storage/types.go @@ -303,7 +303,7 @@ type CSIDriverSpec struct { // NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/pkg/controller/apis/config/v1alpha1/zz_generated.defaults.go b/pkg/controller/apis/config/v1alpha1/zz_generated.defaults.go index bd27200c5a46f..b441808f2ec32 100644 --- a/pkg/controller/apis/config/v1alpha1/zz_generated.defaults.go +++ b/pkg/controller/apis/config/v1alpha1/zz_generated.defaults.go @@ -23,7 +23,7 @@ package v1alpha1 import ( runtime "k8s.io/apimachinery/pkg/runtime" - cloudproviderconfigv1alpha1 "k8s.io/cloud-provider/config/v1alpha1" + configv1alpha1 "k8s.io/cloud-provider/config/v1alpha1" v1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1" ) @@ -39,5 +39,5 @@ func RegisterDefaults(scheme *runtime.Scheme) error { func SetObjectDefaults_KubeControllerManagerConfiguration(in *v1alpha1.KubeControllerManagerConfiguration) { SetDefaults_KubeControllerManagerConfiguration(in) - cloudproviderconfigv1alpha1.SetDefaults_KubeCloudSharedConfiguration(&in.KubeCloudShared) + configv1alpha1.SetDefaults_KubeCloudSharedConfiguration(&in.KubeCloudShared) } diff --git a/pkg/controller/cronjob/cronjob_controllerv2.go b/pkg/controller/cronjob/cronjob_controllerv2.go index ac4d63eb8a0d0..bb2da6bf08009 100644 --- a/pkg/controller/cronjob/cronjob_controllerv2.go +++ b/pkg/controller/cronjob/cronjob_controllerv2.go @@ -585,6 +585,7 @@ func (jm *ControllerV2) syncCronJob( } } + jobAlreadyExists := false jobReq, err := getJobFromTemplate2(cronJob, *scheduledTime) if err != nil { logger.Error(err, "Unable to make Job from template", "cronjob", klog.KObj(cronJob)) @@ -597,18 +598,41 @@ func (jm *ControllerV2) syncCronJob( // anything because any creation will fail return nil, updateStatus, err case errors.IsAlreadyExists(err): - // If the job is created by other actor, assume it has updated the cronjob status accordingly - logger.Info("Job already exists", "cronjob", klog.KObj(cronJob), "job", klog.KObj(jobReq)) - return nil, updateStatus, err + // If the job is created by other actor, assume it has 
updated the cronjob status accordingly. + // However, if the job was created by cronjob controller, this means we've previously created the job + // but failed to update the active list in the status, in which case we should reattempt to add the job + // into the active list and update the status. + jobAlreadyExists = true + job, err := jm.jobControl.GetJob(jobReq.GetNamespace(), jobReq.GetName()) + if err != nil { + return nil, updateStatus, err + } + jobResp = job + + // check that this job is owned by cronjob controller, otherwise do nothing and assume external controller + // is updating the status. + if !metav1.IsControlledBy(job, cronJob) { + return nil, updateStatus, nil + } + + // Recheck if the job is missing from the active list before attempting to update the status again. + found := inActiveList(cronJob, job.ObjectMeta.UID) + if found { + return nil, updateStatus, nil + } case err != nil: // default error handling jm.recorder.Eventf(cronJob, corev1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err) return nil, updateStatus, err } - metrics.CronJobCreationSkew.Observe(jobResp.ObjectMeta.GetCreationTimestamp().Sub(*scheduledTime).Seconds()) - logger.V(4).Info("Created Job", "job", klog.KObj(jobResp), "cronjob", klog.KObj(cronJob)) - jm.recorder.Eventf(cronJob, corev1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name) + if jobAlreadyExists { + logger.Info("Job already exists", "cronjob", klog.KObj(cronJob), "job", klog.KObj(jobReq)) + } else { + metrics.CronJobCreationSkew.Observe(jobResp.ObjectMeta.GetCreationTimestamp().Sub(*scheduledTime).Seconds()) + logger.V(4).Info("Created Job", "job", klog.KObj(jobResp), "cronjob", klog.KObj(cronJob)) + jm.recorder.Eventf(cronJob, corev1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name) + } // ------------------------------------------------------------------ // diff --git a/pkg/controller/cronjob/cronjob_controllerv2_test.go b/pkg/controller/cronjob/cronjob_controllerv2_test.go index 876adcc75b35a..ed1436713ad93 100644 --- a/pkg/controller/cronjob/cronjob_controllerv2_test.go +++ b/pkg/controller/cronjob/cronjob_controllerv2_test.go @@ -466,10 +466,22 @@ func TestControllerV2SyncCronJob(t *testing.T) { jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), jobCreateError: errors.NewAlreadyExists(schema.GroupResource{Resource: "job", Group: "batch"}, ""), - expectErr: true, + expectErr: false, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, + "prev ran but done, is time, job not present in CJ active status, create job failed, A": { + concurrencyPolicy: "Allow", + schedule: onTheHour, + deadline: noDead, + ranPreviously: true, + jobCreationTime: justAfterThePriorHour(), + now: *justAfterTheHour(), + jobCreateError: errors.NewAlreadyExists(schema.GroupResource{Resource: "job", Group: "batch"}, ""), + expectErr: false, + expectUpdateStatus: true, + jobPresentInCJActiveStatus: false, + }, "prev ran but done, is time, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, @@ -1812,3 +1824,103 @@ func TestControllerV2CleanupFinishedJobs(t *testing.T) { }) } } + +// TestControllerV2JobAlreadyExistsButNotInActiveStatus validates that an already created job that was not added to the status +// of a CronJob initially will be added back on the next sync. Previously, if we failed to update the status after creating a job, +// cronjob controller would retry continuously because it would attempt to create a job that already exists. 
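+// Here the fake job control rejects the create with an AlreadyExists error while the job object is already
+// present in the client, so the sync is expected to fetch the existing job, confirm it is controlled by the
+// CronJob, and add it back to .status.active in a single status update.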
+func TestControllerV2JobAlreadyExistsButNotInActiveStatus(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + + cj := cronJob() + cj.Spec.ConcurrencyPolicy = "Forbid" + cj.Spec.Schedule = everyHour + cj.Status.LastScheduleTime = &metav1.Time{Time: justBeforeThePriorHour()} + cj.Status.Active = []v1.ObjectReference{} + cjCopy := cj.DeepCopy() + + job, err := getJobFromTemplate2(&cj, justAfterThePriorHour()) + if err != nil { + t.Fatalf("Unexpected error creating a job from template: %v", err) + } + job.UID = "1234" + job.Namespace = cj.Namespace + + client := fake.NewSimpleClientset(cjCopy, job) + informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) + _ = informerFactory.Batch().V1().CronJobs().Informer().GetIndexer().Add(cjCopy) + + jm, err := NewControllerV2(ctx, informerFactory.Batch().V1().Jobs(), informerFactory.Batch().V1().CronJobs(), client) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + jobControl := &fakeJobControl{Job: job, CreateErr: errors.NewAlreadyExists(schema.GroupResource{Resource: "job", Group: "batch"}, "")} + jm.jobControl = jobControl + cronJobControl := &fakeCJControl{} + jm.cronJobControl = cronJobControl + jm.now = justBeforeTheHour + + jm.enqueueController(cjCopy) + jm.processNextWorkItem(ctx) + + if len(cronJobControl.Updates) != 1 { + t.Fatalf("Unexpected updates to cronjob, got: %d, expected 1", len(cronJobControl.Updates)) + } + if len(cronJobControl.Updates[0].Status.Active) != 1 { + t.Errorf("Unexpected active jobs count, got: %d, expected 1", len(cronJobControl.Updates[0].Status.Active)) + } + + expectedActiveRef, err := getRef(job) + if err != nil { + t.Fatalf("Error getting expected job ref: %v", err) + } + if !reflect.DeepEqual(cronJobControl.Updates[0].Status.Active[0], *expectedActiveRef) { + t.Errorf("Unexpected job reference in cronjob active list, got: %v, expected: %v", cronJobControl.Updates[0].Status.Active[0], expectedActiveRef) + } +} + +// TestControllerV2JobAlreadyExistsButDifferentOwnner validates that an already created job +// not owned by the cronjob controller is ignored. 
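+// The job's owner references are cleared before the sync, so metav1.IsControlledBy fails and the controller
+// is expected to leave the CronJob status alone (the fake cronJobControl records no updates).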
+func TestControllerV2JobAlreadyExistsButDifferentOwner(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + + cj := cronJob() + cj.Spec.ConcurrencyPolicy = "Forbid" + cj.Spec.Schedule = everyHour + cj.Status.LastScheduleTime = &metav1.Time{Time: justBeforeThePriorHour()} + cj.Status.Active = []v1.ObjectReference{} + cjCopy := cj.DeepCopy() + + job, err := getJobFromTemplate2(&cj, justAfterThePriorHour()) + if err != nil { + t.Fatalf("Unexpected error creating a job from template: %v", err) + } + job.UID = "1234" + job.Namespace = cj.Namespace + + // remove owners for this test since we are testing that jobs not belonging to cronjob + // controller are safely ignored + job.OwnerReferences = []metav1.OwnerReference{} + + client := fake.NewSimpleClientset(cjCopy, job) + informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) + _ = informerFactory.Batch().V1().CronJobs().Informer().GetIndexer().Add(cjCopy) + + jm, err := NewControllerV2(ctx, informerFactory.Batch().V1().Jobs(), informerFactory.Batch().V1().CronJobs(), client) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + jobControl := &fakeJobControl{Job: job, CreateErr: errors.NewAlreadyExists(schema.GroupResource{Resource: "job", Group: "batch"}, "")} + jm.jobControl = jobControl + cronJobControl := &fakeCJControl{} + jm.cronJobControl = cronJobControl + jm.now = justBeforeTheHour + + jm.enqueueController(cjCopy) + jm.processNextWorkItem(ctx) + + if len(cronJobControl.Updates) != 0 { + t.Fatalf("Unexpected updates to cronjob, got: %d, expected 0", len(cronJobControl.Updates)) + } +} diff --git a/pkg/controller/cronjob/utils.go b/pkg/controller/cronjob/utils.go index 1ca6dd819a22c..b88504d118217 100644 --- a/pkg/controller/cronjob/utils.go +++ b/pkg/controller/cronjob/utils.go @@ -36,6 +36,27 @@ import ( // Utilities for dealing with Jobs and CronJobs and time. +type missedSchedulesType int + +const ( + noneMissed missedSchedulesType = iota + fewMissed + manyMissed +) + +func (e missedSchedulesType) String() string { + switch e { + case noneMissed: + return "none" + case fewMissed: + return "few" + case manyMissed: + return "many" + default: + return fmt.Sprintf("unknown(%d)", int(e)) + } +} + // inActiveList checks if cronjob's .status.active has a job with the same UID. func inActiveList(cj *batchv1.CronJob, uid types.UID) bool { for _, j := range cj.Status.Active { @@ -75,11 +96,12 @@ func deleteFromActiveList(cj *batchv1.CronJob, uid types.UID) { // mostRecentScheduleTime returns: // - the last schedule time or CronJob's creation time, // - the most recent time a Job should be created or nil, if that's after now, -// - boolean indicating an excessive number of missed schedules, +// - value indicating either none missed schedules, a few missed or many missed // - error in an edge case where the schedule specification is grammatically correct, // but logically doesn't make sense (31st day for months with only 30 days, for example). 
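+// noneMissed is reported while now is still before the second schedule time after earliestTime, fewMissed
+// covers up to 100 missed runs, and manyMissed anything beyond that, which is what triggers the
+// TooManyMissedTimes warning event in nextScheduleTime.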
-func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Schedule, includeStartingDeadlineSeconds bool) (time.Time, *time.Time, bool, error) { +func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Schedule, includeStartingDeadlineSeconds bool) (time.Time, *time.Time, missedSchedulesType, error) { earliestTime := cj.ObjectMeta.CreationTimestamp.Time + missedSchedules := noneMissed if cj.Status.LastScheduleTime != nil { earliestTime = cj.Status.LastScheduleTime.Time } @@ -96,10 +118,10 @@ func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Sc t2 := schedule.Next(t1) if now.Before(t1) { - return earliestTime, nil, false, nil + return earliestTime, nil, missedSchedules, nil } if now.Before(t2) { - return earliestTime, &t1, false, nil + return earliestTime, &t1, missedSchedules, nil } // It is possible for cron.ParseStandard("59 23 31 2 *") to return an invalid schedule @@ -107,7 +129,7 @@ func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Sc // In this case the timeBetweenTwoSchedules will be 0, and we error out the invalid schedule timeBetweenTwoSchedules := int64(t2.Sub(t1).Round(time.Second).Seconds()) if timeBetweenTwoSchedules < 1 { - return earliestTime, nil, false, fmt.Errorf("time difference between two schedules is less than 1 second") + return earliestTime, nil, missedSchedules, fmt.Errorf("time difference between two schedules is less than 1 second") } // this logic used for calculating number of missed schedules does a rough // approximation, by calculating a diff between two schedules (t1 and t2), @@ -146,12 +168,18 @@ func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Sc // // I've somewhat arbitrarily picked 100, as more than 80, // but less than "lots". - tooManyMissed := numberOfMissedSchedules > 100 + switch { + case numberOfMissedSchedules > 100: + missedSchedules = manyMissed + // inform about few missed, still + case numberOfMissedSchedules > 0: + missedSchedules = fewMissed + } if mostRecentTime.IsZero() { - return earliestTime, nil, tooManyMissed, nil + return earliestTime, nil, missedSchedules, nil } - return earliestTime, &mostRecentTime, tooManyMissed, nil + return earliestTime, &mostRecentTime, missedSchedules, nil } // nextScheduleTimeDuration returns the time duration to requeue based on @@ -160,13 +188,18 @@ func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Sc // realistic cases should be around 100s, the job will still be executed without missing // the schedule. func nextScheduleTimeDuration(cj *batchv1.CronJob, now time.Time, schedule cron.Schedule) *time.Duration { - earliestTime, mostRecentTime, _, err := mostRecentScheduleTime(cj, now, schedule, false) + earliestTime, mostRecentTime, missedSchedules, err := mostRecentScheduleTime(cj, now, schedule, false) if err != nil { // we still have to requeue at some point, so aim for the next scheduling slot from now mostRecentTime = &now } else if mostRecentTime == nil { - // no missed schedules since earliestTime - mostRecentTime = &earliestTime + if missedSchedules == noneMissed { + // no missed schedules since earliestTime + mostRecentTime = &earliestTime + } else { + // if there are missed schedules since earliestTime, always use now + mostRecentTime = &now + } } t := schedule.Next(*mostRecentTime).Add(nextScheduleDelta).Sub(now) @@ -177,13 +210,13 @@ func nextScheduleTimeDuration(cj *batchv1.CronJob, now time.Time, schedule cron. 
// and before now, or nil if no unmet schedule times, and an error. // If there are too many (>100) unstarted times, it will also record a warning. func nextScheduleTime(logger klog.Logger, cj *batchv1.CronJob, now time.Time, schedule cron.Schedule, recorder record.EventRecorder) (*time.Time, error) { - _, mostRecentTime, tooManyMissed, err := mostRecentScheduleTime(cj, now, schedule, true) + _, mostRecentTime, missedSchedules, err := mostRecentScheduleTime(cj, now, schedule, true) if mostRecentTime == nil || mostRecentTime.After(now) { return nil, err } - if tooManyMissed { + if missedSchedules == manyMissed { recorder.Eventf(cj, corev1.EventTypeWarning, "TooManyMissedTimes", "too many missed start times. Set or decrease .spec.startingDeadlineSeconds or check clock skew") logger.Info("too many missed times", "cronjob", klog.KObj(cj)) } diff --git a/pkg/controller/cronjob/utils_test.go b/pkg/controller/cronjob/utils_test.go index e0012e986d550..d190b310ae004 100644 --- a/pkg/controller/cronjob/utils_test.go +++ b/pkg/controller/cronjob/utils_test.go @@ -157,7 +157,7 @@ func TestNextScheduleTime(t *testing.T) { // schedule is hourly on the hour schedule := "0 * * * ?" - PraseSchedule := func(schedule string) cron.Schedule { + ParseSchedule := func(schedule string) cron.Schedule { sched, err := cron.ParseStandard(schedule) if err != nil { t.Errorf("Error parsing schedule: %#v", err) @@ -189,7 +189,7 @@ func TestNextScheduleTime(t *testing.T) { cj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)} // Current time is more than creation time, but less than T1. now := T1.Add(-7 * time.Minute) - schedule, _ := nextScheduleTime(logger, &cj, now, PraseSchedule(cj.Spec.Schedule), recorder) + schedule, _ := nextScheduleTime(logger, &cj, now, ParseSchedule(cj.Spec.Schedule), recorder) if schedule != nil { t.Errorf("expected no start time, got: %v", schedule) } @@ -200,7 +200,7 @@ func TestNextScheduleTime(t *testing.T) { cj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)} // Current time is after T1 now := T1.Add(2 * time.Second) - schedule, _ := nextScheduleTime(logger, &cj, now, PraseSchedule(cj.Spec.Schedule), recorder) + schedule, _ := nextScheduleTime(logger, &cj, now, ParseSchedule(cj.Spec.Schedule), recorder) if schedule == nil { t.Errorf("expected 1 start time, got nil") } else if !schedule.Equal(T1) { @@ -215,7 +215,7 @@ func TestNextScheduleTime(t *testing.T) { cj.Status.LastScheduleTime = &metav1.Time{Time: T1} // Current time is after T1 now := T1.Add(2 * time.Minute) - schedule, _ := nextScheduleTime(logger, &cj, now, PraseSchedule(cj.Spec.Schedule), recorder) + schedule, _ := nextScheduleTime(logger, &cj, now, ParseSchedule(cj.Spec.Schedule), recorder) if schedule != nil { t.Errorf("expected 0 start times, got: %v", schedule) } @@ -228,7 +228,7 @@ func TestNextScheduleTime(t *testing.T) { cj.Status.LastScheduleTime = &metav1.Time{Time: T1} // Current time is after T1 and after T2 now := T2.Add(5 * time.Minute) - schedule, _ := nextScheduleTime(logger, &cj, now, PraseSchedule(cj.Spec.Schedule), recorder) + schedule, _ := nextScheduleTime(logger, &cj, now, ParseSchedule(cj.Spec.Schedule), recorder) if schedule == nil { t.Errorf("expected 1 start times, got nil") } else if !schedule.Equal(T2) { @@ -241,7 +241,7 @@ func TestNextScheduleTime(t *testing.T) { cj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)} // Current time is after T1 and after T2 now := T2.Add(5 * time.Minute) - schedule, _ := nextScheduleTime(logger, 
&cj, now, PraseSchedule(cj.Spec.Schedule), recorder) + schedule, _ := nextScheduleTime(logger, &cj, now, ParseSchedule(cj.Spec.Schedule), recorder) if schedule == nil { t.Errorf("expected 1 start times, got nil") } else if !schedule.Equal(T2) { @@ -253,7 +253,7 @@ func TestNextScheduleTime(t *testing.T) { cj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)} cj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)} now := T2.Add(10 * 24 * time.Hour) - schedule, _ := nextScheduleTime(logger, &cj, now, PraseSchedule(cj.Spec.Schedule), recorder) + schedule, _ := nextScheduleTime(logger, &cj, now, ParseSchedule(cj.Spec.Schedule), recorder) if schedule == nil { t.Errorf("expected more than 0 missed times") } @@ -266,7 +266,7 @@ func TestNextScheduleTime(t *testing.T) { // Deadline is short deadline := int64(2 * 60 * 60) cj.Spec.StartingDeadlineSeconds = &deadline - schedule, _ := nextScheduleTime(logger, &cj, now, PraseSchedule(cj.Spec.Schedule), recorder) + schedule, _ := nextScheduleTime(logger, &cj, now, ParseSchedule(cj.Spec.Schedule), recorder) if schedule == nil { t.Errorf("expected more than 0 missed times") } @@ -277,7 +277,7 @@ func TestNextScheduleTime(t *testing.T) { cj.Status.LastScheduleTime = nil now := *deltaTimeAfterTopOfTheHour(1 * time.Hour) // rouge schedule - schedule, err := nextScheduleTime(logger, &cj, now, PraseSchedule("59 23 31 2 *"), recorder) + schedule, err := nextScheduleTime(logger, &cj, now, ParseSchedule("59 23 31 2 *"), recorder) if schedule != nil { t.Errorf("expected no start time, got: %v", schedule) } @@ -364,7 +364,7 @@ func TestMostRecentScheduleTime(t *testing.T) { now time.Time expectedEarliestTime time.Time expectedRecentTime *time.Time - expectedTooManyMissed bool + expectedTooManyMissed missedSchedulesType wantErr bool }{ { @@ -405,9 +405,10 @@ func TestMostRecentScheduleTime(t *testing.T) { Schedule: "0 * * * *", }, }, - now: *deltaTimeAfterTopOfTheHour(301 * time.Minute), - expectedRecentTime: deltaTimeAfterTopOfTheHour(300 * time.Minute), - expectedEarliestTime: *deltaTimeAfterTopOfTheHour(10 * time.Second), + now: *deltaTimeAfterTopOfTheHour(301 * time.Minute), + expectedRecentTime: deltaTimeAfterTopOfTheHour(300 * time.Minute), + expectedEarliestTime: *deltaTimeAfterTopOfTheHour(10 * time.Second), + expectedTooManyMissed: fewMissed, }, { name: "complex schedule", @@ -422,9 +423,10 @@ func TestMostRecentScheduleTime(t *testing.T) { LastScheduleTime: &metav1HalfPastTheHour, }, }, - now: *deltaTimeAfterTopOfTheHour(24*time.Hour + 31*time.Minute), - expectedRecentTime: deltaTimeAfterTopOfTheHour(24*time.Hour + 30*time.Minute), - expectedEarliestTime: *deltaTimeAfterTopOfTheHour(30 * time.Minute), + now: *deltaTimeAfterTopOfTheHour(24*time.Hour + 31*time.Minute), + expectedRecentTime: deltaTimeAfterTopOfTheHour(24*time.Hour + 30*time.Minute), + expectedEarliestTime: *deltaTimeAfterTopOfTheHour(30 * time.Minute), + expectedTooManyMissed: fewMissed, }, { name: "another complex schedule", @@ -439,9 +441,10 @@ func TestMostRecentScheduleTime(t *testing.T) { LastScheduleTime: &metav1HalfPastTheHour, }, }, - now: *deltaTimeAfterTopOfTheHour(30*time.Hour + 30*time.Minute), - expectedRecentTime: nil, - expectedEarliestTime: *deltaTimeAfterTopOfTheHour(30 * time.Minute), + now: *deltaTimeAfterTopOfTheHour(30*time.Hour + 30*time.Minute), + expectedRecentTime: nil, + expectedEarliestTime: *deltaTimeAfterTopOfTheHour(30 * time.Minute), + expectedTooManyMissed: fewMissed, }, { name: "complex schedule with longer diff between 
executions", @@ -456,9 +459,10 @@ func TestMostRecentScheduleTime(t *testing.T) { LastScheduleTime: &metav1HalfPastTheHour, }, }, - now: *deltaTimeAfterTopOfTheHour(96*time.Hour + 31*time.Minute), - expectedRecentTime: deltaTimeAfterTopOfTheHour(96*time.Hour + 30*time.Minute), - expectedEarliestTime: *deltaTimeAfterTopOfTheHour(30 * time.Minute), + now: *deltaTimeAfterTopOfTheHour(96*time.Hour + 31*time.Minute), + expectedRecentTime: deltaTimeAfterTopOfTheHour(96*time.Hour + 30*time.Minute), + expectedEarliestTime: *deltaTimeAfterTopOfTheHour(30 * time.Minute), + expectedTooManyMissed: fewMissed, }, { name: "complex schedule with shorter diff between executions", @@ -470,9 +474,10 @@ func TestMostRecentScheduleTime(t *testing.T) { Schedule: "30 6-16/4 * * 1-5", }, }, - now: *deltaTimeAfterTopOfTheHour(24*time.Hour + 31*time.Minute), - expectedRecentTime: deltaTimeAfterTopOfTheHour(24*time.Hour + 30*time.Minute), - expectedEarliestTime: *topOfTheHour(), + now: *deltaTimeAfterTopOfTheHour(24*time.Hour + 31*time.Minute), + expectedRecentTime: deltaTimeAfterTopOfTheHour(24*time.Hour + 30*time.Minute), + expectedEarliestTime: *topOfTheHour(), + expectedTooManyMissed: fewMissed, }, { name: "@every schedule", @@ -491,7 +496,7 @@ func TestMostRecentScheduleTime(t *testing.T) { now: *deltaTimeAfterTopOfTheHour(7 * 24 * time.Hour), expectedRecentTime: deltaTimeAfterTopOfTheHour((6 * 24 * time.Hour) + 23*time.Hour + 1*time.Minute), expectedEarliestTime: *deltaTimeAfterTopOfTheHour(1 * time.Minute), - expectedTooManyMissed: true, + expectedTooManyMissed: manyMissed, }, { name: "rogue cronjob", @@ -611,6 +616,96 @@ func TestMostRecentScheduleTime(t *testing.T) { } } +func TestNextScheduleTimeDuration(t *testing.T) { + metav1TopOfTheHour := metav1.NewTime(*topOfTheHour()) + metav1HalfPastTheHour := metav1.NewTime(*deltaTimeAfterTopOfTheHour(30 * time.Minute)) + metav1TwoHoursLater := metav1.NewTime(*deltaTimeAfterTopOfTheHour(2 * time.Hour)) + + tests := []struct { + name string + cj *batchv1.CronJob + now time.Time + expectedDuration time.Duration + }{ + { + name: "complex schedule skipping weekend", + cj: &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1TopOfTheHour, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "30 6-16/4 * * 1-5", + }, + Status: batchv1.CronJobStatus{ + LastScheduleTime: &metav1HalfPastTheHour, + }, + }, + now: *deltaTimeAfterTopOfTheHour(24*time.Hour + 31*time.Minute), + expectedDuration: 3*time.Hour + 59*time.Minute + nextScheduleDelta, + }, + { + name: "another complex schedule skipping weekend", + cj: &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1TopOfTheHour, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "30 10,11,12 * * 1-5", + }, + Status: batchv1.CronJobStatus{ + LastScheduleTime: &metav1HalfPastTheHour, + }, + }, + now: *deltaTimeAfterTopOfTheHour(30*time.Hour + 30*time.Minute), + expectedDuration: 66*time.Hour + nextScheduleDelta, + }, + { + name: "once a week cronjob, missed two runs", + cj: &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1TopOfTheHour, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "0 12 * * 4", + }, + Status: batchv1.CronJobStatus{ + LastScheduleTime: &metav1TwoHoursLater, + }, + }, + now: *deltaTimeAfterTopOfTheHour(19*24*time.Hour + 1*time.Hour + 30*time.Minute), + expectedDuration: 48*time.Hour + 30*time.Minute + nextScheduleDelta, + }, + { + name: "no previous run of a cronjob", + cj: &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: 
metav1TopOfTheHour, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "0 12 * * 5", + }, + }, + now: *deltaTimeAfterTopOfTheHour(6 * time.Hour), + expectedDuration: 20*time.Hour + nextScheduleDelta, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sched, err := cron.ParseStandard(tt.cj.Spec.Schedule) + if err != nil { + t.Errorf("error setting up the test, %s", err) + } + gotScheduleTimeDuration := nextScheduleTimeDuration(tt.cj, tt.now, sched) + if *gotScheduleTimeDuration < 0 { + t.Errorf("scheduleTimeDuration should never be less than 0, got %s", gotScheduleTimeDuration) + } + if !reflect.DeepEqual(gotScheduleTimeDuration, &tt.expectedDuration) { + t.Errorf("scheduleTimeDuration - got %s, want %s", gotScheduleTimeDuration, tt.expectedDuration) + } + }) + } +} + func topOfTheHour() *time.Time { T1, err := time.Parse(time.RFC3339, "2016-05-19T10:00:00Z") if err != nil { diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index d7755da95d606..8ad023f242ff6 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -47,7 +47,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae if err != nil { return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err) } - maxSurge, maxUnavailable, err := dsc.updatedDesiredNodeCounts(ctx, ds, nodeList, nodeToDaemonPods) + maxSurge, maxUnavailable, desiredNumberScheduled, err := dsc.updatedDesiredNodeCounts(ctx, ds, nodeList, nodeToDaemonPods) if err != nil { return fmt.Errorf("couldn't get unavailable numbers: %v", err) } @@ -140,10 +140,12 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae // * An old available pod is deleted if a new pod is available // * No more than maxSurge new pods are created for old available pods at any one time // - var oldPodsToDelete []string + var oldPodsToDelete []string // these pods are already updated or unavailable on sunsetted node + var shouldNotRunPodsToDelete []string // candidate pods to be deleted on sunsetted nodes var candidateNewNodes []string var allowedNewNodes []string var numSurge int + var numAvailable int for nodeName, pods := range nodeToDaemonPods { newPod, oldPod, ok := findUpdatedPodsOnNode(ds, pods, hash) @@ -153,6 +155,18 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae numSurge++ continue } + + // first count availability for all the nodes (even the ones that we are sunsetting due to scheduling constraints) + if oldPod != nil { + if podutil.IsPodAvailable(oldPod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}) { + numAvailable++ + } + } else if newPod != nil { + if podutil.IsPodAvailable(newPod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}) { + numAvailable++ + } + } + switch { case oldPod == nil: // we don't need to do anything to this node, the manage loop will handle it @@ -160,6 +174,15 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae // this is a surge candidate switch { case !podutil.IsPodAvailable(oldPod, ds.Spec.MinReadySeconds, metav1.Time{Time: now}): + node, err := dsc.nodeLister.Get(nodeName) + if err != nil { + return fmt.Errorf("couldn't get node for nodeName %q: %v", nodeName, err) + } + if shouldRun, _ := NodeShouldRunDaemonPod(node, ds); !shouldRun { + logger.V(5).Info("DaemonSet pod on node is not available and does not match scheduling constraints, remove old pod", "daemonset", klog.KObj(ds), "node", nodeName, "oldPod", 
klog.KObj(oldPod)) + oldPodsToDelete = append(oldPodsToDelete, oldPod.Name) + continue + } // the old pod isn't available, allow it to become a replacement logger.V(5).Info("Pod on node is out of date and not available, allowing replacement", "daemonset", klog.KObj(ds), "pod", klog.KObj(oldPod), "node", klog.KRef("", nodeName)) // record the replacement @@ -167,10 +190,19 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae allowedNewNodes = make([]string, 0, len(nodeToDaemonPods)) } allowedNewNodes = append(allowedNewNodes, nodeName) - case numSurge >= maxSurge: - // no point considering any other candidates - continue default: + node, err := dsc.nodeLister.Get(nodeName) + if err != nil { + return fmt.Errorf("couldn't get node for nodeName %q: %v", nodeName, err) + } + if shouldRun, _ := NodeShouldRunDaemonPod(node, ds); !shouldRun { + shouldNotRunPodsToDelete = append(shouldNotRunPodsToDelete, oldPod.Name) + continue + } + if numSurge >= maxSurge { + // no point considering any other candidates + continue + } logger.V(5).Info("DaemonSet pod on node is out of date, this is a surge candidate", "daemonset", klog.KObj(ds), "pod", klog.KObj(oldPod), "node", klog.KRef("", nodeName)) // record the candidate if candidateNewNodes == nil { @@ -194,6 +226,27 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae // use any of the candidates we can, including the allowedNewNodes logger.V(5).Info("DaemonSet allowing replacements", "daemonset", klog.KObj(ds), "replacements", len(allowedNewNodes), "maxSurge", maxSurge, "numSurge", numSurge, "candidates", len(candidateNewNodes)) remainingSurge := maxSurge - numSurge + + // With maxSurge, the application owner expects 100% availability. + // When the scheduling constraint change from node A to node B, we do not want the application to stay + // without any available pods. Only delete a pod on node A when a pod on node B becomes available. + if deletablePodsNumber := numAvailable - desiredNumberScheduled; deletablePodsNumber > 0 { + if shouldNotRunPodsToDeleteNumber := len(shouldNotRunPodsToDelete); deletablePodsNumber > shouldNotRunPodsToDeleteNumber { + deletablePodsNumber = shouldNotRunPodsToDeleteNumber + } + for _, podToDeleteName := range shouldNotRunPodsToDelete[:deletablePodsNumber] { + podToDelete, err := dsc.podLister.Pods(ds.Namespace).Get(podToDeleteName) + if err != nil { + if errors.IsNotFound(err) { + continue + } + return fmt.Errorf("couldn't get pod which should be deleted due to scheduling constraints %q: %v", podToDeleteName, err) + } + logger.V(5).Info("DaemonSet pod on node should be deleted due to scheduling constraints", "daemonset", klog.KObj(ds), "pod", klog.KObj(podToDelete), "node", podToDelete.Spec.NodeName) + oldPodsToDelete = append(oldPodsToDelete, podToDeleteName) + } + } + if remainingSurge < 0 { remainingSurge = 0 } @@ -525,9 +578,9 @@ func (dsc *DaemonSetsController) snapshot(ctx context.Context, ds *apps.DaemonSe return history, err } -// updatedDesiredNodeCounts calculates the true number of allowed unavailable or surge pods and +// updatedDesiredNodeCounts calculates the true number of allowed surge, unavailable or desired scheduled pods and // updates the nodeToDaemonPods array to include an empty array for every node that is not scheduled. 
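The availability guard added in this hunk only removes a pod from a node that no longer matches the DaemonSet's scheduling constraints once enough replacement pods are available elsewhere. A minimal, self-contained sketch of that budget calculation follows; the function and variable names are illustrative, not the controller's own code.

package main

import "fmt"

// podsSafeToDelete mirrors the deletablePodsNumber idea above: the number of
// constrained pods that may be deleted in one sync is capped by the surplus of
// available pods over the desired scheduled count, so availability never drops
// below the desired level while the workload moves between nodes.
func podsSafeToDelete(numAvailable, desiredNumberScheduled int, shouldNotRunPods []string) []string {
	deletable := numAvailable - desiredNumberScheduled
	if deletable <= 0 {
		return nil // no surplus yet; keep old pods until replacements become available
	}
	if deletable > len(shouldNotRunPods) {
		deletable = len(shouldNotRunPods)
	}
	return shouldNotRunPods[:deletable]
}

func main() {
	// Five available pods but only four desired: exactly one constrained pod may go now.
	fmt.Println(podsSafeToDelete(5, 4, []string{"ds-pod-a", "ds-pod-b"}))
}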
-func (dsc *DaemonSetsController) updatedDesiredNodeCounts(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) { +func (dsc *DaemonSetsController) updatedDesiredNodeCounts(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) (int, int, int, error) { var desiredNumberScheduled int logger := klog.FromContext(ctx) for i := range nodeList { @@ -545,12 +598,12 @@ func (dsc *DaemonSetsController) updatedDesiredNodeCounts(ctx context.Context, d maxUnavailable, err := util.UnavailableCount(ds, desiredNumberScheduled) if err != nil { - return -1, -1, fmt.Errorf("invalid value for MaxUnavailable: %v", err) + return -1, -1, -1, fmt.Errorf("invalid value for MaxUnavailable: %v", err) } maxSurge, err := util.SurgeCount(ds, desiredNumberScheduled) if err != nil { - return -1, -1, fmt.Errorf("invalid value for MaxSurge: %v", err) + return -1, -1, -1, fmt.Errorf("invalid value for MaxSurge: %v", err) } // if the daemonset returned with an impossible configuration, obey the default of unavailable=1 (in the @@ -560,7 +613,7 @@ func (dsc *DaemonSetsController) updatedDesiredNodeCounts(ctx context.Context, d maxUnavailable = 1 } logger.V(5).Info("DaemonSet with maxSurge and maxUnavailable", "daemonset", klog.KObj(ds), "maxSurge", maxSurge, "maxUnavailable", maxUnavailable) - return maxSurge, maxUnavailable, nil + return maxSurge, maxUnavailable, desiredNumberScheduled, nil } type historiesByRevision []*apps.ControllerRevision diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go index 86f9ae7fb5c4f..855097b16da81 100644 --- a/pkg/controller/daemon/update_test.go +++ b/pkg/controller/daemon/update_test.go @@ -117,7 +117,79 @@ func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) { expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) } -func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) { +func TestDaemonSetUpdatesPodsNotMatchTainstWithMaxSurge(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + + ds := newDaemonSet("foo") + maxSurge := 1 + ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge)) + tolerations := []v1.Toleration{ + {Key: "node-role.kubernetes.io/control-plane", Operator: v1.TolerationOpExists}, + } + setDaemonSetToleration(ds, tolerations) + manager, podControl, _, err := newTestController(ctx, ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + err = manager.dsStore.Add(ds) + if err != nil { + t.Fatal(err) + } + + // Add five nodes and taint to one node + addNodes(manager.nodeStore, 0, 5, nil) + taints := []v1.Taint{ + {Key: "node-role.kubernetes.io/control-plane", Effect: v1.TaintEffectNoSchedule}, + } + node := newNode("node-0", nil) + setNodeTaint(node, taints) + err = manager.nodeStore.Update(node) + if err != nil { + t.Fatal(err) + } + + // Create DaemonSet with toleration + expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0) + markPodsReady(podControl.podStore) + + // RollingUpdate DaemonSet without toleration + ds.Spec.Template.Spec.Tolerations = nil + err = manager.dsStore.Update(ds) + if err != nil { + t.Fatal(err) + } + + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, 1, 0) + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + markPodsReady(podControl.podStore) + + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, 
podControl, maxSurge, maxSurge, 0) + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + markPodsReady(podControl.podStore) + + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0) + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + markPodsReady(podControl.podStore) + + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0) + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) + markPodsReady(podControl.podStore) + + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, 0, maxSurge, 0) + clearExpectations(t, manager, ds, podControl) + expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0) +} + +func TestDaemonSetUpdatesWhenNewPodIsNotReady(t *testing.T) { _, ctx := ktesting.NewTestContext(t) ds := newDaemonSet("foo") manager, podControl, _, err := newTestController(ctx, ds) @@ -379,14 +451,15 @@ func newUpdateUnavailable(value intstr.IntOrString) apps.DaemonSetUpdateStrategy func TestGetUnavailableNumbers(t *testing.T) { cases := []struct { - name string - ManagerFunc func(ctx context.Context) *daemonSetsController - ds *apps.DaemonSet - nodeToPods map[string][]*v1.Pod - maxSurge int - maxUnavailable int - emptyNodes int - Err error + name string + ManagerFunc func(ctx context.Context) *daemonSetsController + ds *apps.DaemonSet + nodeToPods map[string][]*v1.Pod + maxSurge int + maxUnavailable int + desiredNumberScheduled int + emptyNodes int + Err error }{ { name: "No nodes", @@ -431,8 +504,9 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-1"] = []*v1.Pod{pod1} return mapping }(), - maxUnavailable: 1, - emptyNodes: 0, + maxUnavailable: 1, + desiredNumberScheduled: 2, + emptyNodes: 0, }, { name: "Two nodes, one node without pods", @@ -456,8 +530,9 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-0"] = []*v1.Pod{pod0} return mapping }(), - maxUnavailable: 1, - emptyNodes: 1, + maxUnavailable: 1, + desiredNumberScheduled: 2, + emptyNodes: 1, }, { name: "Two nodes, one node without pods, surge", @@ -481,8 +556,9 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-0"] = []*v1.Pod{pod0} return mapping }(), - maxUnavailable: 1, - emptyNodes: 1, + maxUnavailable: 1, + desiredNumberScheduled: 2, + emptyNodes: 1, }, { name: "Two nodes with pods, MaxUnavailable in percents", @@ -509,8 +585,9 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-1"] = []*v1.Pod{pod1} return mapping }(), - maxUnavailable: 1, - emptyNodes: 0, + maxUnavailable: 1, + desiredNumberScheduled: 2, + emptyNodes: 0, }, { name: "Two nodes with pods, MaxUnavailable in percents, surge", @@ -537,9 +614,10 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-1"] = []*v1.Pod{pod1} return mapping }(), - maxSurge: 1, - maxUnavailable: 0, - emptyNodes: 0, + maxSurge: 1, + maxUnavailable: 0, + desiredNumberScheduled: 2, + emptyNodes: 0, }, { name: "Two nodes with pods, MaxUnavailable is 100%, surge", @@ -566,9 +644,10 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-1"] = []*v1.Pod{pod1} return mapping }(), - maxSurge: 2, - maxUnavailable: 0, - emptyNodes: 0, + maxSurge: 2, + maxUnavailable: 0, + desiredNumberScheduled: 2, + emptyNodes: 0, }, { name: "Two nodes with pods, MaxUnavailable in percents, pod terminating", @@ 
-597,8 +676,9 @@ func TestGetUnavailableNumbers(t *testing.T) { mapping["node-1"] = []*v1.Pod{pod1} return mapping }(), - maxUnavailable: 2, - emptyNodes: 1, + maxUnavailable: 2, + desiredNumberScheduled: 3, + emptyNodes: 1, }, } @@ -611,7 +691,7 @@ func TestGetUnavailableNumbers(t *testing.T) { if err != nil { t.Fatalf("error listing nodes: %v", err) } - maxSurge, maxUnavailable, err := manager.updatedDesiredNodeCounts(ctx, c.ds, nodeList, c.nodeToPods) + maxSurge, maxUnavailable, desiredNumberScheduled, err := manager.updatedDesiredNodeCounts(ctx, c.ds, nodeList, c.nodeToPods) if err != nil && c.Err != nil { if c.Err != err { t.Fatalf("Expected error: %v but got: %v", c.Err, err) @@ -620,8 +700,8 @@ func TestGetUnavailableNumbers(t *testing.T) { if err != nil { t.Fatalf("Unexpected error: %v", err) } - if maxSurge != c.maxSurge || maxUnavailable != c.maxUnavailable { - t.Errorf("Wrong values. maxSurge: %d, expected %d, maxUnavailable: %d, expected: %d", maxSurge, c.maxSurge, maxUnavailable, c.maxUnavailable) + if maxSurge != c.maxSurge || maxUnavailable != c.maxUnavailable || desiredNumberScheduled != c.desiredNumberScheduled { + t.Errorf("Wrong values. maxSurge: %d, expected %d, maxUnavailable: %d, expected: %d, desiredNumberScheduled: %d, expected: %d", maxSurge, c.maxSurge, maxUnavailable, c.maxUnavailable, desiredNumberScheduled, c.desiredNumberScheduled) } var emptyNodes int for _, pods := range c.nodeToPods { diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go index da13ebb23e3ec..2527e31480ebb 100644 --- a/pkg/controller/disruption/disruption.go +++ b/pkg/controller/disruption/disruption.go @@ -34,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - corev1apply "k8s.io/client-go/applyconfigurations/core/v1" "k8s.io/client-go/discovery" appsv1informers "k8s.io/client-go/informers/apps/v1" coreinformers "k8s.io/client-go/informers/core/v1" @@ -74,9 +73,6 @@ const ( // Once the timeout is reached, this controller attempts to set the status // of the condition to False. stalePodDisruptionTimeout = 2 * time.Minute - - // field manager used to disable the pod failure condition - fieldManager = "DisruptionController" ) type updater func(context.Context, *policy.PodDisruptionBudget) error @@ -770,16 +766,15 @@ func (dc *DisruptionController) syncStalePodDisruption(ctx context.Context, key return nil } - podApply := corev1apply.Pod(pod.Name, pod.Namespace). - WithStatus(corev1apply.PodStatus()). - WithResourceVersion(pod.ResourceVersion) - podApply.Status.WithConditions(corev1apply.PodCondition(). - WithType(v1.DisruptionTarget). - WithStatus(v1.ConditionFalse). 
- WithLastTransitionTime(metav1.Now()), - ) - - if _, err := dc.kubeClient.CoreV1().Pods(pod.Namespace).ApplyStatus(ctx, podApply, metav1.ApplyOptions{FieldManager: fieldManager, Force: true}); err != nil { + newPod := pod.DeepCopy() + updated := apipod.UpdatePodCondition(&newPod.Status, &v1.PodCondition{ + Type: v1.DisruptionTarget, + Status: v1.ConditionFalse, + }) + if !updated { + return nil + } + if _, err := dc.kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, newPod, metav1.UpdateOptions{}); err != nil { return err } logger.V(2).Info("Reset stale DisruptionTarget condition to False", "pod", klog.KObj(pod)) diff --git a/pkg/controller/endpointslice/endpointslice_controller.go b/pkg/controller/endpointslice/endpointslice_controller.go index 262fc04dee36d..df7bb7b94da00 100644 --- a/pkg/controller/endpointslice/endpointslice_controller.go +++ b/pkg/controller/endpointslice/endpointslice_controller.go @@ -60,7 +60,7 @@ const ( // 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s, 512s, 1000s (max) maxRetries = 15 - // endpointSliceChangeMinSyncDelay indicates the mininum delay before + // endpointSliceChangeMinSyncDelay indicates the minimum delay before // queuing a syncService call after an EndpointSlice changes. If // endpointUpdatesBatchPeriod is greater than this value, it will be used // instead. This helps batch processing of changes to multiple diff --git a/pkg/controller/job/backoff_utils.go b/pkg/controller/job/backoff_utils.go index 2e89e5a1ab3f4..ecd86dc31139c 100644 --- a/pkg/controller/job/backoff_utils.go +++ b/pkg/controller/job/backoff_utils.go @@ -26,7 +26,7 @@ import ( "k8s.io/klog/v2" apipod "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/utils/clock" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) type backoffRecord struct { @@ -207,7 +207,7 @@ func getFinishTimeFromPodReadyFalseCondition(p *v1.Pod) *time.Time { func getFinishTimeFromDeletionTimestamp(p *v1.Pod) *time.Time { if p.DeletionTimestamp != nil { - finishTime := p.DeletionTimestamp.Time.Add(-time.Duration(pointer.Int64Deref(p.DeletionGracePeriodSeconds, 0)) * time.Second) + finishTime := p.DeletionTimestamp.Time.Add(-time.Duration(ptr.Deref(p.DeletionGracePeriodSeconds, 0)) * time.Second) return &finishTime } return nil diff --git a/pkg/controller/job/backoff_utils_test.go b/pkg/controller/job/backoff_utils_test.go index f94d51efad927..e300982c0b8c3 100644 --- a/pkg/controller/job/backoff_utils_test.go +++ b/pkg/controller/job/backoff_utils_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2/ktesting" clocktesting "k8s.io/utils/clock/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestNewBackoffRecord(t *testing.T) { @@ -287,7 +287,7 @@ func TestGetFinishedTime(t *testing.T) { }, ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &metav1.Time{Time: defaultTestTime}, - DeletionGracePeriodSeconds: pointer.Int64(30), + DeletionGracePeriodSeconds: ptr.To[int64](30), }, }, wantFinishTime: defaultTestTimeMinus30s, diff --git a/pkg/controller/job/indexed_job_utils_test.go b/pkg/controller/job/indexed_job_utils_test.go index a79fa744631d9..8800fe87563cc 100644 --- a/pkg/controller/job/indexed_job_utils_test.go +++ b/pkg/controller/job/indexed_job_utils_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/features" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const noIndex = "-" @@ -209,7 +209,7 @@ func TestCalculateSucceededIndexes(t *testing.T) { CompletedIndexes: tc.prevSucceeded, }, Spec: 
batch.JobSpec{ - Completions: pointer.Int32(tc.completions), + Completions: ptr.To(tc.completions), }, } pods := hollowPodsWithIndexPhase(tc.pods) @@ -238,8 +238,8 @@ func TestIsIndexFailed(t *testing.T) { "failed pod exceeding backoffLimitPerIndex, when backoffLimitPerIndex=0": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pod: buildPod().indexFailureCount("0").phase(v1.PodFailed).index("0").trackingFinalizer().Pod, @@ -248,8 +248,8 @@ func TestIsIndexFailed(t *testing.T) { "failed pod exceeding backoffLimitPerIndex, when backoffLimitPerIndex=1": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pod: buildPod().indexFailureCount("1").phase(v1.PodFailed).index("1").trackingFinalizer().Pod, @@ -259,8 +259,8 @@ func TestIsIndexFailed(t *testing.T) { enableJobPodFailurePolicy: true, job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](1), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -292,8 +292,8 @@ func TestIsIndexFailed(t *testing.T) { enableJobPodFailurePolicy: false, job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](1), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -346,8 +346,8 @@ func TestCalculateFailedIndexes(t *testing.T) { "one new index failed": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []*v1.Pod{ @@ -359,8 +359,8 @@ func TestCalculateFailedIndexes(t *testing.T) { "pod without finalizer is ignored": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pods: []*v1.Pod{ @@ -371,8 +371,8 @@ func TestCalculateFailedIndexes(t *testing.T) { "pod outside completions is ignored": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pods: []*v1.Pod{ @@ -383,11 +383,11 @@ func TestCalculateFailedIndexes(t *testing.T) { "extend the failed indexes": { job: batch.Job{ Status: batch.JobStatus{ - FailedIndexes: pointer.String("0"), + FailedIndexes: ptr.To("0"), }, Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pods: []*v1.Pod{ @@ -398,11 +398,11 @@ func TestCalculateFailedIndexes(t *testing.T) { "prev failed indexes empty": { job: batch.Job{ Status: batch.JobStatus{ - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), }, Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pods: []*v1.Pod{ @@ -413,11 +413,11 @@ func TestCalculateFailedIndexes(t *testing.T) { "prev failed 
indexes outside the completions": { job: batch.Job{ Status: batch.JobStatus{ - FailedIndexes: pointer.String("9"), + FailedIndexes: ptr.To("9"), }, Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pods: []*v1.Pod{ @@ -449,8 +449,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) { "failed pods are kept corresponding to non-failed indexes are kept": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(3), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](3), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []*v1.Pod{ @@ -463,8 +463,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) { "failed pod without finalizer; the pod's deletion is not delayed as it already started": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pods: []*v1.Pod{ @@ -475,8 +475,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) { "failed pod with expected finalizer removal; the pod's deletion is not delayed as it already started": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pods: []*v1.Pod{ @@ -488,8 +488,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) { "failed pod with index outside of completions; the pod's deletion is not delayed": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(0), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](0), }, }, pods: []*v1.Pod{ @@ -500,8 +500,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) { "failed pod for active index; the pod's deletion is not delayed as it is already replaced": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []*v1.Pod{ @@ -513,8 +513,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) { "failed pod for succeeded index; the pod's deletion is not delayed as it is already replaced": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []*v1.Pod{ @@ -526,8 +526,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) { "multiple failed pods for index with different failure count; only the pod with highest failure count is kept": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(4), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](4), }, }, pods: []*v1.Pod{ @@ -540,8 +540,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) { "multiple failed pods for index with different finish times; only the last failed pod is kept": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(4), + Completions: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](4), }, }, pods: []*v1.Pod{ diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go index 1572f95a343f9..0ed26946dac43 100644 --- 
a/pkg/controller/job/job_controller.go +++ b/pkg/controller/job/job_controller.go @@ -53,13 +53,7 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/utils/clock" "k8s.io/utils/integer" - "k8s.io/utils/pointer" -) - -const ( - // PodFailurePolicy reason indicates a job failure condition is added due to - // a failed pod matching a pod failure policy rule - jobConditionReasonPodFailurePolicy = "PodFailurePolicy" + "k8s.io/utils/ptr" ) // controllerKind contains the schema.GroupVersionKind for this controller type. @@ -149,11 +143,11 @@ type syncJobCtx struct { // NewController creates a new Job controller that keeps the relevant pods // in sync with their corresponding Job objects. -func NewController(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *Controller { +func NewController(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) (*Controller, error) { return newControllerWithClock(ctx, podInformer, jobInformer, kubeClient, &clock.RealClock{}) } -func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface, clock clock.WithTicker) *Controller { +func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface, clock clock.WithTicker) (*Controller, error) { eventBroadcaster := record.NewBroadcaster() logger := klog.FromContext(ctx) @@ -173,7 +167,7 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn podBackoffStore: newBackoffStore(), } - jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + if _, err := jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { jm.enqueueSyncJobImmediately(logger, obj) }, @@ -183,11 +177,13 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn DeleteFunc: func(obj interface{}) { jm.deleteJob(logger, obj) }, - }) + }); err != nil { + return nil, fmt.Errorf("adding Job event handler: %w", err) + } jm.jobLister = jobInformer.Lister() jm.jobStoreSynced = jobInformer.Informer().HasSynced - podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + if _, err := podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { jm.addPod(logger, obj) }, @@ -197,7 +193,9 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn DeleteFunc: func(obj interface{}) { jm.deletePod(logger, obj, true) }, - }) + }); err != nil { + return nil, fmt.Errorf("adding Pod event handler: %w", err) + } jm.podStore = podInformer.Lister() jm.podStoreSynced = podInformer.Informer().HasSynced @@ -207,7 +205,7 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn metrics.Register() - return jm + return jm, nil } // Run the main goroutine responsible for watching and syncing jobs. @@ -467,7 +465,12 @@ func (jm *Controller) updateJob(logger klog.Logger, old, cur interface{}) { } else { // Trigger immediate sync when spec is changed. jm.enqueueSyncJobImmediately(logger, curJob) + } + // The job shouldn't be marked as finished until all pod finalizers are removed. + // This is a backup operation in this case. 
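Because NewController now returns an error when informer event-handler registration fails, callers must check it before starting the controller. The wiring below is a rough, self-contained sketch under assumed setup (fake client, shared informer factory), not the actual kube-controller-manager code.

package main

import (
	"context"
	"log"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/controller/job"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)

	// NewController can now fail if AddEventHandler returns an error.
	jm, err := job.NewController(ctx, factory.Core().V1().Pods(), factory.Batch().V1().Jobs(), client)
	if err != nil {
		log.Fatalf("creating Job controller: %v", err)
	}

	factory.Start(ctx.Done())
	jm.Run(ctx, 1) // blocks until the context is cancelled
}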
+ if IsJobFinished(curJob) { + jm.cleanupPodFinalizers(curJob) } // check if need to add a new rsync for ActiveDeadlineSeconds @@ -504,18 +507,7 @@ func (jm *Controller) deleteJob(logger klog.Logger, obj interface{}) { return } } - // Listing pods shouldn't really fail, as we are just querying the informer cache. - selector, err := metav1.LabelSelectorAsSelector(jobObj.Spec.Selector) - if err != nil { - utilruntime.HandleError(fmt.Errorf("parsing deleted job selector: %v", err)) - return - } - pods, _ := jm.podStore.Pods(jobObj.Namespace).List(selector) - for _, pod := range pods { - if metav1.IsControlledBy(pod, jobObj) && hasJobTrackingFinalizer(pod) { - jm.enqueueOrphanPod(pod) - } - } + jm.cleanupPodFinalizers(jobObj) } // enqueueSyncJobImmediately tells the Job controller to invoke syncJob @@ -786,7 +778,7 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) { } var terminating *int32 if feature.DefaultFeatureGate.Enabled(features.JobPodReplacementPolicy) { - terminating = pointer.Int32(controller.CountTerminatingPods(pods)) + terminating = ptr.To(controller.CountTerminatingPods(pods)) } jobCtx := &syncJobCtx{ job: &job, @@ -802,7 +794,7 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) { failed := job.Status.Failed + int32(nonIgnoredFailedPodsCount(jobCtx, newFailedPods)) + int32(len(jobCtx.uncounted.failed)) var ready *int32 if feature.DefaultFeatureGate.Enabled(features.JobReadyPods) { - ready = pointer.Int32(countReadyPods(jobCtx.activePods)) + ready = ptr.To(countReadyPods(jobCtx.activePods)) } // Job first start. Set StartTime only if the job is not in the suspended state. @@ -822,16 +814,16 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) { jobCtx.finishedCondition = newFailedConditionForFailureTarget(failureTargetCondition, jm.clock.Now()) } else if failJobMessage := getFailJobMessage(&job, pods); failJobMessage != nil { // Prepare the interim FailureTarget condition to record the failure message before the finalizers (allowing removal of the pods) are removed. 
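The hunks that follow swap hard-coded condition reason strings for the exported batch.JobReason* constants, so code outside the controller can match on the same identifiers instead of string literals. A small consumer-side check for illustration; the helper below is an assumption, not part of this change.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// failedBecauseDeadline reports whether a Job carries a Failed=True condition
// whose reason is the exported JobReasonDeadlineExceeded constant.
func failedBecauseDeadline(job *batchv1.Job) bool {
	for _, c := range job.Status.Conditions {
		if c.Type == batchv1.JobFailed && c.Status == corev1.ConditionTrue &&
			c.Reason == batchv1.JobReasonDeadlineExceeded {
			return true
		}
	}
	return false
}

func main() {
	job := &batchv1.Job{Status: batchv1.JobStatus{Conditions: []batchv1.JobCondition{
		{Type: batchv1.JobFailed, Status: corev1.ConditionTrue, Reason: batchv1.JobReasonDeadlineExceeded},
	}}}
	fmt.Println(failedBecauseDeadline(job)) // true
}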
- jobCtx.finishedCondition = newCondition(batch.JobFailureTarget, v1.ConditionTrue, jobConditionReasonPodFailurePolicy, *failJobMessage, jm.clock.Now()) + jobCtx.finishedCondition = newCondition(batch.JobFailureTarget, v1.ConditionTrue, batch.JobReasonPodFailurePolicy, *failJobMessage, jm.clock.Now()) } } if jobCtx.finishedCondition == nil { if exceedsBackoffLimit || pastBackoffLimitOnFailure(&job, pods) { // check if the number of pod restart exceeds backoff (for restart OnFailure only) // OR if the number of failed jobs increased since the last syncJob - jobCtx.finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, "BackoffLimitExceeded", "Job has reached the specified backoff limit", jm.clock.Now()) + jobCtx.finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, batch.JobReasonBackoffLimitExceeded, "Job has reached the specified backoff limit", jm.clock.Now()) } else if jm.pastActiveDeadline(&job) { - jobCtx.finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, "DeadlineExceeded", "Job was active longer than specified deadline", jm.clock.Now()) + jobCtx.finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, batch.JobReasonDeadlineExceeded, "Job was active longer than specified deadline", jm.clock.Now()) } else if job.Spec.ActiveDeadlineSeconds != nil && !jobSuspended(&job) { syncDuration := time.Duration(*job.Spec.ActiveDeadlineSeconds)*time.Second - jm.clock.Since(job.Status.StartTime.Time) logger.V(2).Info("Job has activeDeadlineSeconds configuration. Will sync this job again", "key", key, "nextSyncIn", syncDuration) @@ -846,9 +838,9 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) { jobCtx.failedIndexes = calculateFailedIndexes(logger, &job, pods) if jobCtx.finishedCondition == nil { if job.Spec.MaxFailedIndexes != nil && jobCtx.failedIndexes.total() > int(*job.Spec.MaxFailedIndexes) { - jobCtx.finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, "MaxFailedIndexesExceeded", "Job has exceeded the specified maximal number of failed indexes", jm.clock.Now()) + jobCtx.finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, batch.JobReasonMaxFailedIndexesExceeded, "Job has exceeded the specified maximal number of failed indexes", jm.clock.Now()) } else if jobCtx.failedIndexes.total() > 0 && jobCtx.failedIndexes.total()+jobCtx.succeededIndexes.total() >= int(*job.Spec.Completions) { - jobCtx.finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, "FailedIndexes", "Job has failed indexes", jm.clock.Now()) + jobCtx.finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, batch.JobReasonFailedIndexes, "Job has failed indexes", jm.clock.Now()) } } jobCtx.podsWithDelayedDeletionPerIndex = getPodsWithDelayedDeletionPerIndex(logger, jobCtx) @@ -921,11 +913,11 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) { } } - needsStatusUpdate := suspendCondChanged || active != job.Status.Active || !pointer.Int32Equal(ready, job.Status.Ready) + needsStatusUpdate := suspendCondChanged || active != job.Status.Active || !ptr.Equal(ready, job.Status.Ready) + needsStatusUpdate = needsStatusUpdate || !ptr.Equal(job.Status.Terminating, jobCtx.terminating) job.Status.Active = active job.Status.Ready = ready job.Status.Terminating = jobCtx.terminating - needsStatusUpdate = needsStatusUpdate || !pointer.Int32Equal(job.Status.Terminating, jobCtx.terminating) err = jm.trackJobStatusAndRemoveFinalizers(ctx, jobCtx, needsStatusUpdate) if err != nil { return 
fmt.Errorf("tracking status: %w", err) @@ -1109,9 +1101,9 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job jobCtx.job.Status.CompletedIndexes = succeededIndexesStr var failedIndexesStr *string if jobCtx.failedIndexes != nil { - failedIndexesStr = pointer.String(jobCtx.failedIndexes.String()) + failedIndexesStr = ptr.To(jobCtx.failedIndexes.String()) } - if !pointer.StringEqual(jobCtx.job.Status.FailedIndexes, failedIndexesStr) { + if !ptr.Equal(jobCtx.job.Status.FailedIndexes, failedIndexesStr) { jobCtx.job.Status.FailedIndexes = failedIndexesStr needsFlush = true } @@ -1493,7 +1485,7 @@ func (jm *Controller) manageJob(ctx context.Context, job *batch.Job, jobCtx *syn } } - rmAtLeast := active + terminating - wantActive + rmAtLeast := active - wantActive if rmAtLeast < 0 { rmAtLeast = 0 } @@ -1554,6 +1546,9 @@ func (jm *Controller) manageJob(ctx context.Context, job *batch.Job, jobCtx *syn } podTemplate.Finalizers = appendJobCompletionFinalizerIfNotFound(podTemplate.Finalizers) + // Counters for pod creation status (used by the job_pods_creation_total metric) + var creationsSucceeded, creationsFailed int32 = 0, 0 + // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize // and double with each successful iteration in a kind of "slow start". // This handles attempts to start large numbers of pods that would @@ -1603,7 +1598,9 @@ func (jm *Controller) manageJob(ctx context.Context, job *batch.Job, jobCtx *syn jm.expectations.CreationObserved(logger, jobKey) atomic.AddInt32(&active, -1) errCh <- err + atomic.AddInt32(&creationsFailed, 1) } + atomic.AddInt32(&creationsSucceeded, 1) }() } wait.Wait() @@ -1622,6 +1619,7 @@ func (jm *Controller) manageJob(ctx context.Context, job *batch.Job, jobCtx *syn } diff -= batchSize } + recordJobPodsCreationTotal(job, creationsSucceeded, creationsFailed) return active, metrics.JobSyncActionPodsCreated, errorFromChannel(errCh) } @@ -1645,7 +1643,7 @@ func (jm *Controller) getPodCreationInfoForIndependentIndexes(logger klog.Logger if len(indexesToAddNow) > 0 { return indexesToAddNow, 0 } - return indexesToAddNow, pointer.DurationDeref(minRemainingTimePerIndex, 0) + return indexesToAddNow, ptr.Deref(minRemainingTimePerIndex, 0) } // activePodsForRemoval returns Pods that should be removed because there @@ -1841,8 +1839,16 @@ func recordJobPodFinished(logger klog.Logger, job *batch.Job, oldCounters batch. // in tandem, and now a previously completed index is // now out of range (i.e. index >= spec.Completions). 
if isIndexedJob(job) { + completions := int(*job.Spec.Completions) if job.Status.CompletedIndexes != oldCounters.CompletedIndexes { - diff = parseIndexesFromString(logger, job.Status.CompletedIndexes, int(*job.Spec.Completions)).total() - parseIndexesFromString(logger, oldCounters.CompletedIndexes, int(*job.Spec.Completions)).total() + diff = indexesCount(logger, &job.Status.CompletedIndexes, completions) - indexesCount(logger, &oldCounters.CompletedIndexes, completions) + } + backoffLimitLabel := backoffLimitMetricsLabel(job) + metrics.JobFinishedIndexesTotal.WithLabelValues(metrics.Succeeded, backoffLimitLabel).Add(float64(diff)) + if hasBackoffLimitPerIndex(job) && job.Status.FailedIndexes != oldCounters.FailedIndexes { + if failedDiff := indexesCount(logger, job.Status.FailedIndexes, completions) - indexesCount(logger, oldCounters.FailedIndexes, completions); failedDiff > 0 { + metrics.JobFinishedIndexesTotal.WithLabelValues(metrics.Failed, backoffLimitLabel).Add(float64(failedDiff)) + } } } else { diff = int(job.Status.Succeeded) - int(oldCounters.Succeeded) @@ -1854,6 +1860,20 @@ func recordJobPodFinished(logger klog.Logger, job *batch.Job, oldCounters batch. metrics.JobPodsFinished.WithLabelValues(completionMode, metrics.Failed).Add(float64(diff)) } +func indexesCount(logger klog.Logger, indexesStr *string, completions int) int { + if indexesStr == nil { + return 0 + } + return parseIndexesFromString(logger, *indexesStr, completions).total() +} + +func backoffLimitMetricsLabel(job *batch.Job) string { + if hasBackoffLimitPerIndex(job) { + return "perIndex" + } + return "global" +} + func recordJobPodFailurePolicyActions(job *batch.Job, podFailureCountByPolicyAction map[string]int) { for action, count := range podFailureCountByPolicyAction { metrics.PodFailuresHandledByFailurePolicy.WithLabelValues(action).Add(float64(count)) @@ -1874,8 +1894,46 @@ func countReadyPods(pods []*v1.Pod) int32 { // PodReplacementPolicy controls when we recreate pods if they are marked as terminating // Failed means that we recreate only once the pod has terminated. func onlyReplaceFailedPods(job *batch.Job) bool { - if feature.DefaultFeatureGate.Enabled(features.JobPodReplacementPolicy) && *job.Spec.PodReplacementPolicy == batch.Failed { + // We check both PodReplacementPolicy for nil and failed + // because it is possible that `PodReplacementPolicy` is not defaulted, + // when the `JobPodReplacementPolicy` feature gate is disabled for API server. + if feature.DefaultFeatureGate.Enabled(features.JobPodReplacementPolicy) && job.Spec.PodReplacementPolicy != nil && *job.Spec.PodReplacementPolicy == batch.Failed { return true } return feature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) && job.Spec.PodFailurePolicy != nil } + +func (jm *Controller) cleanupPodFinalizers(job *batch.Job) { + // Listing pods shouldn't really fail, as we are just querying the informer cache. 
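The nil guard added to onlyReplaceFailedPods above matters because spec.podReplacementPolicy is never defaulted when the JobPodReplacementPolicy feature gate is disabled on the API server, so dereferencing it unconditionally could panic. A tiny self-contained illustration of the same pattern (the helper name is hypothetical):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

// replaceOnlyFailed nil-checks the policy pointer before comparing it, the same
// guard used in the controller change above.
func replaceOnlyFailed(spec *batchv1.JobSpec) bool {
	return spec.PodReplacementPolicy != nil && *spec.PodReplacementPolicy == batchv1.Failed
}

func main() {
	fmt.Println(replaceOnlyFailed(&batchv1.JobSpec{})) // false, no nil-pointer panic

	policy := batchv1.Failed
	fmt.Println(replaceOnlyFailed(&batchv1.JobSpec{PodReplacementPolicy: &policy})) // true
}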
+ selector, err := metav1.LabelSelectorAsSelector(job.Spec.Selector) + if err != nil { + utilruntime.HandleError(fmt.Errorf("parsing deleted job selector: %v", err)) + return + } + pods, _ := jm.podStore.Pods(job.Namespace).List(selector) + for _, pod := range pods { + if metav1.IsControlledBy(pod, job) && hasJobTrackingFinalizer(pod) { + jm.enqueueOrphanPod(pod) + } + } +} + +func recordJobPodsCreationTotal(job *batch.Job, succeeded, failed int32) { + reason := metrics.PodCreateNew + if feature.DefaultFeatureGate.Enabled(features.JobPodReplacementPolicy) { + podsTerminating := job.Status.Terminating != nil && *job.Status.Terminating > 0 + isRecreateAction := podsTerminating || job.Status.Failed > 0 + if isRecreateAction { + reason = metrics.PodRecreateTerminatingOrFailed + if *job.Spec.PodReplacementPolicy == batch.Failed { + reason = metrics.PodRecreateFailed + } + } + } + if succeeded > 0 { + metrics.JobPodsCreationTotal.WithLabelValues(reason, metrics.Succeeded).Add(float64(succeeded)) + } + if failed > 0 { + metrics.JobPodsCreationTotal.WithLabelValues(reason, metrics.Failed).Add(float64(failed)) + } +} diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go index a84ffc7c1016a..bcb59398d8d6d 100644 --- a/pkg/controller/job/job_controller_test.go +++ b/pkg/controller/job/job_controller_test.go @@ -59,7 +59,7 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/utils/clock" clocktesting "k8s.io/utils/clock/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var realClock = &clock.RealClock{} @@ -123,13 +123,18 @@ func newJob(parallelism, completions, backoffLimit int32, completionMode batch.C return newJobWithName("foobar", parallelism, completions, backoffLimit, completionMode) } -func newControllerFromClient(ctx context.Context, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*Controller, informers.SharedInformerFactory) { - return newControllerFromClientWithClock(ctx, kubeClient, resyncPeriod, realClock) +func newControllerFromClient(ctx context.Context, t *testing.T, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*Controller, informers.SharedInformerFactory) { + t.Helper() + return newControllerFromClientWithClock(ctx, t, kubeClient, resyncPeriod, realClock) } -func newControllerFromClientWithClock(ctx context.Context, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, clock clock.WithTicker) (*Controller, informers.SharedInformerFactory) { +func newControllerFromClientWithClock(ctx context.Context, t *testing.T, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, clock clock.WithTicker) (*Controller, informers.SharedInformerFactory) { + t.Helper() sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod()) - jm := newControllerWithClock(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient, clock) + jm, err := newControllerWithClock(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient, clock) + if err != nil { + t.Fatalf("Error creating Job controller: %v", err) + } jm.podControl = &controller.FakePodControl{} return jm, sharedInformers } @@ -286,7 +291,6 @@ func TestControllerSyncJob(t *testing.T) { expectedPodPatches int // features - jobReadyPodsEnabled bool podIndexLabelDisabled bool jobPodReplacementPolicy bool }{ @@ -296,6 +300,7 @@ func TestControllerSyncJob(t *testing.T) { backoffLimit: 6, expectedCreations: 2, expectedActive: 2, + 
expectedReady: ptr.To[int32](0), }, "WQ job start": { parallelism: 2, @@ -303,6 +308,7 @@ func TestControllerSyncJob(t *testing.T) { backoffLimit: 6, expectedCreations: 2, expectedActive: 2, + expectedReady: ptr.To[int32](0), }, "pending pods": { parallelism: 2, @@ -310,6 +316,7 @@ func TestControllerSyncJob(t *testing.T) { backoffLimit: 6, pendingPods: 2, expectedActive: 2, + expectedReady: ptr.To[int32](0), }, "correct # of pods": { parallelism: 3, @@ -318,16 +325,7 @@ func TestControllerSyncJob(t *testing.T) { activePods: 3, readyPods: 2, expectedActive: 3, - }, - "correct # of pods, ready enabled": { - parallelism: 3, - completions: 5, - backoffLimit: 6, - activePods: 3, - readyPods: 2, - expectedActive: 3, - expectedReady: pointer.Int32(2), - jobReadyPodsEnabled: true, + expectedReady: ptr.To[int32](2), }, "WQ job: correct # of pods": { parallelism: 2, @@ -335,6 +333,7 @@ func TestControllerSyncJob(t *testing.T) { backoffLimit: 6, activePods: 2, expectedActive: 2, + expectedReady: ptr.To[int32](0), }, "too few active pods": { parallelism: 2, @@ -346,6 +345,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 2, expectedSucceeded: 1, expectedPodPatches: 1, + expectedReady: ptr.To[int32](0), }, "WQ job: recreate pods when failed": { parallelism: 1, @@ -356,10 +356,26 @@ func TestControllerSyncJob(t *testing.T) { podReplacementPolicy: podReplacementPolicy(batch.Failed), jobPodReplacementPolicy: true, terminatingPods: 1, - expectedTerminating: pointer.Int32(1), + expectedTerminating: ptr.To[int32](1), + expectedReady: ptr.To[int32](0), + // Removes finalizer and deletes one failed pod + expectedPodPatches: 1, + expectedFailed: 1, + expectedActive: 1, + }, + "WQ job: turn on PodReplacementPolicy but not set PodReplacementPolicy": { + parallelism: 1, + completions: 1, + backoffLimit: 6, + activePods: 1, + failedPods: 1, + jobPodReplacementPolicy: true, + expectedTerminating: ptr.To[int32](1), + expectedReady: ptr.To[int32](0), + terminatingPods: 1, + expectedActive: 1, expectedPodPatches: 2, - expectedDeletions: 1, - expectedFailed: 1, + expectedFailed: 2, }, "WQ job: recreate pods when terminating or failed": { parallelism: 1, @@ -370,12 +386,27 @@ func TestControllerSyncJob(t *testing.T) { podReplacementPolicy: podReplacementPolicy(batch.TerminatingOrFailed), jobPodReplacementPolicy: true, terminatingPods: 1, - expectedTerminating: pointer.Int32(1), + expectedTerminating: ptr.To[int32](1), + expectedReady: ptr.To[int32](0), expectedActive: 1, expectedPodPatches: 2, expectedFailed: 2, }, - + "more terminating pods than parallelism": { + parallelism: 1, + completions: 1, + backoffLimit: 6, + activePods: 2, + failedPods: 0, + terminatingPods: 4, + podReplacementPolicy: podReplacementPolicy(batch.Failed), + jobPodReplacementPolicy: true, + expectedTerminating: ptr.To[int32](4), + expectedReady: ptr.To[int32](0), + expectedActive: 1, + expectedDeletions: 1, + expectedPodPatches: 1, + }, "too few active pods and active back-off": { parallelism: 1, completions: 1, @@ -396,6 +427,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 0, expectedSucceeded: 0, expectedPodPatches: 0, + expectedReady: ptr.To[int32](0), controllerTime: &referenceTime, }, "too few active pods and no back-offs": { @@ -412,6 +444,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 1, expectedSucceeded: 0, expectedPodPatches: 0, + expectedReady: ptr.To[int32](0), controllerTime: &referenceTime, }, "too few active pods with a dynamic job": { @@ -421,6 +454,7 @@ func TestControllerSyncJob(t 
*testing.T) { activePods: 1, expectedCreations: 1, expectedActive: 2, + expectedReady: ptr.To[int32](0), }, "too few active pods, with controller error": { parallelism: 2, @@ -433,6 +467,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 1, expectedSucceeded: 0, expectedPodPatches: 1, + expectedReady: ptr.To[int32](0), }, "too many active pods": { parallelism: 2, @@ -442,6 +477,7 @@ func TestControllerSyncJob(t *testing.T) { expectedDeletions: 1, expectedActive: 2, expectedPodPatches: 1, + expectedReady: ptr.To[int32](0), }, "too many active pods, with controller error": { parallelism: 2, @@ -452,6 +488,7 @@ func TestControllerSyncJob(t *testing.T) { expectedDeletions: 0, expectedPodPatches: 1, expectedActive: 3, + expectedReady: ptr.To[int32](0), }, "failed + succeed pods: reset backoff delay": { parallelism: 2, @@ -465,6 +502,7 @@ func TestControllerSyncJob(t *testing.T) { expectedSucceeded: 1, expectedFailed: 1, expectedPodPatches: 2, + expectedReady: ptr.To[int32](0), }, "new failed pod": { parallelism: 2, @@ -476,6 +514,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 2, expectedFailed: 1, expectedPodPatches: 1, + expectedReady: ptr.To[int32](0), }, "no new pod; possible finalizer update of failed pod": { parallelism: 1, @@ -492,6 +531,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 1, expectedFailed: 1, expectedPodPatches: 0, + expectedReady: ptr.To[int32](0), }, "only new failed pod with controller error": { parallelism: 2, @@ -504,6 +544,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 1, expectedFailed: 0, expectedPodPatches: 1, + expectedReady: ptr.To[int32](0), }, "job finish": { parallelism: 2, @@ -514,6 +555,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCondition: &jobConditionComplete, expectedConditionStatus: v1.ConditionTrue, expectedPodPatches: 5, + expectedReady: ptr.To[int32](0), }, "WQ job finishing": { parallelism: 2, @@ -524,6 +566,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 1, expectedSucceeded: 1, expectedPodPatches: 1, + expectedReady: ptr.To[int32](0), }, "WQ job all finished": { parallelism: 2, @@ -534,6 +577,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCondition: &jobConditionComplete, expectedConditionStatus: v1.ConditionTrue, expectedPodPatches: 2, + expectedReady: ptr.To[int32](0), }, "WQ job all finished despite one failure": { parallelism: 2, @@ -546,6 +590,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCondition: &jobConditionComplete, expectedConditionStatus: v1.ConditionTrue, expectedPodPatches: 2, + expectedReady: ptr.To[int32](0), }, "more active pods than parallelism": { parallelism: 2, @@ -555,6 +600,7 @@ func TestControllerSyncJob(t *testing.T) { expectedDeletions: 8, expectedActive: 2, expectedPodPatches: 8, + expectedReady: ptr.To[int32](0), }, "more active pods than remaining completions": { parallelism: 3, @@ -566,6 +612,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 2, expectedSucceeded: 2, expectedPodPatches: 3, + expectedReady: ptr.To[int32](0), }, "status change": { parallelism: 2, @@ -576,6 +623,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 2, expectedSucceeded: 2, expectedPodPatches: 2, + expectedReady: ptr.To[int32](0), }, "deleting job": { parallelism: 2, @@ -588,6 +636,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 2, expectedSucceeded: 1, expectedPodPatches: 3, + expectedReady: ptr.To[int32](0), }, "limited pods": { parallelism: 100, @@ -596,6 +645,7 @@ func 
TestControllerSyncJob(t *testing.T) { podLimit: 10, expectedCreations: 10, expectedActive: 10, + expectedReady: ptr.To[int32](0), }, "too many job failures": { parallelism: 2, @@ -607,6 +657,7 @@ func TestControllerSyncJob(t *testing.T) { expectedConditionStatus: v1.ConditionTrue, expectedConditionReason: "BackoffLimitExceeded", expectedPodPatches: 1, + expectedReady: ptr.To[int32](0), }, "job failures, unsatisfied expectations": { parallelism: 2, @@ -616,6 +667,7 @@ func TestControllerSyncJob(t *testing.T) { fakeExpectationAtCreation: 1, expectedFailed: 1, expectedPodPatches: 1, + expectedReady: ptr.To[int32](0), }, "indexed job start": { parallelism: 2, @@ -625,6 +677,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCreations: 2, expectedActive: 2, expectedCreatedIndexes: sets.New(0, 1), + expectedReady: ptr.To[int32](0), }, "indexed job with some pods deleted, podReplacementPolicy Failed": { parallelism: 2, @@ -637,7 +690,8 @@ func TestControllerSyncJob(t *testing.T) { podReplacementPolicy: podReplacementPolicy(batch.Failed), jobPodReplacementPolicy: true, terminatingPods: 1, - expectedTerminating: pointer.Int32(1), + expectedTerminating: ptr.To[int32](1), + expectedReady: ptr.To[int32](0), }, "indexed job with some pods deleted, podReplacementPolicy TerminatingOrFailed": { parallelism: 2, @@ -650,7 +704,8 @@ func TestControllerSyncJob(t *testing.T) { podReplacementPolicy: podReplacementPolicy(batch.TerminatingOrFailed), jobPodReplacementPolicy: true, terminatingPods: 1, - expectedTerminating: pointer.Int32(1), + expectedTerminating: ptr.To[int32](1), + expectedReady: ptr.To[int32](0), expectedPodPatches: 1, }, "indexed job completed": { @@ -670,6 +725,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCondition: &jobConditionComplete, expectedConditionStatus: v1.ConditionTrue, expectedPodPatches: 4, + expectedReady: ptr.To[int32](0), }, "indexed job repeated completed index": { parallelism: 2, @@ -687,6 +743,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCompletedIdxs: "0,1", expectedCreatedIndexes: sets.New(2), expectedPodPatches: 3, + expectedReady: ptr.To[int32](0), }, "indexed job some running and completed pods": { parallelism: 8, @@ -709,6 +766,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCompletedIdxs: "2,4,5,7-9", expectedCreatedIndexes: sets.New(1, 6, 10, 11, 12, 13), expectedPodPatches: 6, + expectedReady: ptr.To[int32](0), }, "indexed job some failed pods": { parallelism: 3, @@ -725,6 +783,7 @@ func TestControllerSyncJob(t *testing.T) { expectedFailed: 2, expectedCreatedIndexes: sets.New(0, 2), expectedPodPatches: 2, + expectedReady: ptr.To[int32](0), }, "indexed job some pods without index": { parallelism: 2, @@ -749,6 +808,7 @@ func TestControllerSyncJob(t *testing.T) { expectedFailed: 0, expectedCompletedIdxs: "0", expectedPodPatches: 8, + expectedReady: ptr.To[int32](0), }, "indexed job repeated indexes": { parallelism: 5, @@ -770,6 +830,7 @@ func TestControllerSyncJob(t *testing.T) { expectedSucceeded: 1, expectedCompletedIdxs: "0", expectedPodPatches: 5, + expectedReady: ptr.To[int32](0), }, "indexed job with indexes outside of range": { parallelism: 2, @@ -790,6 +851,7 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 0, expectedFailed: 0, expectedPodPatches: 5, + expectedReady: ptr.To[int32](0), }, "suspending a job with satisfied expectations": { // Suspended Job should delete active pods when expectations are @@ -806,6 +868,7 @@ func TestControllerSyncJob(t *testing.T) { expectedConditionStatus: v1.ConditionTrue, 
expectedConditionReason: "JobSuspended", expectedPodPatches: 2, + expectedReady: ptr.To[int32](0), }, "suspending a job with unsatisfied expectations": { // Unlike the previous test, we expect the controller to NOT suspend the @@ -821,6 +884,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCreations: 0, expectedDeletions: 0, expectedActive: 3, + expectedReady: ptr.To[int32](0), }, "resuming a suspended job": { wasSuspended: true, @@ -834,6 +898,7 @@ func TestControllerSyncJob(t *testing.T) { expectedCondition: &jobConditionSuspended, expectedConditionStatus: v1.ConditionFalse, expectedConditionReason: "JobResumed", + expectedReady: ptr.To[int32](0), }, "suspending a deleted job": { // We would normally expect the active pods to be deleted (see a few test @@ -850,6 +915,7 @@ func TestControllerSyncJob(t *testing.T) { expectedDeletions: 0, expectedActive: 2, expectedPodPatches: 2, + expectedReady: ptr.To[int32](0), }, "indexed job with podIndexLabel feature disabled": { parallelism: 2, @@ -860,13 +926,13 @@ func TestControllerSyncJob(t *testing.T) { expectedActive: 2, expectedCreatedIndexes: sets.New(0, 1), podIndexLabelDisabled: true, + expectedReady: ptr.To[int32](0), }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { logger, _ := ktesting.NewTestContext(t) - defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobReadyPods, tc.jobReadyPodsEnabled)() defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodIndexLabel, !tc.podIndexLabelDisabled)() defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.jobPodReplacementPolicy)() // job manager setup @@ -879,7 +945,7 @@ func TestControllerSyncJob(t *testing.T) { fakeClock = clocktesting.NewFakeClock(time.Now()) } - manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientSet, controller.NoResyncPeriodFunc, fakeClock) + manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientSet, controller.NoResyncPeriodFunc, fakeClock) fakePodControl := controller.FakePodControl{Err: tc.podControllerError, CreateLimit: tc.podLimit} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -887,7 +953,7 @@ func TestControllerSyncJob(t *testing.T) { // job & pods setup job := newJob(tc.parallelism, tc.completions, tc.backoffLimit, tc.completionMode) - job.Spec.Suspend = pointer.Bool(tc.suspend) + job.Spec.Suspend = ptr.To(tc.suspend) if tc.jobPodReplacementPolicy { job.Spec.PodReplacementPolicy = tc.podReplacementPolicy } @@ -1482,7 +1548,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(6), + Completions: ptr.To[int32](6), }, Status: batch.JobStatus{ Active: 1, @@ -1510,8 +1576,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(2), - Parallelism: pointer.Int32(2), + Completions: ptr.To[int32](2), + Parallelism: ptr.To[int32](2), }, Status: batch.JobStatus{ Active: 2, @@ -1537,8 +1603,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(2), - Parallelism: pointer.Int32(2), + Completions: ptr.To[int32](2), + Parallelism: ptr.To[int32](2), }, Status: batch.JobStatus{ Active: 2, @@ -1565,7 +1631,7 @@ 
func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(6), + Completions: ptr.To[int32](6), }, Status: batch.JobStatus{ Active: 1, @@ -1598,7 +1664,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(7), + Completions: ptr.To[int32](7), }, Status: batch.JobStatus{ Failed: 2, @@ -1680,7 +1746,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(501), + Completions: ptr.To[int32](501), }, }, pods: func() []*v1.Pod { @@ -1703,8 +1769,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { "pod flips from failed to succeeded": { job: batch.Job{ Spec: batch.JobSpec{ - Completions: pointer.Int32(2), - Parallelism: pointer.Int32(2), + Completions: ptr.To[int32](2), + Parallelism: ptr.To[int32](2), }, Status: batch.JobStatus{ UncountedTerminatedPods: &batch.UncountedTerminatedPods{ @@ -1732,8 +1798,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(6), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](6), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []*v1.Pod{ @@ -1742,7 +1808,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { wantStatusUpdates: []batch.JobStatus{ { UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), }, }, }, @@ -1751,8 +1817,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(6), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](6), + BackoffLimitPerIndex: ptr.To[int32](1), }, Status: batch.JobStatus{ Active: 1, @@ -1769,13 +1835,13 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { UncountedTerminatedPods: &batch.UncountedTerminatedPods{ Failed: []types.UID{"a1"}, }, - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), }, { Active: 1, Failed: 1, UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), }, }, wantFailedPodsMetric: 1, @@ -1785,8 +1851,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { job: batch.Job{ Spec: batch.JobSpec{ CompletionMode: &indexedCompletion, - Completions: pointer.Int32(6), - BackoffLimitPerIndex: pointer.Int32(1), + Completions: ptr.To[int32](6), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []*v1.Pod{ @@ -1795,14 +1861,14 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { wantRmFinalizers: 1, wantStatusUpdates: []batch.JobStatus{ { - FailedIndexes: pointer.String("1"), + FailedIndexes: ptr.To("1"), UncountedTerminatedPods: &batch.UncountedTerminatedPods{ Failed: []types.UID{"a"}, }, }, { Failed: 1, - FailedIndexes: pointer.String("1"), + FailedIndexes: ptr.To("1"), UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, }, }, @@ -1813,7 +1879,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) { t.Run(name, func(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)() clientSet := 
clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, _ := newControllerFromClient(ctx, clientSet, controller.NoResyncPeriodFunc) + manager, _ := newControllerFromClient(ctx, t, clientSet, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{Err: tc.podControlErr} metrics.JobPodsFinished.Reset() manager.podControl = &fakePodControl @@ -1909,7 +1975,7 @@ func TestSyncJobPastDeadline(t *testing.T) { expectedDeletions: 1, expectedFailed: 1, expectedCondition: batch.JobFailed, - expectedConditionReason: "DeadlineExceeded", + expectedConditionReason: batch.JobReasonDeadlineExceeded, }, "activeDeadlineSeconds bigger than single pod execution": { parallelism: 1, @@ -1923,7 +1989,7 @@ func TestSyncJobPastDeadline(t *testing.T) { expectedSucceeded: 1, expectedFailed: 1, expectedCondition: batch.JobFailed, - expectedConditionReason: "DeadlineExceeded", + expectedConditionReason: batch.JobReasonDeadlineExceeded, }, "activeDeadlineSeconds times-out before any pod starts": { parallelism: 1, @@ -1932,7 +1998,7 @@ func TestSyncJobPastDeadline(t *testing.T) { startTime: 10, backoffLimit: 6, expectedCondition: batch.JobFailed, - expectedConditionReason: "DeadlineExceeded", + expectedConditionReason: batch.JobReasonDeadlineExceeded, }, "activeDeadlineSeconds with backofflimit reach": { parallelism: 1, @@ -1942,7 +2008,7 @@ func TestSyncJobPastDeadline(t *testing.T) { failedPods: 1, expectedFailed: 1, expectedCondition: batch.JobFailed, - expectedConditionReason: "BackoffLimitExceeded", + expectedConditionReason: batch.JobReasonBackoffLimitExceeded, }, "activeDeadlineSeconds is not triggered when Job is suspended": { suspend: true, @@ -1960,7 +2026,7 @@ func TestSyncJobPastDeadline(t *testing.T) { t.Run(name, func(t *testing.T) { // job manager setup clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientSet, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientSet, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -1974,7 +2040,7 @@ func TestSyncJobPastDeadline(t *testing.T) { // job & pods setup job := newJob(tc.parallelism, tc.completions, tc.backoffLimit, batch.NonIndexedCompletion) job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds - job.Spec.Suspend = pointer.Bool(tc.suspend) + job.Spec.Suspend = ptr.To(tc.suspend) start := metav1.Unix(metav1.Now().Time.Unix()-tc.startTime, 0) job.Status.StartTime = &start sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) @@ -2038,7 +2104,7 @@ func TestPastDeadlineJobFinished(t *testing.T) { _, ctx := ktesting.NewTestContext(t) clientset := fake.NewSimpleClientset() fakeClock := clocktesting.NewFakeClock(time.Now().Truncate(time.Second)) - manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) manager.podStoreSynced = alwaysReady manager.jobStoreSynced = alwaysReady manager.expectations = FakeJobExpectations{ @@ -2071,7 +2137,7 @@ func 
TestPastDeadlineJobFinished(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { job := newJobWithName(tc.jobName, 1, 1, 6, batch.NonIndexedCompletion) - job.Spec.ActiveDeadlineSeconds = pointer.Int64(1) + job.Spec.ActiveDeadlineSeconds = ptr.To[int64](1) if tc.setStartTime { start := metav1.NewTime(fakeClock.Now()) job.Status.StartTime = &start @@ -2083,7 +2149,7 @@ func TestPastDeadlineJobFinished(t *testing.T) { } var j *batch.Job - err = wait.PollImmediate(200*time.Microsecond, 3*time.Second, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(ctx, 200*time.Microsecond, 3*time.Second, true, func(ctx context.Context) (done bool, err error) { j, err = clientset.BatchV1().Jobs(metav1.NamespaceDefault).Get(ctx, job.GetName(), metav1.GetOptions{}) if err != nil { return false, err @@ -2093,12 +2159,12 @@ func TestPastDeadlineJobFinished(t *testing.T) { if err != nil { t.Errorf("Job failed to ensure that start time was set: %v", err) } - err = wait.Poll(100*time.Millisecond, 3*time.Second, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, false, func(ctx context.Context) (done bool, err error) { j, err = clientset.BatchV1().Jobs(metav1.NamespaceDefault).Get(ctx, job.GetName(), metav1.GetOptions{}) if err != nil { return false, nil } - if getCondition(j, batch.JobFailed, v1.ConditionTrue, "DeadlineExceeded") { + if getCondition(j, batch.JobFailed, v1.ConditionTrue, batch.JobReasonDeadlineExceeded) { if manager.clock.Since(j.Status.StartTime.Time) < time.Duration(*j.Spec.ActiveDeadlineSeconds)*time.Second { return true, errors.New("Job contains DeadlineExceeded condition earlier than expected") } @@ -2117,7 +2183,7 @@ func TestPastDeadlineJobFinished(t *testing.T) { func TestSingleJobFailedCondition(t *testing.T) { _, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -2129,7 +2195,7 @@ func TestSingleJobFailedCondition(t *testing.T) { } job := newJob(1, 1, 6, batch.NonIndexedCompletion) - job.Spec.ActiveDeadlineSeconds = pointer.Int64(10) + job.Spec.ActiveDeadlineSeconds = ptr.To[int64](10) start := metav1.Unix(metav1.Now().Time.Unix()-15, 0) job.Status.StartTime = &start job.Status.Conditions = append(job.Status.Conditions, *newCondition(batch.JobFailed, v1.ConditionFalse, "DeadlineExceeded", "Job was active longer than specified deadline", realClock.Now())) @@ -2157,7 +2223,7 @@ func TestSingleJobFailedCondition(t *testing.T) { func TestSyncJobComplete(t *testing.T) { _, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = 
alwaysReady @@ -2183,7 +2249,7 @@ func TestSyncJobComplete(t *testing.T) { func TestSyncJobDeleted(t *testing.T) { _, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, _ := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, _ := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -2266,15 +2332,15 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { Action: batch.PodFailurePolicyActionIgnore, OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ - ContainerName: pointer.String("main-container"), + ContainerName: ptr.To("main-container"), Operator: batch.PodFailurePolicyOnExitCodesOpIn, Values: []int32{1, 2, 3}, }, @@ -2282,7 +2348,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Action: batch.PodFailurePolicyActionFailJob, OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ - ContainerName: pointer.String("main-container"), + ContainerName: ptr.To("main-container"), Operator: batch.PodFailurePolicyOnExitCodesOpIn, Values: []int32{5, 6, 7}, }, @@ -2330,9 +2396,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2368,9 +2434,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2397,7 +2463,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container main-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1", }, }, @@ -2413,9 +2479,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2425,7 +2491,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailureTarget, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container main-container for pod default/mypod-0 failed with 
exit code 5 matching FailJob rule at index 1", }, }, @@ -2452,7 +2518,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container main-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1", }, }, @@ -2468,9 +2534,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2480,7 +2546,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailureTarget, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container main-container for pod default/already-deleted-pod failed with exit code 5 matching FailJob rule at index 1", }, }, @@ -2507,7 +2573,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container main-container for pod default/already-deleted-pod failed with exit code 5 matching FailJob rule at index 1", }, }, @@ -2523,9 +2589,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2562,9 +2628,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2596,7 +2662,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container main-container for pod default/mypod-1 failed with exit code 5 matching FailJob rule at index 1", }, }, @@ -2613,9 +2679,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Selector: validSelector, Template: validTemplate, CompletionMode: &indexedCompletionMode, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2642,7 +2708,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container main-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1", }, }, @@ -2658,9 +2724,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: 
validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -2695,7 +2761,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container main-container for pod default/mypod-0 failed with exit code 42 matching FailJob rule at index 0", }, }, @@ -2711,9 +2777,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -2758,9 +2824,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2797,7 +2863,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container init-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1", }, }, @@ -2813,9 +2879,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(0), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](0), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2859,9 +2925,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(0), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](0), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2896,9 +2962,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(0), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](0), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: onExitCodeRules, }, @@ -2924,7 +2990,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "BackoffLimitExceeded", + Reason: batch.JobReasonBackoffLimitExceeded, Message: "Job has reached the specified backoff limit", }, }, @@ -2940,9 +3006,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - 
BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -2993,9 +3059,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3059,9 +3125,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(0), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](0), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3103,9 +3169,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(0), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](0), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3150,9 +3216,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](6), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3185,7 +3251,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Pod default/mypod-0 has condition DisruptionTarget matching FailJob rule at index 0", }, }, @@ -3199,10 +3265,10 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { TypeMeta: metav1.TypeMeta{Kind: "Job"}, ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Parallelism: pointer.Int32(1), + Parallelism: ptr.To[int32](1), Selector: validSelector, Template: validTemplate, - BackoffLimit: pointer.Int32(0), + BackoffLimit: ptr.To[int32](0), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3233,10 +3299,10 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { TypeMeta: metav1.TypeMeta{Kind: "Job"}, ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Parallelism: pointer.Int32(1), + Parallelism: ptr.To[int32](1), Selector: validSelector, Template: validTemplate, - BackoffLimit: pointer.Int32(0), + BackoffLimit: ptr.To[int32](0), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3274,7 +3340,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { tc.job.Spec.PodReplacementPolicy = podReplacementPolicy(batch.Failed) } clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + 
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -3325,8 +3391,8 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { if actual.Status.Failed != tc.wantStatusFailed { t.Errorf("unexpected number of failed pods. Expected %d, saw %d\n", tc.wantStatusFailed, actual.Status.Failed) } - if pointer.Int32Deref(actual.Status.Terminating, 0) != pointer.Int32Deref(tc.wantStatusTerminating, 0) { - t.Errorf("unexpected number of terminating pods. Expected %d, saw %d\n", pointer.Int32Deref(tc.wantStatusTerminating, 0), pointer.Int32Deref(actual.Status.Terminating, 0)) + if ptr.Deref(actual.Status.Terminating, 0) != ptr.Deref(tc.wantStatusTerminating, 0) { + t.Errorf("unexpected number of terminating pods. Expected %d, saw %d\n", ptr.Deref(tc.wantStatusTerminating, 0), ptr.Deref(actual.Status.Terminating, 0)) } }) } @@ -3371,11 +3437,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(math.MaxInt32), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](math.MaxInt32), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []v1.Pod{ @@ -3387,7 +3453,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Failed: 1, Succeeded: 2, CompletedIndexes: "0,1", - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, Conditions: []batch.JobCondition{ { @@ -3405,11 +3471,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(math.MaxInt32), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](math.MaxInt32), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []v1.Pod{ @@ -3418,7 +3484,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { wantStatus: batch.JobStatus{ Active: 2, UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), }, }, "single failed pod replaced already": { @@ -3429,11 +3495,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(math.MaxInt32), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](math.MaxInt32), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []v1.Pod{ @@ -3444,7 +3510,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Active: 2, Failed: 1, UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), }, }, "single failed index due to exceeding the backoff limit per index, the job continues": { @@ 
-3455,11 +3521,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(math.MaxInt32), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](math.MaxInt32), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []v1.Pod{ @@ -3468,7 +3534,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { wantStatus: batch.JobStatus{ Active: 1, Failed: 1, - FailedIndexes: pointer.String("0"), + FailedIndexes: ptr.To("0"), UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, }, }, @@ -3481,11 +3547,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(math.MaxInt32), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](math.MaxInt32), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3516,7 +3582,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { wantStatus: batch.JobStatus{ Active: 1, Failed: 1, - FailedIndexes: pointer.String("0"), + FailedIndexes: ptr.To("0"), UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, }, }, @@ -3529,11 +3595,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](6), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3565,19 +3631,19 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { wantStatus: batch.JobStatus{ Active: 0, Failed: 1, - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, Conditions: []batch.JobCondition{ { Type: batch.JobFailureTarget, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container x for pod default/mypod-0 failed with exit code 3 matching FailJob rule at index 0", }, { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "PodFailurePolicy", + Reason: batch.JobReasonPodFailurePolicy, Message: "Container x for pod default/mypod-0 failed with exit code 3 matching FailJob rule at index 0", }, }, @@ -3592,11 +3658,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(6), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](6), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), PodFailurePolicy: 
&batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -3628,7 +3694,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { wantStatus: batch.JobStatus{ Active: 2, Failed: 0, - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, }, }, @@ -3640,11 +3706,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(1), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](1), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []v1.Pod{ @@ -3654,13 +3720,13 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { wantStatus: batch.JobStatus{ Failed: 2, Succeeded: 0, - FailedIndexes: pointer.String(""), + FailedIndexes: ptr.To(""), UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, Conditions: []batch.JobCondition{ { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "BackoffLimitExceeded", + Reason: batch.JobReasonBackoffLimitExceeded, Message: "Job has reached the specified backoff limit", }, }, @@ -3674,11 +3740,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), - BackoffLimit: pointer.Int32(math.MaxInt32), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), + BackoffLimit: ptr.To[int32](math.MaxInt32), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), }, }, pods: []v1.Pod{ @@ -3688,14 +3754,14 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { wantStatus: batch.JobStatus{ Failed: 1, Succeeded: 1, - FailedIndexes: pointer.String("0"), + FailedIndexes: ptr.To("0"), CompletedIndexes: "1", UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, Conditions: []batch.JobCondition{ { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "FailedIndexes", + Reason: batch.JobReasonFailedIndexes, Message: "Job has failed indexes", }, }, @@ -3709,12 +3775,12 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(4), - Completions: pointer.Int32(4), - BackoffLimit: pointer.Int32(math.MaxInt32), + Parallelism: ptr.To[int32](4), + Completions: ptr.To[int32](4), + BackoffLimit: ptr.To[int32](math.MaxInt32), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), - MaxFailedIndexes: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), + MaxFailedIndexes: ptr.To[int32](1), }, }, pods: []v1.Pod{ @@ -3726,14 +3792,14 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { wantStatus: batch.JobStatus{ Failed: 3, Succeeded: 1, - FailedIndexes: pointer.String("0,2"), + FailedIndexes: ptr.To("0,2"), CompletedIndexes: "1", UncountedTerminatedPods: &batch.UncountedTerminatedPods{}, Conditions: []batch.JobCondition{ { Type: batch.JobFailed, Status: v1.ConditionTrue, - Reason: "MaxFailedIndexesExceeded", + Reason: batch.JobReasonMaxFailedIndexesExceeded, Message: "Job has exceeded the specified maximal number of failed indexes", }, }, @@ -3747,14 
+3813,14 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { Spec: batch.JobSpec{ Selector: validSelector, Template: validTemplate, - Parallelism: pointer.Int32(3), - Completions: pointer.Int32(3), - BackoffLimit: pointer.Int32(math.MaxInt32), + Parallelism: ptr.To[int32](3), + Completions: ptr.To[int32](3), + BackoffLimit: ptr.To[int32](math.MaxInt32), CompletionMode: completionModePtr(batch.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), }, Status: batch.JobStatus{ - FailedIndexes: pointer.String("0"), + FailedIndexes: ptr.To("0"), CompletedIndexes: "1", }, }, @@ -3775,7 +3841,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)() clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(now) - manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -3833,7 +3899,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) { t.Run(name, func(t *testing.T) { t.Cleanup(setDurationDuringTest(&DefaultJobApiBackOff, fastJobApiBackoff)) fakeClient := clocktesting.NewFakeClock(time.Now()) - manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClient) + manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClient) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -3871,7 +3937,7 @@ func TestUpdateJobRequeue(t *testing.T) { "spec update": { oldJob: newJob(1, 1, 1, batch.IndexedCompletion), updateFn: func(job *batch.Job) { - job.Spec.Suspend = pointer.Bool(false) + job.Spec.Suspend = ptr.To(false) job.Generation++ }, wantRequeuedImmediately: true, @@ -3886,7 +3952,7 @@ func TestUpdateJobRequeue(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) manager.podStoreSynced = alwaysReady manager.jobStoreSynced = alwaysReady @@ -3955,7 +4021,7 @@ func TestGetPodCreationInfoForIndependentIndexes(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { fakeClock := clocktesting.NewFakeClock(now) - manager, _ := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + manager, _ := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) gotIndexesToAdd, gotRemainingTime := manager.getPodCreationInfoForIndependentIndexes(logger, tc.indexesToAdd, tc.podsWithDelayedDeletionPerIndex) if diff := cmp.Diff(tc.wantIndexesToAdd, gotIndexesToAdd); diff != "" { t.Fatalf("Unexpected indexes to add: %s", diff) @@ -3970,7 +4036,7 @@ func TestGetPodCreationInfoForIndependentIndexes(t *testing.T) { func 
TestJobPodLookup(t *testing.T) { _, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) manager.podStoreSynced = alwaysReady manager.jobStoreSynced = alwaysReady testCases := []struct { @@ -4114,7 +4180,7 @@ func TestGetPodsForJob(t *testing.T) { job.DeletionTimestamp = &metav1.Time{} } clientSet := fake.NewSimpleClientset(job, otherJob) - jm, informer := newControllerFromClient(ctx, clientSet, controller.NoResyncPeriodFunc) + jm, informer := newControllerFromClient(ctx, t, clientSet, controller.NoResyncPeriodFunc) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady cachedJob := job.DeepCopy() @@ -4158,7 +4224,7 @@ func TestAddPod(t *testing.T) { clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(time.Now()) - jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady @@ -4202,7 +4268,7 @@ func TestAddPodOrphan(t *testing.T) { logger, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(time.Now()) - jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady @@ -4232,7 +4298,7 @@ func TestUpdatePod(t *testing.T) { logger := klog.FromContext(ctx) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(time.Now()) - jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady @@ -4280,7 +4346,7 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) { logger, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(time.Now()) - jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady @@ -4309,7 +4375,7 @@ func TestUpdatePodChangeControllerRef(t *testing.T) { logger := klog.FromContext(ctx) clientset := 
clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(time.Now()) - jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady @@ -4337,7 +4403,7 @@ func TestUpdatePodRelease(t *testing.T) { logger := klog.FromContext(ctx) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(time.Now()) - jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady @@ -4364,7 +4430,7 @@ func TestDeletePod(t *testing.T) { logger, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(time.Now()) - jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady @@ -4408,7 +4474,7 @@ func TestDeletePodOrphan(t *testing.T) { t.Cleanup(setDurationDuringTest(&syncJobBatchPeriod, 0)) logger, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - jm, informer := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + jm, informer := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) jm.podStoreSynced = alwaysReady jm.jobStoreSynced = alwaysReady @@ -4449,7 +4515,7 @@ func (fe FakeJobExpectations) SatisfiedExpectations(logger klog.Logger, controll func TestSyncJobExpectations(t *testing.T) { _, ctx := ktesting.NewTestContext(t) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -4486,7 +4552,7 @@ func TestWatchJobs(t *testing.T) { clientset := fake.NewSimpleClientset() fakeWatch := watch.NewFake() clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil)) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) manager.podStoreSynced = alwaysReady manager.jobStoreSynced = alwaysReady @@ -4532,7 +4598,7 @@ func TestWatchPods(t *testing.T) { clientset := 
fake.NewSimpleClientset(testJob) fakeWatch := watch.NewFake() clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil)) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) manager.podStoreSynced = alwaysReady manager.jobStoreSynced = alwaysReady @@ -4578,7 +4644,10 @@ func TestWatchOrphanPods(t *testing.T) { _, ctx := ktesting.NewTestContext(t) clientset := fake.NewSimpleClientset() sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) - manager := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset) + manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset) + if err != nil { + t.Fatalf("Error creating Job controller: %v", err) + } manager.podStoreSynced = alwaysReady manager.jobStoreSynced = alwaysReady @@ -4631,7 +4700,7 @@ func TestWatchOrphanPods(t *testing.T) { t.Fatalf("Creating orphan pod: %v", err) } - if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) { p, err := clientset.CoreV1().Pods(orphanPod.Namespace).Get(context.Background(), orphanPod.Name, metav1.GetOptions{}) if err != nil { return false, err @@ -4655,7 +4724,7 @@ func TestJobApiBackoffReset(t *testing.T) { clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) fakeClock := clocktesting.NewFakeClock(time.Now()) - manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock) + manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -4716,30 +4785,22 @@ func TestJobBackoff(t *testing.T) { newPod.ResourceVersion = "2" testCases := map[string]struct { - requeues int - oldPodPhase v1.PodPhase - phase v1.PodPhase - jobReadyPodsEnabled bool - wantBackoff time.Duration + requeues int + oldPodPhase v1.PodPhase + phase v1.PodPhase + wantBackoff time.Duration }{ - "failure": { + "failure with pod updates batching": { requeues: 0, phase: v1.PodFailed, wantBackoff: syncJobBatchPeriod, }, - "failure with pod updates batching": { - requeues: 0, - phase: v1.PodFailed, - jobReadyPodsEnabled: true, - wantBackoff: syncJobBatchPeriod, - }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobReadyPods, tc.jobReadyPodsEnabled)() clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -4847,7 +4908,7 @@ 
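Two API shifts recur in the hunks around here: NewController now returns the controller together with an error instead of a bare controller (and the newControllerFromClient/newControllerFromClientWithClock test helpers take the *testing.T, presumably so they can fail the test on a construction error), and the deprecated wait.Poll/wait.PollImmediate calls move to the context-aware wait.PollUntilContextTimeout. A hedged sketch of the new call shapes, assuming the fixtures already set up in this test file (ctx, t, clientset, sharedInformers) and its existing imports:

	// Construction can now fail, so the error must be checked.
	manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
	if err != nil {
		t.Fatalf("Error creating Job controller: %v", err)
	}

	// Arguments: ctx, poll interval, timeout, immediate, then a condition that
	// receives the context. The condition below is only a placeholder.
	if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false,
		func(ctx context.Context) (bool, error) {
			return manager != nil, nil
		}); err != nil {
		t.Fatalf("waiting for condition: %v", err)
	}
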
func TestJobBackoffForOnFailure(t *testing.T) { t.Run(name, func(t *testing.T) { // job manager setup clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -4861,7 +4922,7 @@ func TestJobBackoffForOnFailure(t *testing.T) { // job & pods setup job := newJob(tc.parallelism, tc.completions, tc.backoffLimit, batch.NonIndexedCompletion) job.Spec.Template.Spec.RestartPolicy = v1.RestartPolicyOnFailure - job.Spec.Suspend = pointer.Bool(tc.suspend) + job.Spec.Suspend = ptr.To(tc.suspend) sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer() for i, pod := range newPodList(len(tc.restartCounts), tc.podPhase, job) { @@ -4946,7 +5007,7 @@ func TestJobBackoffOnRestartPolicyNever(t *testing.T) { t.Run(name, func(t *testing.T) { // job manager setup clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc) + manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl manager.podStoreSynced = alwaysReady @@ -5080,7 +5141,10 @@ func TestFinalizersRemovedExpectations(t *testing.T) { _, ctx := ktesting.NewTestContext(t) clientset := fake.NewSimpleClientset() sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) - manager := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset) + manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset) + if err != nil { + t.Fatalf("Error creating Job controller: %v", err) + } manager.podStoreSynced = alwaysReady manager.jobStoreSynced = alwaysReady manager.podControl = &controller.FakePodControl{Err: errors.New("fake pod controller error")} @@ -5133,7 +5197,7 @@ func TestFinalizersRemovedExpectations(t *testing.T) { update := pods[0].DeepCopy() update.Finalizers = nil update.ResourceVersion = "1" - err := clientset.Tracker().Update(podsResource, update, update.Namespace) + err = clientset.Tracker().Update(podsResource, update, update.Namespace) if err != nil { t.Errorf("Removing finalizer: %v", err) } @@ -5163,7 +5227,7 @@ func TestFinalizersRemovedExpectations(t *testing.T) { uids = sets.New(string(pods[2].UID)) var diff string - if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) { gotExpectedUIDs = manager.finalizerExpectations.getExpectedUIDs(jobKey) diff = cmp.Diff(uids, gotExpectedUIDs) return diff == "", nil @@ -5172,6 +5236,67 @@ func TestFinalizersRemovedExpectations(t *testing.T) { } } +func TestFinalizerCleanup(t *testing.T) { + _, ctx := 
ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientset := fake.NewSimpleClientset() + sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) + manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset) + if err != nil { + t.Fatalf("Error creating Job controller: %v", err) + } + manager.podStoreSynced = alwaysReady + manager.jobStoreSynced = alwaysReady + + // Initialize the controller with 0 workers to make sure the + // pod finalizers are not removed by the "syncJob" function. + go manager.Run(ctx, 0) + + // Start the Pod and Job informers. + sharedInformers.Start(ctx.Done()) + sharedInformers.WaitForCacheSync(ctx.Done()) + + // Create a simple Job + job := newJob(1, 1, 1, batch.NonIndexedCompletion) + job, err = clientset.BatchV1().Jobs(job.GetNamespace()).Create(ctx, job, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Creating job: %v", err) + } + + // Create a Pod with the job tracking finalizer + pod := newPod("test-pod", job) + pod.Finalizers = append(pod.Finalizers, batch.JobTrackingFinalizer) + pod, err = clientset.CoreV1().Pods(pod.GetNamespace()).Create(ctx, pod, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Creating pod: %v", err) + } + + // Mark Job as complete. + job.Status.Conditions = append(job.Status.Conditions, batch.JobCondition{ + Type: batch.JobComplete, + Status: v1.ConditionTrue, + }) + _, err = clientset.BatchV1().Jobs(job.GetNamespace()).UpdateStatus(ctx, job, metav1.UpdateOptions{}) + if err != nil { + t.Fatalf("Updating job status: %v", err) + } + + // Verify the pod finalizer is removed for a finished Job, + // even if the jobs pods are not tracked by the main reconciliation loop. + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, wait.ForeverTestTimeout, true, func(ctx context.Context) (bool, error) { + p, err := clientset.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + return !hasJobTrackingFinalizer(p), nil + }); err != nil { + t.Errorf("Waiting for Pod to get the finalizer removed: %v", err) + } + +} + func checkJobCompletionLabel(t *testing.T, p *v1.PodTemplateSpec) { t.Helper() labels := p.GetLabels() diff --git a/pkg/controller/job/metrics/metrics.go b/pkg/controller/job/metrics/metrics.go index 1d49efc232493..39a82f53f9b9b 100644 --- a/pkg/controller/job/metrics/metrics.go +++ b/pkg/controller/job/metrics/metrics.go @@ -38,7 +38,7 @@ var ( Name: "job_sync_duration_seconds", Help: "The time it took to sync a job", StabilityLevel: metrics.STABLE, - Buckets: metrics.ExponentialBuckets(0.001, 2, 15), + Buckets: metrics.ExponentialBuckets(0.004, 2, 15), }, []string{"completion_mode", "result", "action"}, ) @@ -114,6 +114,35 @@ var ( that have the finalizer batch.kubernetes.io/job-tracking The event label can be "add" or "delete".`, }, []string{"event"}) + + // JobFinishedIndexesTotal records the number of finished indexes. + JobFinishedIndexesTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: JobControllerSubsystem, + Name: "job_finished_indexes_total", + Help: `The number of finished indexes. Possible values for the + status label are: "succeeded", "failed". 
Possible values for the + backoffLimit label are: "perIndex" and "global"`, + }, + []string{"status", "backoffLimit"}) + + // JobPodsCreationTotal records the number of pods created by the job controller + // based on the reason for their creation (i.e. if PodReplacementPolicy was specified) + // and the status of the creation (i.e. if the Pod creation succeeded or failed). + // Possible label values: + // reason: new, recreate_terminating_or_failed, recreate_failed + // status: succeeded, failed + JobPodsCreationTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: JobControllerSubsystem, + Name: "job_pods_creation_total", + Help: `The number of Pods created by the Job controller labelled with a reason for the Pod creation. +This metric also distinguishes between Pods created using different PodReplacementPolicy settings. +Possible values of the "reason" label are: +"new", "recreate_terminating_or_failed", "recreate_failed". +Possible values of the "status" label are: +"succeeded", "failed".`, + }, []string{"reason", "status"}) ) const ( @@ -136,7 +165,7 @@ const ( // parallelism. JobSyncActionPodsDeleted = "pods_deleted" - // Possible values for "result" label in the above metrics. + // Possible values for "result" and "status" (job_pods_creation_total) labels in the above metrics. Succeeded = "succeeded" Failed = "failed" @@ -145,6 +174,12 @@ const ( // metric. Add = "add" Delete = "delete" + + // Possible values for "reason" label in the job_pods_creation_total metric. + + PodCreateNew = "new" + PodRecreateTerminatingOrFailed = "recreate_terminating_or_failed" + PodRecreateFailed = "recreate_failed" ) var registerMetrics sync.Once @@ -158,5 +193,7 @@ func Register() { legacyregistry.MustRegister(JobPodsFinished) legacyregistry.MustRegister(PodFailuresHandledByFailurePolicy) legacyregistry.MustRegister(TerminatedPodsTrackingFinalizerTotal) + legacyregistry.MustRegister(JobFinishedIndexesTotal) + legacyregistry.MustRegister(JobPodsCreationTotal) }) } diff --git a/pkg/controller/job/pod_failure_policy_test.go b/pkg/controller/job/pod_failure_policy_test.go index 28e6b6d89003f..a0e8b61ee3618 100644 --- a/pkg/controller/job/pod_failure_policy_test.go +++ b/pkg/controller/job/pod_failure_policy_test.go @@ -27,7 +27,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" _ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/features" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestMatchPodFailurePolicy(t *testing.T) { @@ -83,7 +83,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { }, }, }, - wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 1"), + wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 1"), wantCountFailed: true, wantAction: &failJob, }, @@ -161,7 +161,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { }, }, }, - wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 1"), + wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 1"), wantCountFailed: true, wantAction: &failJob, }, @@ -244,7 +244,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { }, }, }, - wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at 
index 0"), + wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 0"), wantCountFailed: true, wantAction: &failJob, }, @@ -395,7 +395,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { }, }, }, - wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 1 matching FailJob rule at index 0"), + wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 1 matching FailJob rule at index 0"), wantCountFailed: true, wantAction: &failJob, }, @@ -434,7 +434,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { }, }, }, - wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 6 matching FailJob rule at index 1"), + wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 6 matching FailJob rule at index 1"), wantCountFailed: true, wantAction: &failJob, }, @@ -736,7 +736,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { }, }, }, - wantJobFailureMessage: pointer.String("Pod default/mypod has condition DisruptionTarget matching FailJob rule at index 0"), + wantJobFailureMessage: ptr.To("Pod default/mypod has condition DisruptionTarget matching FailJob rule at index 0"), wantCountFailed: true, wantAction: &failJob, }, diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go index 13fc26ddfffab..10f0dce5a447d 100644 --- a/pkg/controller/nodeipam/ipam/cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go @@ -22,18 +22,15 @@ import ( "net" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" informers "k8s.io/client-go/informers/core/v1" - networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" clientset "k8s.io/client-go/kubernetes" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/features" ) // CIDRAllocatorType is the type of the allocator to use. @@ -43,9 +40,6 @@ const ( // RangeAllocatorType is the allocator that uses an internal CIDR // range allocator to do node CIDR range allocations. RangeAllocatorType CIDRAllocatorType = "RangeAllocator" - // MultiCIDRRangeAllocatorType is the allocator that uses an internal CIDR - // range allocator to do node CIDR range allocations. - MultiCIDRRangeAllocatorType CIDRAllocatorType = "MultiCIDRRangeAllocator" // CloudAllocatorType is the allocator that uses cloud platform // support to do node CIDR range allocations. CloudAllocatorType CIDRAllocatorType = "CloudAllocator" @@ -119,7 +113,7 @@ type nodeReservedCIDRs struct { } // New creates a new CIDR range allocator. 
-func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, clusterCIDRInformer networkinginformers.ClusterCIDRInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) { +func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) { logger := klog.FromContext(ctx) nodeList, err := listNodes(logger, kubeClient) if err != nil { @@ -129,12 +123,6 @@ func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovide switch allocatorType { case RangeAllocatorType: return NewCIDRRangeAllocator(logger, kubeClient, nodeInformer, allocatorParams, nodeList) - case MultiCIDRRangeAllocatorType: - if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { - return nil, fmt.Errorf("invalid CIDR allocator type: %v, feature gate %v must be enabled", allocatorType, features.MultiCIDRRangeAllocator) - } - return NewMultiCIDRRangeAllocator(ctx, kubeClient, nodeInformer, clusterCIDRInformer, allocatorParams, nodeList, nil) - case CloudAllocatorType: return NewCloudCIDRAllocator(logger, kubeClient, cloud, nodeInformer) default: diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go b/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go deleted file mode 100644 index 1c3eedc7d1768..0000000000000 --- a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ipam - -import ( - "math" - - cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" -) - -// A PriorityQueue implementation based on https://pkg.go.dev/container/heap#example-package-PriorityQueue - -// An PriorityQueueItem is something we manage in a priority queue. -type PriorityQueueItem struct { - clusterCIDR *cidrset.ClusterCIDR - // labelMatchCount is the first determinant of priority. - labelMatchCount int - // selectorString is a string representation of the labelSelector associated with the cidrSet. - selectorString string - // index is needed by update and is maintained by the heap.Interface methods. - index int // The index of the item in the heap. -} - -// A PriorityQueue implements heap.Interface and holds PriorityQueueItems. -type PriorityQueue []*PriorityQueueItem - -func (pq PriorityQueue) Len() int { return len(pq) } - -// Less compares the priority queue items, to store in a min heap. -// Less(i,j) == true denotes i has higher priority than j. -func (pq PriorityQueue) Less(i, j int) bool { - if pq[i].labelMatchCount != pq[j].labelMatchCount { - // P0: CidrSet with higher number of matching labels has the highest priority. - return pq[i].labelMatchCount > pq[j].labelMatchCount - } - - // If the count of matching labels is equal, compare the max allocatable pod CIDRs. 
- if pq[i].maxAllocatable() != pq[j].maxAllocatable() { - // P1: CidrSet with fewer allocatable pod CIDRs has higher priority. - return pq[i].maxAllocatable() < pq[j].maxAllocatable() - } - - // If the value of allocatable pod CIDRs is equal, compare the node mask size. - if pq[i].nodeMaskSize() != pq[j].nodeMaskSize() { - // P2: CidrSet with a PerNodeMaskSize having fewer IPs has higher priority. - // For example, `27` (32 IPs) picked before `25` (128 IPs). - return pq[i].nodeMaskSize() > pq[j].nodeMaskSize() - } - - // If the per node mask size are equal compare the CIDR labels. - if pq[i].selectorString != pq[j].selectorString { - // P3: CidrSet having label with lower alphanumeric value has higher priority. - return pq[i].selectorString < pq[j].selectorString - } - - // P4: CidrSet having an alpha-numerically smaller IP address value has a higher priority. - return pq[i].cidrLabel() < pq[j].cidrLabel() -} - -func (pq PriorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -func (pq *PriorityQueue) Push(x interface{}) { - n := len(*pq) - if item, ok := x.(*PriorityQueueItem); ok { - item.index = n - *pq = append(*pq, item) - } -} - -func (pq *PriorityQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - old[n-1] = nil // avoid memory leak. - item.index = -1 // for safety. - *pq = old[0 : n-1] - return item -} - -// maxAllocatable computes the minimum value of the MaxCIDRs for a ClusterCIDR. -// It compares the MaxCIDRs for each CIDR family and returns the minimum. -// e.g. IPv4 - 10.0.0.0/16 PerNodeMaskSize: 24 MaxCIDRs = 256 -// IPv6 - ff:ff::/120 PerNodeMaskSize: 120 MaxCIDRs = 1 -// MaxAllocatable for this ClusterCIDR = 1 -func (pqi *PriorityQueueItem) maxAllocatable() int { - ipv4Allocatable := math.MaxInt - ipv6Allocatable := math.MaxInt - - if pqi.clusterCIDR.IPv4CIDRSet != nil { - ipv4Allocatable = pqi.clusterCIDR.IPv4CIDRSet.MaxCIDRs - } - - if pqi.clusterCIDR.IPv6CIDRSet != nil { - ipv6Allocatable = pqi.clusterCIDR.IPv6CIDRSet.MaxCIDRs - } - - if ipv4Allocatable < ipv6Allocatable { - return ipv4Allocatable - } - - return ipv6Allocatable -} - -// nodeMaskSize returns IPv4 NodeMaskSize if present, else returns IPv6 NodeMaskSize. -// Note the requirement: 32 - IPv4 NodeMaskSize == 128 - IPv6 NodeMaskSize -// Due to the above requirement it does not matter which NodeMaskSize we compare. -func (pqi *PriorityQueueItem) nodeMaskSize() int { - if pqi.clusterCIDR.IPv4CIDRSet != nil { - return pqi.clusterCIDR.IPv4CIDRSet.NodeMaskSize - } - - return pqi.clusterCIDR.IPv6CIDRSet.NodeMaskSize -} - -// cidrLabel returns IPv4 CIDR if present, else returns IPv6 CIDR. -func (pqi *PriorityQueueItem) cidrLabel() string { - if pqi.clusterCIDR.IPv4CIDRSet != nil { - return pqi.clusterCIDR.IPv4CIDRSet.Label - } - - return pqi.clusterCIDR.IPv6CIDRSet.Label -} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go b/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go deleted file mode 100644 index 357592f6ba11e..0000000000000 --- a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ipam - -import ( - "container/heap" - "testing" - - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" - utilnet "k8s.io/utils/net" -) - -func createTestPriorityQueueItem(name, cidr, selectorString string, labelMatchCount, perNodeHostBits int) *PriorityQueueItem { - _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr) - cidrSet, _ := multicidrset.NewMultiCIDRSet(clusterCIDR, perNodeHostBits) - - return &PriorityQueueItem{ - clusterCIDR: &multicidrset.ClusterCIDR{ - Name: name, - IPv4CIDRSet: cidrSet, - }, - labelMatchCount: labelMatchCount, - selectorString: selectorString, - } -} - -func TestPriorityQueue(t *testing.T) { - - pqi1 := createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8) - pqi2 := createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8) - pqi3 := createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8) - pqi4 := createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6) - pqi5 := createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6) - pqi6 := createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6) - - for _, testQueue := range []struct { - name string - items []*PriorityQueueItem - want *PriorityQueueItem - }{ - {"Test queue with single item", []*PriorityQueueItem{pqi1}, pqi1}, - {"Test queue with items having different labelMatchCount", []*PriorityQueueItem{pqi1, pqi2}, pqi2}, - {"Test queue with items having same labelMatchCount, different max Allocatable Pod CIDRs", []*PriorityQueueItem{pqi1, pqi2, pqi3}, pqi2}, - {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, different PerNodeMaskSize", []*PriorityQueueItem{pqi1, pqi2, pqi4}, pqi4}, - {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5}, pqi4}, - {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5, pqi6}, pqi4}, - } { - pq := make(PriorityQueue, 0) - for _, pqi := range testQueue.items { - heap.Push(&pq, pqi) - } - - got := heap.Pop(&pq) - - if got != testQueue.want { - t.Errorf("Error, wanted: %+v, got: %+v", testQueue.want, got) - } - } -} - -func TestLess(t *testing.T) { - - for _, testQueue := range []struct { - name string - items []*PriorityQueueItem - want bool - }{ - { - name: "different labelMatchCount, i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 2, 8), - createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 1, 8), - }, - want: true, - }, - { - name: "different labelMatchCount, i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8), - createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), - }, - want: false, - }, - { - name: "same labelMatchCount, different max 
allocatable cidrs, i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), - createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8), - }, - want: true, - }, - { - name: "same labelMatchCount, different max allocatable cidrs, i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr2", "10.1.0.0/16", "foo=bar,name=test2", 2, 8), - createTestPriorityQueueItem("cidr3", "172.16.0.0/24", "foo=bar,name=test3", 2, 8), - }, - want: false, - }, - { - name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr2", "10.1.0.0/26", "foo=bar,name=test2", 2, 6), - createTestPriorityQueueItem("cidr4", "10.1.1.0/24", "abc=bar,name=test4", 2, 8), - }, - want: true, - }, - { - name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), - }, - want: false, - }, - { - name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), - createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6), - }, - want: true, - }, - { - name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6), - createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6), - }, - want: false, - }, - { - name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), - createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6), - }, - want: true, - }, - { - name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6), - createTestPriorityQueueItem("cidr6", "10.0.3.0/26", "abc=bar,name=test4", 2, 6), - }, - want: false, - }, - } { - var pq PriorityQueue - pq = testQueue.items - got := pq.Less(0, 1) - if got != testQueue.want { - t.Errorf("Error, wanted: %v, got: %v\nTest %q \npq[0]: %+v \npq[1]: %+v ", testQueue.want, got, testQueue.name, pq[0], pq[1]) - } - } -} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go deleted file mode 100644 index 0f3b6a3ef1b73..0000000000000 --- a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go +++ /dev/null @@ -1,1322 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ipam - -import ( - "container/heap" - "context" - "errors" - "fmt" - "math" - "net" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - informers "k8s.io/client-go/informers/core/v1" - networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - corelisters "k8s.io/client-go/listers/core/v1" - networkinglisters "k8s.io/client-go/listers/networking/v1alpha1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - nodeutil "k8s.io/component-helpers/node/util" - "k8s.io/klog/v2" - cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" - controllerutil "k8s.io/kubernetes/pkg/controller/util/node" - "k8s.io/kubernetes/pkg/util/slice" - netutil "k8s.io/utils/net" -) - -const ( - defaultClusterCIDRKey = "kubernetes.io/clusterCIDR" - defaultClusterCIDRValue = "default" - defaultClusterCIDRName = "default-cluster-cidr" - defaultClusterCIDRAPIVersion = "networking.k8s.io/v1alpha1" - clusterCIDRFinalizer = "networking.k8s.io/cluster-cidr-finalizer" - ipv4MaxCIDRMask = 32 - ipv6MaxCIDRMask = 128 - minPerNodeHostBits = 4 -) - -// CIDRs are reserved, then node resource is patched with them. -// multiCIDRNodeReservedCIDRs holds the reservation info for a node. -type multiCIDRNodeReservedCIDRs struct { - nodeReservedCIDRs - clusterCIDR *cidrset.ClusterCIDR -} - -type multiCIDRRangeAllocator struct { - client clientset.Interface - // nodeLister is able to list/get nodes and is populated by the shared informer passed to controller. - nodeLister corelisters.NodeLister - // nodesSynced returns true if the node shared informer has been synced at least once. - nodesSynced cache.InformerSynced - // clusterCIDRLister is able to list/get clustercidrs and is populated by the shared informer passed to controller. - clusterCIDRLister networkinglisters.ClusterCIDRLister - // clusterCIDRSynced returns true if the clustercidr shared informer has been synced at least once. - clusterCIDRSynced cache.InformerSynced - // Channel that is used to pass updating Nodes and their reserved CIDRs to the background. - // This increases a throughput of CIDR assignment by not blocking on long operations. - nodeCIDRUpdateChannel chan multiCIDRNodeReservedCIDRs - broadcaster record.EventBroadcaster - recorder record.EventRecorder - // queues are where incoming work is placed to de-dup and to allow "easy" - // rate limited requeues on errors - cidrQueue workqueue.RateLimitingInterface - nodeQueue workqueue.RateLimitingInterface - - // lock guards cidrMap to avoid races in CIDR allocation. 
- lock *sync.Mutex - // cidrMap maps ClusterCIDR labels to internal ClusterCIDR objects. - cidrMap map[string][]*cidrset.ClusterCIDR -} - -// NewMultiCIDRRangeAllocator returns a CIDRAllocator to allocate CIDRs for node (one for each ip family). -// Caller must always pass in a list of existing nodes to the new allocator. -// NodeList is only nil in testing. -func NewMultiCIDRRangeAllocator( - ctx context.Context, - client clientset.Interface, - nodeInformer informers.NodeInformer, - clusterCIDRInformer networkinginformers.ClusterCIDRInformer, - allocatorParams CIDRAllocatorParams, - nodeList *v1.NodeList, - testCIDRMap map[string][]*cidrset.ClusterCIDR, -) (CIDRAllocator, error) { - logger := klog.FromContext(ctx) - if client == nil { - logger.Error(nil, "kubeClient is nil when starting multi CIDRRangeAllocator") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) - } - - eventBroadcaster := record.NewBroadcaster() - eventSource := v1.EventSource{ - Component: "multiCIDRRangeAllocator", - } - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, eventSource) - - ra := &multiCIDRRangeAllocator{ - client: client, - nodeLister: nodeInformer.Lister(), - nodesSynced: nodeInformer.Informer().HasSynced, - clusterCIDRLister: clusterCIDRInformer.Lister(), - clusterCIDRSynced: clusterCIDRInformer.Informer().HasSynced, - nodeCIDRUpdateChannel: make(chan multiCIDRNodeReservedCIDRs, cidrUpdateQueueSize), - broadcaster: eventBroadcaster, - recorder: recorder, - cidrQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "multi_cidr_range_allocator_cidr"), - nodeQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "multi_cidr_range_allocator_node"), - lock: &sync.Mutex{}, - cidrMap: make(map[string][]*cidrset.ClusterCIDR, 0), - } - - // testCIDRMap is only set for testing purposes. - if len(testCIDRMap) > 0 { - ra.cidrMap = testCIDRMap - logger.Info("TestCIDRMap should only be set for testing purposes, if this is seen in production logs, it might be a misconfiguration or a bug") - } - - ccList, err := listClusterCIDRs(ctx, client) - if err != nil { - return nil, err - } - - if ccList == nil { - ccList = &networkingv1alpha1.ClusterCIDRList{} - } - createDefaultClusterCIDR(logger, ccList, allocatorParams) - - // Regenerate the cidrMaps from the existing ClusterCIDRs. - for _, clusterCIDR := range ccList.Items { - logger.Info("Regenerating existing ClusterCIDR", "clusterCIDR", clusterCIDR) - // Create an event for invalid ClusterCIDRs, do not crash on failures. - if err := ra.reconcileBootstrap(ctx, &clusterCIDR); err != nil { - logger.Error(err, "Error while regenerating existing ClusterCIDR") - ra.recorder.Event(&clusterCIDR, "Warning", "InvalidClusterCIDR encountered while regenerating ClusterCIDR during bootstrap.", err.Error()) - } - } - - clusterCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err == nil { - ra.cidrQueue.Add(key) - } - }, - UpdateFunc: func(old, new interface{}) { - key, err := cache.MetaNamespaceKeyFunc(new) - if err == nil { - ra.cidrQueue.Add(key) - } - }, - DeleteFunc: func(obj interface{}) { - // IndexerInformer uses a delta nodeQueue, therefore for deletes we have to use this - // key function. 
- key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err == nil { - ra.cidrQueue.Add(key) - } - }, - }) - - if allocatorParams.ServiceCIDR != nil { - ra.filterOutServiceRange(logger, allocatorParams.ServiceCIDR) - } else { - logger.Info("No Service CIDR provided. Skipping filtering out service addresses") - } - - if allocatorParams.SecondaryServiceCIDR != nil { - ra.filterOutServiceRange(logger, allocatorParams.SecondaryServiceCIDR) - } else { - logger.Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses") - } - - if nodeList != nil { - for _, node := range nodeList.Items { - if len(node.Spec.PodCIDRs) == 0 { - logger.V(4).Info("Node has no CIDR, ignoring", "node", klog.KObj(&node)) - continue - } - logger.Info("Node has CIDR, occupying it in CIDR map", "node", klog.KObj(&node), "podCIDRs", node.Spec.PodCIDRs) - if err := ra.occupyCIDRs(logger, &node); err != nil { - // This will happen if: - // 1. We find garbage in the podCIDRs field. Retrying is useless. - // 2. CIDR out of range: This means ClusterCIDR is not yet created - // This error will keep crashing controller-manager until the - // appropriate ClusterCIDR has been created - return nil, err - } - } - } - - nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err == nil { - ra.nodeQueue.Add(key) - } - }, - UpdateFunc: func(old, new interface{}) { - key, err := cache.MetaNamespaceKeyFunc(new) - if err == nil { - ra.nodeQueue.Add(key) - } - }, - DeleteFunc: func(obj interface{}) { - // The informer cache no longer has the object, and since Node doesn't have a finalizer, - // we don't see the Update with DeletionTimestamp != 0. - // TODO: instead of executing the operation directly in the handler, build a small cache with key node.Name - // and value PodCIDRs use ReleaseCIDR on the reconcile loop so we can retry on `ReleaseCIDR` failures. - ra.ReleaseCIDR(logger, obj.(*v1.Node)) - // IndexerInformer uses a delta nodeQueue, therefore for deletes we have to use this - // key function. - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err == nil { - ra.nodeQueue.Add(key) - } - }, - }) - - return ra, nil -} - -func (r *multiCIDRRangeAllocator) Run(ctx context.Context) { - defer utilruntime.HandleCrash() - - // Start event processing pipeline. - logger := klog.FromContext(ctx) - r.broadcaster.StartStructuredLogging(0) - logger.Info("Started sending events to API Server") - r.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: r.client.CoreV1().Events("")}) - defer r.broadcaster.Shutdown() - - defer r.cidrQueue.ShutDown() - defer r.nodeQueue.ShutDown() - - logger.Info("Starting Multi CIDR Range allocator") - defer logger.Info("Shutting down Multi CIDR Range allocator") - - if !cache.WaitForNamedCacheSync("multi_cidr_range_allocator", ctx.Done(), r.nodesSynced, r.clusterCIDRSynced) { - return - } - - for i := 0; i < cidrUpdateWorkers; i++ { - go wait.UntilWithContext(ctx, r.runCIDRWorker, time.Second) - go wait.UntilWithContext(ctx, r.runNodeWorker, time.Second) - } - - <-ctx.Done() -} - -// runWorker is a long-running function that will continually call the -// processNextWorkItem function in order to read and process a message on the -// cidrQueue. 
-func (r *multiCIDRRangeAllocator) runCIDRWorker(ctx context.Context) { - for r.processNextCIDRWorkItem(ctx) { - } -} - -// processNextWorkItem will read a single work item off the cidrQueue and -// attempt to process it, by calling the syncHandler. -func (r *multiCIDRRangeAllocator) processNextCIDRWorkItem(ctx context.Context) bool { - logger := klog.FromContext(ctx) - obj, shutdown := r.cidrQueue.Get() - if shutdown { - return false - } - - // We wrap this block in a func so we can defer c.cidrQueue.Done. - err := func(ctx context.Context, obj interface{}) error { - // We call Done here so the cidrQueue knows we have finished - // processing this item. We also must remember to call Forget if we - // do not want this work item being re-queued. For example, we do - // not call Forget if a transient error occurs, instead the item is - // put back on the cidrQueue and attempted again after a back-off - // period. - defer r.cidrQueue.Done(obj) - var key string - var ok bool - // We expect strings to come off the cidrQueue. These are of the - // form namespace/name. We do this as the delayed nature of the - // cidrQueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // cidrQueue. - if key, ok = obj.(string); !ok { - // As the item in the cidrQueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. - r.cidrQueue.Forget(obj) - utilruntime.HandleError(fmt.Errorf("expected string in cidrQueue but got %#v", obj)) - return nil - } - // Run the syncHandler, passing it the namespace/name string of the - // Foo resource to be synced. - if err := r.syncClusterCIDR(ctx, key); err != nil { - // Put the item back on the cidrQueue to handle any transient errors. - r.cidrQueue.AddRateLimited(key) - return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) - } - // Finally, if no error occurs we Forget this item so it does not - // get cidrQueued again until another change happens. - r.cidrQueue.Forget(obj) - logger.Info("Successfully synced", "key", key) - return nil - }(ctx, obj) - - if err != nil { - utilruntime.HandleError(err) - return true - } - - return true -} - -func (r *multiCIDRRangeAllocator) runNodeWorker(ctx context.Context) { - for r.processNextNodeWorkItem(ctx) { - } -} - -// processNextWorkItem will read a single work item off the cidrQueue and -// attempt to process it, by calling the syncHandler. -func (r *multiCIDRRangeAllocator) processNextNodeWorkItem(ctx context.Context) bool { - obj, shutdown := r.nodeQueue.Get() - if shutdown { - return false - } - - // We wrap this block in a func so we can defer c.cidrQueue.Done. - err := func(logger klog.Logger, obj interface{}) error { - // We call Done here so the workNodeQueue knows we have finished - // processing this item. We also must remember to call Forget if we - // do not want this work item being re-queued. For example, we do - // not call Forget if a transient error occurs, instead the item is - // put back on the nodeQueue and attempted again after a back-off - // period. - defer r.nodeQueue.Done(obj) - var key string - var ok bool - // We expect strings to come off the workNodeQueue. These are of the - // form namespace/name. We do this as the delayed nature of the - // workNodeQueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // workNodeQueue. 
- if key, ok = obj.(string); !ok { - // As the item in the workNodeQueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. - r.nodeQueue.Forget(obj) - utilruntime.HandleError(fmt.Errorf("expected string in workNodeQueue but got %#v", obj)) - return nil - } - // Run the syncHandler, passing it the namespace/name string of the - // Foo resource to be synced. - if err := r.syncNode(logger, key); err != nil { - // Put the item back on the cidrQueue to handle any transient errors. - r.nodeQueue.AddRateLimited(key) - return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) - } - // Finally, if no error occurs we Forget this item so it does not - // get nodeQueue again until another change happens. - r.nodeQueue.Forget(obj) - logger.Info("Successfully synced", "key", key) - return nil - }(klog.FromContext(ctx), obj) - - if err != nil { - utilruntime.HandleError(err) - return true - } - - return true -} - -func (r *multiCIDRRangeAllocator) syncNode(logger klog.Logger, key string) error { - startTime := time.Now() - defer func() { - logger.V(4).Info("Finished syncing Node request", "node", key, "elapsed", time.Since(startTime)) - }() - - node, err := r.nodeLister.Get(key) - if apierrors.IsNotFound(err) { - logger.V(3).Info("node has been deleted", "node", key) - // TODO: obtain the node object information to call ReleaseCIDR from here - // and retry if there is an error. - return nil - } - if err != nil { - return err - } - // Check the DeletionTimestamp to determine if object is under deletion. - if !node.DeletionTimestamp.IsZero() { - logger.V(3).Info("node is being deleted", "node", key) - return r.ReleaseCIDR(logger, node) - } - return r.AllocateOrOccupyCIDR(logger, node) -} - -// needToAddFinalizer checks if a finalizer should be added to the object. -func needToAddFinalizer(obj metav1.Object, finalizer string) bool { - return obj.GetDeletionTimestamp() == nil && !slice.ContainsString(obj.GetFinalizers(), - finalizer, nil) -} - -func (r *multiCIDRRangeAllocator) syncClusterCIDR(ctx context.Context, key string) error { - startTime := time.Now() - logger := klog.FromContext(ctx) - defer func() { - logger.V(4).Info("Finished syncing clusterCIDR request", "key", key, "latency", time.Since(startTime)) - }() - - clusterCIDR, err := r.clusterCIDRLister.Get(key) - if apierrors.IsNotFound(err) { - logger.V(3).Info("clusterCIDR has been deleted", "key", key) - return nil - } - - if err != nil { - return err - } - - // Check the DeletionTimestamp to determine if object is under deletion. - if !clusterCIDR.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, clusterCIDR) - } - return r.reconcileCreate(ctx, clusterCIDR) -} - -// occupyCIDRs marks node.PodCIDRs[...] as used in allocator's tracked cidrSet. 
-func (r *multiCIDRRangeAllocator) occupyCIDRs(logger klog.Logger, node *v1.Node) error { - - err := func(node *v1.Node) error { - - if len(node.Spec.PodCIDRs) == 0 { - return nil - } - clusterCIDRList, err := r.orderedMatchingClusterCIDRs(logger, node, true) - if err != nil { - return err - } - - for _, clusterCIDR := range clusterCIDRList { - occupiedCount := 0 - - for _, cidr := range node.Spec.PodCIDRs { - _, podCIDR, err := netutil.ParseCIDRSloppy(cidr) - if err != nil { - return fmt.Errorf("failed to parse CIDR %s on Node %v: %w", cidr, node.Name, err) - } - - logger.Info("occupy CIDR for node", "CIDR", cidr, "node", klog.KObj(node)) - - if err := r.Occupy(clusterCIDR, podCIDR); err != nil { - logger.V(3).Info("Could not occupy cidr, trying next range", "podCIDRs", node.Spec.PodCIDRs, "err", err) - break - } - - occupiedCount++ - } - - // Mark CIDRs as occupied only if the CCC is able to occupy all the node CIDRs. - if occupiedCount == len(node.Spec.PodCIDRs) { - clusterCIDR.AssociatedNodes[node.Name] = true - return nil - } - } - - return fmt.Errorf("could not occupy cidrs: %v, No matching ClusterCIDRs found", node.Spec.PodCIDRs) - }(node) - - return err -} - -// associatedCIDRSet returns the CIDRSet, based on the ip family of the CIDR. -func (r *multiCIDRRangeAllocator) associatedCIDRSet(clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) (*cidrset.MultiCIDRSet, error) { - switch { - case netutil.IsIPv4CIDR(cidr): - return clusterCIDR.IPv4CIDRSet, nil - case netutil.IsIPv6CIDR(cidr): - return clusterCIDR.IPv6CIDRSet, nil - default: - return nil, fmt.Errorf("invalid cidr: %v", cidr) - } -} - -// Occupy marks the CIDR as occupied in the allocatedCIDRMap of the cidrSet. -func (r *multiCIDRRangeAllocator) Occupy(clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) error { - currCIDRSet, err := r.associatedCIDRSet(clusterCIDR, cidr) - if err != nil { - return err - } - - if err := currCIDRSet.Occupy(cidr); err != nil { - return fmt.Errorf("unable to occupy cidr %v in cidrSet", cidr) - } - - return nil -} - -// Release marks the CIDR as free in the cidrSet used bitmap, -// Also removes the CIDR from the allocatedCIDRSet. -func (r *multiCIDRRangeAllocator) Release(logger klog.Logger, clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) error { - currCIDRSet, err := r.associatedCIDRSet(clusterCIDR, cidr) - if err != nil { - return err - } - - if err := currCIDRSet.Release(cidr); err != nil { - logger.Info("Unable to release cidr in cidrSet", "CIDR", cidr) - return err - } - - return nil -} - -// AllocateOrOccupyCIDR allocates a CIDR to the node if the node doesn't have a -// CIDR already allocated, occupies the CIDR and marks as used if the node -// already has a PodCIDR assigned. -// WARNING: If you're adding any return calls or defer any more work from this -// function you have to make sure to update nodesInProcessing properly with the -// disposition of the node when the work is done. 
-func (r *multiCIDRRangeAllocator) AllocateOrOccupyCIDR(logger klog.Logger, node *v1.Node) error { - r.lock.Lock() - defer r.lock.Unlock() - - if node == nil { - return nil - } - - if len(node.Spec.PodCIDRs) > 0 { - return r.occupyCIDRs(logger, node) - } - - cidrs, clusterCIDR, err := r.prioritizedCIDRs(logger, node) - if err != nil { - controllerutil.RecordNodeStatusChange(logger, r.recorder, node, "CIDRNotAvailable") - return fmt.Errorf("failed to get cidrs for node %s", node.Name) - } - - if len(cidrs) == 0 { - controllerutil.RecordNodeStatusChange(logger, r.recorder, node, "CIDRNotAvailable") - return fmt.Errorf("no cidrSets with matching labels found for node %s", node.Name) - } - - // allocate and queue the assignment. - allocated := multiCIDRNodeReservedCIDRs{ - nodeReservedCIDRs: nodeReservedCIDRs{ - nodeName: node.Name, - allocatedCIDRs: cidrs, - }, - clusterCIDR: clusterCIDR, - } - - return r.updateCIDRsAllocation(logger, allocated) -} - -// ReleaseCIDR marks node.podCIDRs[...] as unused in our tracked cidrSets. -func (r *multiCIDRRangeAllocator) ReleaseCIDR(logger klog.Logger, node *v1.Node) error { - r.lock.Lock() - defer r.lock.Unlock() - - if node == nil || len(node.Spec.PodCIDRs) == 0 { - return nil - } - - clusterCIDR, err := r.allocatedClusterCIDR(logger, node) - if err != nil { - return err - } - - for _, cidr := range node.Spec.PodCIDRs { - _, podCIDR, err := netutil.ParseCIDRSloppy(cidr) - if err != nil { - return fmt.Errorf("failed to parse CIDR %q on Node %q: %w", cidr, node.Name, err) - } - - logger.Info("release CIDR for node", "CIDR", cidr, "node", klog.KObj(node)) - if err := r.Release(logger, clusterCIDR, podCIDR); err != nil { - return fmt.Errorf("failed to release cidr %q from clusterCIDR %q for node %q: %w", cidr, clusterCIDR.Name, node.Name, err) - } - } - - // Remove the node from the ClusterCIDR AssociatedNodes. - delete(clusterCIDR.AssociatedNodes, node.Name) - - return nil -} - -// Marks all CIDRs with subNetMaskSize that belongs to serviceCIDR as used across all cidrs -// so that they won't be assignable. -func (r *multiCIDRRangeAllocator) filterOutServiceRange(logger klog.Logger, serviceCIDR *net.IPNet) { - // Checks if service CIDR has a nonempty intersection with cluster - // CIDR. It is the case if either clusterCIDR contains serviceCIDR with - // clusterCIDR's Mask applied (this means that clusterCIDR contains - // serviceCIDR) or vice versa (which means that serviceCIDR contains - // clusterCIDR). - for _, clusterCIDRList := range r.cidrMap { - for _, clusterCIDR := range clusterCIDRList { - if err := r.occupyServiceCIDR(clusterCIDR, serviceCIDR); err != nil { - logger.Error(err, "Unable to occupy service CIDR") - } - } - } -} - -func (r *multiCIDRRangeAllocator) occupyServiceCIDR(clusterCIDR *cidrset.ClusterCIDR, serviceCIDR *net.IPNet) error { - - cidrSet, err := r.associatedCIDRSet(clusterCIDR, serviceCIDR) - if err != nil { - return err - } - - cidr := cidrSet.ClusterCIDR - - // No need to occupy as Service CIDR doesn't intersect with the current ClusterCIDR. - if !cidr.Contains(serviceCIDR.IP.Mask(cidr.Mask)) && !serviceCIDR.Contains(cidr.IP.Mask(serviceCIDR.Mask)) { - return nil - } - - if err := r.Occupy(clusterCIDR, serviceCIDR); err != nil { - return fmt.Errorf("error filtering out service cidr %v from cluster cidr %v: %w", cidr, serviceCIDR, err) - } - - return nil -} - -// updateCIDRsAllocation assigns CIDR to Node and sends an update to the API server. 
-func (r *multiCIDRRangeAllocator) updateCIDRsAllocation(logger klog.Logger, data multiCIDRNodeReservedCIDRs) error { - err := func(data multiCIDRNodeReservedCIDRs) error { - cidrsString := ipnetToStringList(data.allocatedCIDRs) - node, err := r.nodeLister.Get(data.nodeName) - if err != nil { - logger.Error(err, "Failed while getting node for updating Node.Spec.PodCIDRs", "node", klog.KRef("", data.nodeName)) - return err - } - - // if cidr list matches the proposed, - // then we possibly updated this node - // and just failed to ack the success. - if len(node.Spec.PodCIDRs) == len(data.allocatedCIDRs) { - match := true - for idx, cidr := range cidrsString { - if node.Spec.PodCIDRs[idx] != cidr { - match = false - break - } - } - if match { - logger.V(4).Info("Node already has allocated CIDR. It matches the proposed one.", "node", klog.KObj(node), "CIDRs", data.allocatedCIDRs) - return nil - } - } - - // node has cidrs allocated, release the reserved. - if len(node.Spec.PodCIDRs) != 0 { - logger.Error(nil, "Node already has a CIDR allocated. Releasing the new one", "node", klog.KObj(node), "podCIDRs", node.Spec.PodCIDRs) - for _, cidr := range data.allocatedCIDRs { - if err := r.Release(logger, data.clusterCIDR, cidr); err != nil { - return fmt.Errorf("failed to release cidr %s from clusterCIDR %s for node: %s: %w", cidr, data.clusterCIDR.Name, node.Name, err) - } - } - return nil - } - - // If we reached here, it means that the node has no CIDR currently assigned. So we set it. - for i := 0; i < cidrUpdateRetries; i++ { - if err = nodeutil.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil { - data.clusterCIDR.AssociatedNodes[node.Name] = true - logger.Info("Set node PodCIDR", "node", klog.KObj(node), "podCIDR", cidrsString) - return nil - } - } - // failed release back to the pool. - logger.Error(err, "Failed to update node PodCIDR after attempts", "node", klog.KObj(node), "podCIDR", cidrsString, "retries", cidrUpdateRetries) - controllerutil.RecordNodeStatusChange(logger, r.recorder, node, "CIDRAssignmentFailed") - // We accept the fact that we may leak CIDRs here. This is safer than releasing - // them in case when we don't know if request went through. - // NodeController restart will return all falsely allocated CIDRs to the pool. - if !apierrors.IsServerTimeout(err) { - logger.Error(err, "CIDR assignment for node failed. Releasing allocated CIDR", "node", klog.KObj(node)) - for _, cidr := range data.allocatedCIDRs { - if err := r.Release(logger, data.clusterCIDR, cidr); err != nil { - return fmt.Errorf("failed to release cidr %q from clusterCIDR %q for node: %q: %w", cidr, data.clusterCIDR.Name, node.Name, err) - } - } - } - return err - }(data) - - return err -} - -// defaultNodeSelector generates a label with defaultClusterCIDRKey as the key and -// defaultClusterCIDRValue as the value, it is an internal nodeSelector matching all -// nodes. Only used if no ClusterCIDR selects the node. -func defaultNodeSelector() *v1.NodeSelector { - return &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: defaultClusterCIDRKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{defaultClusterCIDRValue}, - }, - }, - }, - }, - } -} - -// prioritizedCIDRs returns a list of CIDRs to be allocated to the node. -// Returns 1 CIDR if single stack. -// Returns 2 CIDRs , 1 from each ip family if dual stack. 
-func (r *multiCIDRRangeAllocator) prioritizedCIDRs(logger klog.Logger, node *v1.Node) ([]*net.IPNet, *cidrset.ClusterCIDR, error) { - clusterCIDRList, err := r.orderedMatchingClusterCIDRs(logger, node, true) - if err != nil { - return nil, nil, fmt.Errorf("unable to get a clusterCIDR for node %s: %w", node.Name, err) - } - - for _, clusterCIDR := range clusterCIDRList { - cidrs := make([]*net.IPNet, 0) - if clusterCIDR.IPv4CIDRSet != nil { - cidr, err := r.allocateCIDR(clusterCIDR, clusterCIDR.IPv4CIDRSet) - if err != nil { - logger.V(3).Info("Unable to allocate IPv4 CIDR, trying next range", "err", err) - continue - } - cidrs = append(cidrs, cidr) - } - - if clusterCIDR.IPv6CIDRSet != nil { - cidr, err := r.allocateCIDR(clusterCIDR, clusterCIDR.IPv6CIDRSet) - if err != nil { - logger.V(3).Info("Unable to allocate IPv6 CIDR, trying next range", "err", err) - continue - } - cidrs = append(cidrs, cidr) - } - - return cidrs, clusterCIDR, nil - } - return nil, nil, fmt.Errorf("unable to get a clusterCIDR for node %s, no available CIDRs", node.Name) -} - -func (r *multiCIDRRangeAllocator) allocateCIDR(clusterCIDR *cidrset.ClusterCIDR, cidrSet *cidrset.MultiCIDRSet) (*net.IPNet, error) { - - for evaluated := 0; evaluated < cidrSet.MaxCIDRs; evaluated++ { - candidate, lastEvaluated, err := cidrSet.NextCandidate() - if err != nil { - return nil, err - } - - evaluated += lastEvaluated - - if r.cidrInAllocatedList(candidate) { - continue - } - - // Deep Check. - if r.cidrOverlapWithAllocatedList(candidate) { - continue - } - - // Mark the CIDR as occupied in the map. - if err := r.Occupy(clusterCIDR, candidate); err != nil { - return nil, err - } - // Increment the evaluated count metric. - cidrSet.UpdateEvaluatedCount(evaluated) - return candidate, nil - } - return nil, &cidrset.CIDRRangeNoCIDRsRemainingErr{ - CIDR: cidrSet.Label, - } -} - -func (r *multiCIDRRangeAllocator) cidrInAllocatedList(cidr *net.IPNet) bool { - for _, clusterCIDRList := range r.cidrMap { - for _, clusterCIDR := range clusterCIDRList { - cidrSet, _ := r.associatedCIDRSet(clusterCIDR, cidr) - if cidrSet != nil { - if ok := cidrSet.AllocatedCIDRMap[cidr.String()]; ok { - return true - } - } - } - } - return false -} - -func (r *multiCIDRRangeAllocator) cidrOverlapWithAllocatedList(cidr *net.IPNet) bool { - for _, clusterCIDRList := range r.cidrMap { - for _, clusterCIDR := range clusterCIDRList { - cidrSet, _ := r.associatedCIDRSet(clusterCIDR, cidr) - if cidrSet != nil { - for allocated := range cidrSet.AllocatedCIDRMap { - _, allocatedCIDR, _ := netutil.ParseCIDRSloppy(allocated) - if cidr.Contains(allocatedCIDR.IP.Mask(cidr.Mask)) || allocatedCIDR.Contains(cidr.IP.Mask(allocatedCIDR.Mask)) { - return true - } - } - } - } - } - return false -} - -// allocatedClusterCIDR returns the ClusterCIDR from which the node CIDRs were allocated. -func (r *multiCIDRRangeAllocator) allocatedClusterCIDR(logger klog.Logger, node *v1.Node) (*cidrset.ClusterCIDR, error) { - clusterCIDRList, err := r.orderedMatchingClusterCIDRs(logger, node, false) - if err != nil { - return nil, fmt.Errorf("unable to get a clusterCIDR for node %s: %w", node.Name, err) - } - - for _, clusterCIDR := range clusterCIDRList { - if ok := clusterCIDR.AssociatedNodes[node.Name]; ok { - return clusterCIDR, nil - } - } - return nil, fmt.Errorf("no clusterCIDR found associated with node: %s", node.Name) -} - -// orderedMatchingClusterCIDRs returns a list of all the ClusterCIDRs matching the node labels. 
-// The list is ordered with the following priority, which act as tie-breakers. -// P0: ClusterCIDR with higher number of matching labels has the highest priority. -// P1: ClusterCIDR having cidrSet with fewer allocatable Pod CIDRs has higher priority. -// P2: ClusterCIDR with a PerNodeMaskSize having fewer IPs has higher priority. -// P3: ClusterCIDR having label with lower alphanumeric value has higher priority. -// P4: ClusterCIDR with a cidrSet having a smaller IP address value has a higher priority. -// -// orderedMatchingClusterCIDRs takes `occupy` as an argument, it determines whether the function -// is called during an occupy or a release operation. For a release operation, a ClusterCIDR must -// be added to the matching ClusterCIDRs list, irrespective of whether the ClusterCIDR is terminating. -func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(logger klog.Logger, node *v1.Node, occupy bool) ([]*cidrset.ClusterCIDR, error) { - matchingCIDRs := make([]*cidrset.ClusterCIDR, 0) - pq := make(PriorityQueue, 0) - - for label, clusterCIDRList := range r.cidrMap { - labelsMatch, matchCnt, err := r.matchCIDRLabels(logger, node, label) - if err != nil { - return nil, err - } - - if !labelsMatch { - continue - } - - for _, clusterCIDR := range clusterCIDRList { - pqItem := &PriorityQueueItem{ - clusterCIDR: clusterCIDR, - labelMatchCount: matchCnt, - selectorString: label, - } - - // Only push the CIDRsets which are not marked for termination. - // Always push the CIDRsets when marked for release. - if !occupy || !clusterCIDR.Terminating { - heap.Push(&pq, pqItem) - } - } - } - - // Remove the ClusterCIDRs from the PriorityQueue. - // They arrive in descending order of matchCnt, - // if matchCnt is equal it is ordered in ascending order of labels. - for pq.Len() > 0 { - pqItem := heap.Pop(&pq).(*PriorityQueueItem) - matchingCIDRs = append(matchingCIDRs, pqItem.clusterCIDR) - } - - // Append the catch all CIDR config. - defaultSelector, err := nodeSelectorAsSelector(defaultNodeSelector()) - if err != nil { - return nil, err - } - if clusterCIDRList, ok := r.cidrMap[defaultSelector.String()]; ok { - matchingCIDRs = append(matchingCIDRs, clusterCIDRList...) - } - return matchingCIDRs, nil -} - -// matchCIDRLabels Matches the Node labels to CIDR Configs. -// Returns true only if all the labels match, also returns the count of matching labels. -func (r *multiCIDRRangeAllocator) matchCIDRLabels(logger klog.Logger, node *v1.Node, label string) (bool, int, error) { - var labelSet labels.Set - var matchCnt int - labelsMatch := false - - ls, err := labels.Parse(label) - if err != nil { - logger.Error(err, "Unable to parse label to labels.Selector", "label", label) - return labelsMatch, 0, err - } - reqs, selectable := ls.Requirements() - - labelSet = node.ObjectMeta.Labels - if selectable { - matchCnt = 0 - for _, req := range reqs { - if req.Matches(labelSet) { - matchCnt += 1 - } - } - if matchCnt == len(reqs) { - labelsMatch = true - } - } - return labelsMatch, matchCnt, nil -} - -// Methods for handling ClusterCIDRs. - -// createDefaultClusterCIDR creates a default ClusterCIDR if --cluster-cidr has -// been configured. It converts the --cluster-cidr and --per-node-mask-size* flags -// to appropriate ClusterCIDR fields. 
-func createDefaultClusterCIDR(logger klog.Logger, existingConfigList *networkingv1alpha1.ClusterCIDRList, - allocatorParams CIDRAllocatorParams) { - // Create default ClusterCIDR only if --cluster-cidr has been configured - if len(allocatorParams.ClusterCIDRs) == 0 { - return - } - - for _, clusterCIDR := range existingConfigList.Items { - if clusterCIDR.Name == defaultClusterCIDRName { - // Default ClusterCIDR already exists, no further action required. - logger.V(3).Info("Default ClusterCIDR already exists", "defaultClusterCIDRName", defaultClusterCIDRName) - return - } - } - - // Create a default ClusterCIDR as it is not already created. - defaultCIDRConfig := &networkingv1alpha1.ClusterCIDR{ - TypeMeta: metav1.TypeMeta{ - APIVersion: defaultClusterCIDRAPIVersion, - Kind: "ClusterCIDR", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: defaultClusterCIDRName, - }, - Spec: networkingv1alpha1.ClusterCIDRSpec{ - PerNodeHostBits: minPerNodeHostBits, - }, - } - - ipv4PerNodeHostBits := int32(math.MinInt32) - ipv6PerNodeHostBits := int32(math.MinInt32) - isDualstack := false - if len(allocatorParams.ClusterCIDRs) == 2 { - isDualstack = true - } - - for i, cidr := range allocatorParams.ClusterCIDRs { - if netutil.IsIPv4CIDR(cidr) { - defaultCIDRConfig.Spec.IPv4 = cidr.String() - ipv4PerNodeHostBits = ipv4MaxCIDRMask - int32(allocatorParams.NodeCIDRMaskSizes[i]) - if !isDualstack && ipv4PerNodeHostBits > minPerNodeHostBits { - defaultCIDRConfig.Spec.PerNodeHostBits = ipv4PerNodeHostBits - } - } else if netutil.IsIPv6CIDR(cidr) { - defaultCIDRConfig.Spec.IPv6 = cidr.String() - ipv6PerNodeHostBits = ipv6MaxCIDRMask - int32(allocatorParams.NodeCIDRMaskSizes[i]) - if !isDualstack && ipv6PerNodeHostBits > minPerNodeHostBits { - defaultCIDRConfig.Spec.PerNodeHostBits = ipv6PerNodeHostBits - } - } - } - - if isDualstack { - // In case of dualstack CIDRs, currently the default values for PerNodeMaskSize are - // 24 for IPv4 (PerNodeHostBits=8) and 64 for IPv6(PerNodeHostBits=64), there is no - // requirement for the PerNodeHostBits to be equal for IPv4 and IPv6, However with - // the introduction of ClusterCIDRs, we enforce the requirement for a single - // PerNodeHostBits field, thus we choose the minimum PerNodeHostBits value, to avoid - // overflow for IPv4 CIDRs. - if ipv4PerNodeHostBits >= minPerNodeHostBits && ipv4PerNodeHostBits <= ipv6PerNodeHostBits { - defaultCIDRConfig.Spec.PerNodeHostBits = ipv4PerNodeHostBits - } else if ipv6PerNodeHostBits >= minPerNodeHostBits && ipv6PerNodeHostBits <= ipv4MaxCIDRMask { - defaultCIDRConfig.Spec.PerNodeHostBits = ipv6PerNodeHostBits - } - } - - existingConfigList.Items = append(existingConfigList.Items, *defaultCIDRConfig) - - return -} - -// reconcileCreate handles create ClusterCIDR events. -func (r *multiCIDRRangeAllocator) reconcileCreate(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDR) error { - r.lock.Lock() - defer r.lock.Unlock() - - logger := klog.FromContext(ctx) - if needToAddFinalizer(clusterCIDR, clusterCIDRFinalizer) { - logger.V(3).Info("Creating ClusterCIDR", "clusterCIDR", clusterCIDR.Name) - if err := r.createClusterCIDR(ctx, clusterCIDR, false); err != nil { - logger.Error(err, "Unable to create ClusterCIDR", "clusterCIDR", clusterCIDR.Name) - return err - } - } - return nil -} - -// reconcileBootstrap handles creation of existing ClusterCIDRs. -// adds a finalizer if not already present. 
-func (r *multiCIDRRangeAllocator) reconcileBootstrap(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDR) error { - r.lock.Lock() - defer r.lock.Unlock() - - logger := klog.FromContext(ctx) - terminating := false - // Create the ClusterCIDR only if the Spec has not been modified. - if clusterCIDR.Generation > 1 { - terminating = true - err := fmt.Errorf("CIDRs from ClusterCIDR %s will not be used for allocation as it was modified", clusterCIDR.Name) - logger.Error(err, "ClusterCIDR Modified") - } - - logger.V(2).Info("Creating ClusterCIDR during bootstrap", "clusterCIDR", clusterCIDR.Name) - if err := r.createClusterCIDR(ctx, clusterCIDR, terminating); err != nil { - logger.Error(err, "Unable to create ClusterCIDR", "clusterCIDR", clusterCIDR.Name) - return err - } - - return nil -} - -// createClusterCIDR creates and maps the cidrSets in the cidrMap. -func (r *multiCIDRRangeAllocator) createClusterCIDR(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDR, terminating bool) error { - nodeSelector, err := r.nodeSelectorKey(clusterCIDR) - if err != nil { - return fmt.Errorf("unable to get labelSelector key: %w", err) - } - - clusterCIDRSet, err := r.createClusterCIDRSet(clusterCIDR, terminating) - if err != nil { - return fmt.Errorf("invalid ClusterCIDR: %w", err) - } - - if clusterCIDRSet.IPv4CIDRSet == nil && clusterCIDRSet.IPv6CIDRSet == nil { - return errors.New("invalid ClusterCIDR: must provide IPv4 and/or IPv6 config") - } - - if err := r.mapClusterCIDRSet(r.cidrMap, nodeSelector, clusterCIDRSet); err != nil { - return fmt.Errorf("unable to map clusterCIDRSet: %w", err) - } - - // Make a copy so we don't mutate the shared informer cache. - updatedClusterCIDR := clusterCIDR.DeepCopy() - if needToAddFinalizer(clusterCIDR, clusterCIDRFinalizer) { - updatedClusterCIDR.ObjectMeta.Finalizers = append(clusterCIDR.ObjectMeta.Finalizers, clusterCIDRFinalizer) - } - - logger := klog.FromContext(ctx) - if updatedClusterCIDR.ResourceVersion == "" { - // Create is only used for creating default ClusterCIDR. - if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Create(ctx, updatedClusterCIDR, metav1.CreateOptions{}); err != nil { - logger.V(2).Info("Error creating ClusterCIDR", "clusterCIDR", klog.KObj(clusterCIDR), "err", err) - return err - } - } else { - // Update the ClusterCIDR object when called from reconcileCreate. - if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Update(ctx, updatedClusterCIDR, metav1.UpdateOptions{}); err != nil { - logger.V(2).Info("Error creating ClusterCIDR", "clusterCIDR", clusterCIDR.Name, "err", err) - return err - } - } - - return nil -} - -// createClusterCIDRSet creates and returns new cidrset.ClusterCIDR based on ClusterCIDR API object. 
-func (r *multiCIDRRangeAllocator) createClusterCIDRSet(clusterCIDR *networkingv1alpha1.ClusterCIDR, terminating bool) (*cidrset.ClusterCIDR, error) { - - clusterCIDRSet := &cidrset.ClusterCIDR{ - Name: clusterCIDR.Name, - AssociatedNodes: make(map[string]bool, 0), - Terminating: terminating, - } - - if clusterCIDR.Spec.IPv4 != "" { - _, ipv4CIDR, err := netutil.ParseCIDRSloppy(clusterCIDR.Spec.IPv4) - if err != nil { - return nil, fmt.Errorf("unable to parse provided IPv4 CIDR: %w", err) - } - clusterCIDRSet.IPv4CIDRSet, err = cidrset.NewMultiCIDRSet(ipv4CIDR, int(clusterCIDR.Spec.PerNodeHostBits)) - if err != nil { - return nil, fmt.Errorf("unable to create IPv4 cidrSet: %w", err) - } - } - - if clusterCIDR.Spec.IPv6 != "" { - _, ipv6CIDR, err := netutil.ParseCIDRSloppy(clusterCIDR.Spec.IPv6) - if err != nil { - return nil, fmt.Errorf("unable to parse provided IPv6 CIDR: %w", err) - } - clusterCIDRSet.IPv6CIDRSet, err = cidrset.NewMultiCIDRSet(ipv6CIDR, int(clusterCIDR.Spec.PerNodeHostBits)) - if err != nil { - return nil, fmt.Errorf("unable to create IPv6 cidrSet: %w", err) - } - } - - return clusterCIDRSet, nil -} - -// mapClusterCIDRSet maps the ClusterCIDRSet to the provided labelSelector in the cidrMap. -func (r *multiCIDRRangeAllocator) mapClusterCIDRSet(cidrMap map[string][]*cidrset.ClusterCIDR, nodeSelector string, clusterCIDRSet *cidrset.ClusterCIDR) error { - if clusterCIDRSet == nil { - return errors.New("invalid clusterCIDRSet, clusterCIDRSet cannot be nil") - } - - if clusterCIDRSetList, ok := cidrMap[nodeSelector]; ok { - cidrMap[nodeSelector] = append(clusterCIDRSetList, clusterCIDRSet) - } else { - cidrMap[nodeSelector] = []*cidrset.ClusterCIDR{clusterCIDRSet} - } - return nil -} - -// reconcileDelete releases the assigned ClusterCIDR and removes the finalizer -// if the deletion timestamp is set. -func (r *multiCIDRRangeAllocator) reconcileDelete(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDR) error { - r.lock.Lock() - defer r.lock.Unlock() - - logger := klog.FromContext(ctx) - if slice.ContainsString(clusterCIDR.GetFinalizers(), clusterCIDRFinalizer, nil) { - logger.V(2).Info("Releasing ClusterCIDR", "clusterCIDR", clusterCIDR.Name) - if err := r.deleteClusterCIDR(logger, clusterCIDR); err != nil { - logger.V(2).Info("Error while deleting ClusterCIDR", "err", err) - return err - } - // Remove the finalizer as delete is successful. - cccCopy := clusterCIDR.DeepCopy() - cccCopy.ObjectMeta.Finalizers = slice.RemoveString(cccCopy.ObjectMeta.Finalizers, clusterCIDRFinalizer, nil) - if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Update(ctx, cccCopy, metav1.UpdateOptions{}); err != nil { - logger.V(2).Info("Error removing finalizer for ClusterCIDR", "clusterCIDR", clusterCIDR.Name, "err", err) - return err - } - logger.V(2).Info("Removed finalizer for ClusterCIDR", "clusterCIDR", clusterCIDR.Name) - } - return nil -} - -// deleteClusterCIDR Deletes and unmaps the ClusterCIDRs from the cidrMap. 
-func (r *multiCIDRRangeAllocator) deleteClusterCIDR(logger klog.Logger, clusterCIDR *networkingv1alpha1.ClusterCIDR) error { - - labelSelector, err := r.nodeSelectorKey(clusterCIDR) - if err != nil { - return fmt.Errorf("unable to delete cidr: %w", err) - } - - clusterCIDRSetList, ok := r.cidrMap[labelSelector] - if !ok { - logger.Info("Label not found in CIDRMap, proceeding with delete", "labelSelector", labelSelector) - return nil - } - - for i, clusterCIDRSet := range clusterCIDRSetList { - if clusterCIDRSet.Name != clusterCIDR.Name { - continue - } - - // Mark clusterCIDRSet as terminating. - clusterCIDRSet.Terminating = true - - // Allow deletion only if no nodes are associated with the ClusterCIDR. - if len(clusterCIDRSet.AssociatedNodes) > 0 { - return fmt.Errorf("ClusterCIDRSet %s marked as terminating, won't be deleted until all associated nodes are deleted", clusterCIDR.Name) - } - - // Remove the label from the map if this was the only clusterCIDR associated - // with it. - if len(clusterCIDRSetList) == 1 { - delete(r.cidrMap, labelSelector) - return nil - } - - clusterCIDRSetList = append(clusterCIDRSetList[:i], clusterCIDRSetList[i+1:]...) - r.cidrMap[labelSelector] = clusterCIDRSetList - return nil - } - logger.V(2).Info("clusterCIDR not found, proceeding with delete", "clusterCIDR", clusterCIDR.Name, "label", labelSelector) - return nil -} - -func (r *multiCIDRRangeAllocator) nodeSelectorKey(clusterCIDR *networkingv1alpha1.ClusterCIDR) (string, error) { - var nodeSelector labels.Selector - var err error - - if clusterCIDR.Spec.NodeSelector != nil { - nodeSelector, err = nodeSelectorAsSelector(clusterCIDR.Spec.NodeSelector) - } else { - nodeSelector, err = nodeSelectorAsSelector(defaultNodeSelector()) - } - - if err != nil { - return "", err - } - - return nodeSelector.String(), nil -} - -func listClusterCIDRs(ctx context.Context, kubeClient clientset.Interface) (*networkingv1alpha1.ClusterCIDRList, error) { - var clusterCIDRList *networkingv1alpha1.ClusterCIDRList - // We must poll because apiserver might not be up. This error causes - // controller manager to restart. - startTimestamp := time.Now() - - // start with 2s, multiply the duration by 1.6 each step, 11 steps = 9.7 minutes - backoff := wait.Backoff{ - Duration: 2 * time.Second, - Factor: 1.6, - Steps: 11, - } - - logger := klog.FromContext(ctx) - if pollErr := wait.ExponentialBackoff(backoff, func() (bool, error) { - var err error - clusterCIDRList, err = kubeClient.NetworkingV1alpha1().ClusterCIDRs().List(ctx, metav1.ListOptions{ - FieldSelector: fields.Everything().String(), - LabelSelector: labels.Everything().String(), - }) - if err != nil { - logger.Error(err, "Failed to list all clusterCIDRs") - return false, nil - } - return true, nil - }); pollErr != nil { - logger.Error(nil, "Failed to list clusterCIDRs", "latency", time.Now().Sub(startTimestamp)) - return nil, fmt.Errorf("failed to list all clusterCIDRs in %v, cannot proceed without updating CIDR map", - apiserverStartupGracePeriod) - } - return clusterCIDRList, nil -} - -// nodeSelectorRequirementsAsLabelRequirements converts the NodeSelectorRequirement -// type to a labels.Requirement type. 
-func nodeSelectorRequirementsAsLabelRequirements(nsr v1.NodeSelectorRequirement) (*labels.Requirement, error) { - var op selection.Operator - switch nsr.Operator { - case v1.NodeSelectorOpIn: - op = selection.In - case v1.NodeSelectorOpNotIn: - op = selection.NotIn - case v1.NodeSelectorOpExists: - op = selection.Exists - case v1.NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case v1.NodeSelectorOpGt: - op = selection.GreaterThan - case v1.NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", nsr.Operator) - } - return labels.NewRequirement(nsr.Key, op, nsr.Values) -} - -// TODO: nodeSelect and labelSelector semantics are different and the function -// doesn't translate them correctly, this has to be fixed before Beta -// xref: https://issues.k8s.io/116419 -// nodeSelectorAsSelector converts the NodeSelector api type into a struct that -// implements labels.Selector -// Note: This function should be kept in sync with the selector methods in -// pkg/labels/selector.go -func nodeSelectorAsSelector(ns *v1.NodeSelector) (labels.Selector, error) { - if ns == nil { - return labels.Nothing(), nil - } - if len(ns.NodeSelectorTerms) == 0 { - return labels.Everything(), nil - } - var requirements []labels.Requirement - - for _, nsTerm := range ns.NodeSelectorTerms { - for _, expr := range nsTerm.MatchExpressions { - req, err := nodeSelectorRequirementsAsLabelRequirements(expr) - if err != nil { - return nil, err - } - requirements = append(requirements, *req) - } - - for _, field := range nsTerm.MatchFields { - req, err := nodeSelectorRequirementsAsLabelRequirements(field) - if err != nil { - return nil, err - } - requirements = append(requirements, *req) - } - } - - selector := labels.NewSelector() - selector = selector.Add(requirements...) - return selector, nil -} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go deleted file mode 100644 index df680a575d2e3..0000000000000 --- a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go +++ /dev/null @@ -1,1876 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ipam - -import ( - "context" - "fmt" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" - k8stesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2/ktesting" - "k8s.io/kubernetes/pkg/controller" - cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" - "k8s.io/kubernetes/pkg/controller/testutil" - utilnet "k8s.io/utils/net" -) - -type testCaseMultiCIDR struct { - description string - fakeNodeHandler *testutil.FakeNodeHandler - allocatorParams CIDRAllocatorParams - testCIDRMap map[string][]*cidrset.ClusterCIDR - // key is index of the cidr allocated. - expectedAllocatedCIDR map[int]string - allocatedCIDRs map[int][]string - // should controller creation fail? - ctrlCreateFail bool -} - -type testClusterCIDR struct { - perNodeHostBits int32 - ipv4CIDR string - ipv6CIDR string - name string -} - -type testNodeSelectorRequirement struct { - key string - operator v1.NodeSelectorOperator - values []string -} - -func getTestNodeSelector(requirements []testNodeSelectorRequirement) string { - testNodeSelector := &v1.NodeSelector{} - - for _, nsr := range requirements { - nst := v1.NodeSelectorTerm{ - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: nsr.key, - Operator: nsr.operator, - Values: nsr.values, - }, - }, - } - testNodeSelector.NodeSelectorTerms = append(testNodeSelector.NodeSelectorTerms, nst) - } - - selector, _ := nodeSelectorAsSelector(testNodeSelector) - return selector.String() -} - -func getTestCidrMap(testClusterCIDRMap map[string][]*testClusterCIDR) map[string][]*cidrset.ClusterCIDR { - cidrMap := make(map[string][]*cidrset.ClusterCIDR, 0) - for labels, testClusterCIDRList := range testClusterCIDRMap { - clusterCIDRList := make([]*cidrset.ClusterCIDR, 0) - for _, testClusterCIDR := range testClusterCIDRList { - clusterCIDR := &cidrset.ClusterCIDR{ - Name: testClusterCIDR.name, - AssociatedNodes: make(map[string]bool, 0), - } - - if testClusterCIDR.ipv4CIDR != "" { - _, testCIDR, _ := utilnet.ParseCIDRSloppy(testClusterCIDR.ipv4CIDR) - testCIDRSet, _ := cidrset.NewMultiCIDRSet(testCIDR, int(testClusterCIDR.perNodeHostBits)) - clusterCIDR.IPv4CIDRSet = testCIDRSet - } - if testClusterCIDR.ipv6CIDR != "" { - _, testCIDR, _ := utilnet.ParseCIDRSloppy(testClusterCIDR.ipv6CIDR) - testCIDRSet, _ := cidrset.NewMultiCIDRSet(testCIDR, int(testClusterCIDR.perNodeHostBits)) - clusterCIDR.IPv6CIDRSet = testCIDRSet - } - clusterCIDRList = append(clusterCIDRList, clusterCIDR) - } - cidrMap[labels] = clusterCIDRList - } - return cidrMap -} - -func getClusterCIDRList(nodeName string, cidrMap map[string][]*cidrset.ClusterCIDR) ([]*cidrset.ClusterCIDR, error) { - labelSelector := getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{nodeName}, - }, - }) - if clusterCIDRList, ok := cidrMap[labelSelector]; ok { - return clusterCIDRList, nil - } - return nil, fmt.Errorf("unable to get clusterCIDR for node: %s", nodeName) -} - -func TestMultiCIDROccupyPreExistingCIDR(t *testing.T) { - // all tests operate on a single node. 
- testCaseMultiCIDRs := []testCaseMultiCIDR{ - { - description: "success, single stack no node allocation", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: false, - }, - { - description: "success, dual stack no node allocation", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: false, - }, - { - description: "success, single stack correct node allocation", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.1/24"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr-allocated", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: false, - }, - { - description: "success, dual stack both allocated correctly", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.1/24", "ace:cab:deca::1/120"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-allocated", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: false, - }, - // failure cases. 
- { - description: "fail, single stack incorrect node allocation", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"172.10.0.1/24"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr-allocate-fail", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: true, - }, - { - description: "fail, dualstack node allocating from non existing cidr", - - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.1/24", "a00::/86"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-allocate-fail", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: true, - }, - { - description: "fail, dualstack node allocating bad v4", - - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"172.10.0.1/24", "ace:cab:deca::1/120"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-bad-v4", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: true, - }, - { - description: "fail, dualstack node allocating bad v6", - - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.1/24", "cdd::/86"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-bad-v6", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: 
"ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: true, - }, - } - - // test function - _, ctx := ktesting.NewTestContext(t) - for _, tc := range testCaseMultiCIDRs { - t.Run(tc.description, func(t *testing.T) { - // Initialize the range allocator. - fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() - nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) - - _, err := NewMultiCIDRRangeAllocator(ctx, tc.fakeNodeHandler, fakeNodeInformer, fakeClusterCIDRInformer, tc.allocatorParams, nodeList, tc.testCIDRMap) - if err == nil && tc.ctrlCreateFail { - t.Fatalf("creating range allocator was expected to fail, but it did not") - } - if err != nil && !tc.ctrlCreateFail { - t.Fatalf("creating range allocator was expected to succeed, but it did not") - } - }) - } -} - -func TestMultiCIDRAllocateOrOccupyCIDRSuccess(t *testing.T) { - // Non-parallel test (overrides global var). - oldNodePollInterval := nodePollInterval - nodePollInterval = test.NodePollInterval - defer func() { - nodePollInterval = oldNodePollInterval - }() - - // all tests operate on a single node. - testCaseMultiCIDRs := []testCaseMultiCIDR{ - { - description: "When there's no ServiceCIDR return first CIDR in range", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/24", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "127.123.234.0/30", - }, - }, - { - description: "Correctly filter out ServiceCIDR", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - NodeCIDRMaskSizes: []int{30}, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/24", - }, - }, - }), - // it should return first /30 CIDR after service range. 
- expectedAllocatedCIDR: map[int]string{ - 0: "127.123.234.64/30", - }, - }, - { - description: "Correctly ignore already allocated CIDRs", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/24", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"}, - }, - expectedAllocatedCIDR: map[int]string{ - 0: "127.123.234.76/30", - }, - }, - { - description: "Dualstack CIDRs, prioritize clusterCIDR with higher label match count", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/8", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/8", - ipv6CIDR: "abc:def:deca::/112", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "127.0.0.0/24", - 1: "abc:def:deca::/120", - }, - }, - { - description: "Dualstack CIDRs, prioritize clusterCIDR with higher label match count, overlapping CIDRs", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/8", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - 
key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24", "10.0.4.0/24"}, - 1: {"ace:cab:deca::/120"}, - }, - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.3.0/24", - 1: "ace:cab:deca::100/120", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label match count, prioritize clusterCIDR with fewer allocatable pod CIDRs", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/8", - ipv6CIDR: "abc:def:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-2", - operator: v1.NodeSelectorOpIn, - values: []string{"label2"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/24", - ipv6CIDR: "ace:cab:deca::/120", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.0.0/24", - 1: "ace:cab:deca::/120", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label count, non comparable allocatable pod CIDRs, prioritize clusterCIDR with lower perNodeMaskSize", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/23", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-2", - operator: v1.NodeSelectorOpIn, - values: []string{"label2"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/16", - ipv6CIDR: 
"ace:cab:deca::/120", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.0.0/24", - 1: "ace:cab:deca::/120", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label count and allocatable pod CIDRs, prioritize clusterCIDR with lower perNodeMaskSize", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/24", - ipv6CIDR: "abc:def:deca::/120", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-2", - operator: v1.NodeSelectorOpIn, - values: []string{"label2"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 0, - ipv4CIDR: "10.0.0.0/32", - ipv6CIDR: "ace:cab:deca::/128", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.0.0/32", - 1: "ace:cab:deca::/128", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label count, allocatable pod CIDRs and allocatable IPs, prioritize clusterCIDR with lower alphanumeric label", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/16", - ipv6CIDR: "abc:def:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-2", - operator: v1.NodeSelectorOpIn, - values: []string{"label2"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "127.123.0.0/24", - 1: "abc:def:deca::/120", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label count, allocatable pod CIDRs, allocatable IPs and labels, prioritize clusterCIDR with smaller IP", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - 
ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/16", - ipv6CIDR: "abc:def:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.0.0/24", - 1: "ace:cab:deca::/120", - }, - }, - { - description: "no double counting", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "nodepool1", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.0/24"}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - Labels: map[string]string{ - "testLabel-0": "nodepool1", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.2.0/24"}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node2", - Labels: map[string]string{ - "testLabel-0": "nodepool1", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"nodepool1"}, - }, - }): { - { - name: "no-double-counting", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/22", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.10.1.0/24", - }, - }, - } - - logger, ctx := ktesting.NewTestContext(t) - - // test function - testFunc := func(tc testCaseMultiCIDR) { - nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) - // Initialize the range allocator. 
- - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() - allocator, err := NewMultiCIDRRangeAllocator(ctx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nodeList, tc.testCIDRMap) - if err != nil { - t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) - return - } - rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) - if !ok { - t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) - return - } - rangeAllocator.nodesSynced = test.AlwaysReady - rangeAllocator.recorder = testutil.NewFakeRecorder() - - // this is a bit of white box testing - // pre allocate the CIDRs as per the test - for _, allocatedList := range tc.allocatedCIDRs { - for _, allocated := range allocatedList { - _, cidr, err := utilnet.ParseCIDRSloppy(allocated) - if err != nil { - t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) - } - - clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) - if err != nil { - t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) - } - - occupied := false - for _, clusterCIDR := range clusterCIDRList { - if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { - occupied = true - break - } - } - if !occupied { - t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) - } - } - } - - updateCount := 0 - for _, node := range tc.fakeNodeHandler.Existing { - if node.Spec.PodCIDRs == nil { - updateCount++ - } - if err := allocator.AllocateOrOccupyCIDR(logger, node); err != nil { - t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) - } - } - if updateCount != 1 { - t.Fatalf("test error: all tests must update exactly one node") - } - if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil { - t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) - } - - if len(tc.expectedAllocatedCIDR) == 0 { - // nothing further expected - return - } - for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { - if len(updatedNode.Spec.PodCIDRs) == 0 { - continue // not assigned yet - } - //match - for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { - if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { - t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) - break - } - } - } - } - - // run the test cases - for _, tc := range testCaseMultiCIDRs { - testFunc(tc) - } -} - -func TestMultiCIDRAllocateOrOccupyCIDRFailure(t *testing.T) { - testCaseMultiCIDRs := []testCaseMultiCIDR{ - { - description: "When there's no ServiceCIDR return first CIDR in range", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: 
"testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "allocate-fail", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/28", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, - }, - }, - } - - logger, ctx := ktesting.NewTestContext(t) - - testFunc := func(tc testCaseMultiCIDR) { - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() - - // Initialize the range allocator. - allocator, err := NewMultiCIDRRangeAllocator(ctx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nil, tc.testCIDRMap) - if err != nil { - t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) - } - rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) - if !ok { - t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) - return - } - rangeAllocator.nodesSynced = test.AlwaysReady - rangeAllocator.recorder = testutil.NewFakeRecorder() - - // this is a bit of white box testing - // pre allocate the CIDRs as per the test - for _, allocatedList := range tc.allocatedCIDRs { - for _, allocated := range allocatedList { - _, cidr, err := utilnet.ParseCIDRSloppy(allocated) - if err != nil { - t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) - } - - clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) - if err != nil { - t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) - } - - occupied := false - for _, clusterCIDR := range clusterCIDRList { - if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { - occupied = true - break - } - } - if !occupied { - t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) - } - } - } - - if err := allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]); err == nil { - t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) - } - // We don't expect any updates, so just sleep for some time - time.Sleep(time.Second) - if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { - t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) - } - if len(tc.expectedAllocatedCIDR) == 0 { - // nothing further expected - return - } - for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { - if len(updatedNode.Spec.PodCIDRs) == 0 { - continue // not assigned yet - } - //match - for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { - if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR { - t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) - break - } - } - } - } - for _, tc := range testCaseMultiCIDRs { - testFunc(tc) - } -} - -type releasetestCaseMultiCIDR struct { - description string - fakeNodeHandler *testutil.FakeNodeHandler - testCIDRMap map[string][]*cidrset.ClusterCIDR - allocatorParams CIDRAllocatorParams - expectedAllocatedCIDRFirstRound map[int]string - expectedAllocatedCIDRSecondRound map[int]string - allocatedCIDRs map[int][]string - cidrsToRelease [][]string -} - -func 
TestMultiCIDRReleaseCIDRSuccess(t *testing.T) { - // Non-parallel test (overrides global var) - oldNodePollInterval := nodePollInterval - nodePollInterval = test.NodePollInterval - defer func() { - nodePollInterval = oldNodePollInterval - }() - - testCaseMultiCIDRs := []releasetestCaseMultiCIDR{ - { - description: "Correctly release preallocated CIDR", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "cidr-release", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/28", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, - }, - expectedAllocatedCIDRFirstRound: nil, - cidrsToRelease: [][]string{ - {"127.123.234.4/30"}, - }, - expectedAllocatedCIDRSecondRound: map[int]string{ - 0: "127.123.234.4/30", - }, - }, - { - description: "Correctly recycle CIDR", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "cidr-release", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/28", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, - }, - expectedAllocatedCIDRFirstRound: map[int]string{ - 0: "127.123.234.0/30", - }, - cidrsToRelease: [][]string{ - {"127.123.234.0/30"}, - }, - expectedAllocatedCIDRSecondRound: map[int]string{ - 0: "127.123.234.0/30", - }, - }, - } - logger, ctx := ktesting.NewTestContext(t) - testFunc := func(tc releasetestCaseMultiCIDR) { - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() - // Initialize the range allocator. 
- allocator, _ := NewMultiCIDRRangeAllocator(ctx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nil, tc.testCIDRMap) - rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) - if !ok { - t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) - return - } - rangeAllocator.nodesSynced = test.AlwaysReady - rangeAllocator.recorder = testutil.NewFakeRecorder() - - // this is a bit of white box testing - for _, allocatedList := range tc.allocatedCIDRs { - for _, allocated := range allocatedList { - _, cidr, err := utilnet.ParseCIDRSloppy(allocated) - if err != nil { - t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) - } - - clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) - if err != nil { - t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) - } - - occupied := false - for _, clusterCIDR := range clusterCIDRList { - if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { - occupied = true - clusterCIDR.AssociatedNodes["fakeNode"] = true - break - } - } - if !occupied { - t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) - } - } - } - - err := allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]) - if len(tc.expectedAllocatedCIDRFirstRound) != 0 { - if err != nil { - t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) - } - if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { - t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) - } - } else { - if err == nil { - t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) - } - // We don't expect any updates here - time.Sleep(time.Second) - if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { - t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) - } - } - - for _, cidrToRelease := range tc.cidrsToRelease { - - nodeToRelease := v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "fakeNode", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - } - nodeToRelease.Spec.PodCIDRs = cidrToRelease - err = allocator.ReleaseCIDR(logger, &nodeToRelease) - if err != nil { - t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err) - } - } - if err = allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]); err != nil { - t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) - } - if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { - t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) - } - - if len(tc.expectedAllocatedCIDRSecondRound) == 0 { - // nothing further expected - return - } - for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { - if len(updatedNode.Spec.PodCIDRs) == 0 { - continue // not assigned yet - } - //match - for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound { - if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { - t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) - break - } - } - } - } - - for _, tc := range testCaseMultiCIDRs { - testFunc(tc) - } -} - -// ClusterCIDR 
tests. - -var alwaysReady = func() bool { return true } - -type clusterCIDRController struct { - *multiCIDRRangeAllocator - clusterCIDRStore cache.Store -} - -func newController(ctx context.Context) (*fake.Clientset, *clusterCIDRController) { - client := fake.NewSimpleClientset() - - informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) - cccInformer := informerFactory.Networking().V1alpha1().ClusterCIDRs() - cccIndexer := cccInformer.Informer().GetIndexer() - - nodeInformer := informerFactory.Core().V1().Nodes() - - // These reactors are required to mock functionality that would be covered - // automatically if we weren't using the fake client. - client.PrependReactor("create", "clustercidrs", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) { - clusterCIDR := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.ClusterCIDR) - - if clusterCIDR.ObjectMeta.GenerateName != "" { - clusterCIDR.ObjectMeta.Name = fmt.Sprintf("%s-%s", clusterCIDR.ObjectMeta.GenerateName, rand.String(8)) - clusterCIDR.ObjectMeta.GenerateName = "" - } - clusterCIDR.Generation = 1 - cccIndexer.Add(clusterCIDR) - - return false, clusterCIDR, nil - })) - client.PrependReactor("update", "clustercidrs", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) { - clusterCIDR := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.ClusterCIDR) - clusterCIDR.Generation++ - cccIndexer.Update(clusterCIDR) - - return false, clusterCIDR, nil - })) - - _, clusterCIDR, _ := utilnet.ParseCIDRSloppy("192.168.0.0/16") - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("10.1.0.0/16") - - allocatorParams := CIDRAllocatorParams{ - ClusterCIDRs: []*net.IPNet{clusterCIDR}, - ServiceCIDR: serviceCIDR, - SecondaryServiceCIDR: nil, - NodeCIDRMaskSizes: []int{24}, - } - testCIDRMap := make(map[string][]*cidrset.ClusterCIDR, 0) - - // Initialize the range allocator. - ra, _ := NewMultiCIDRRangeAllocator(ctx, client, nodeInformer, cccInformer, allocatorParams, nil, testCIDRMap) - cccController := ra.(*multiCIDRRangeAllocator) - - cccController.clusterCIDRSynced = alwaysReady - - return client, &clusterCIDRController{ - cccController, - informerFactory.Networking().V1alpha1().ClusterCIDRs().Informer().GetStore(), - } -} - -// Ensure default ClusterCIDR is created during bootstrap. -func TestClusterCIDRDefault(t *testing.T) { - defaultCCC := makeClusterCIDR(defaultClusterCIDRName, "192.168.0.0/16", "", 8, nil) - _, ctx := ktesting.NewTestContext(t) - client, _ := newController(ctx) - createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), defaultClusterCIDRName, metav1.GetOptions{}) - assert.Nil(t, err, "Expected no error getting clustercidr objects") - assert.Equal(t, defaultCCC.Spec, createdCCC.Spec) -} - -// Ensure SyncClusterCIDR creates a new valid ClusterCIDR. 
-func TestSyncClusterCIDRCreate(t *testing.T) { - tests := []struct { - name string - ccc *networkingv1alpha1.ClusterCIDR - wantErr bool - }{ - { - name: "valid IPv4 ClusterCIDR with no NodeSelector", - ccc: makeClusterCIDR("ipv4-ccc", "10.2.0.0/16", "", 8, nil), - wantErr: false, - }, - { - name: "valid IPv4 ClusterCIDR with NodeSelector", - ccc: makeClusterCIDR("ipv4-ccc-label", "10.3.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid IPv4 ClusterCIDR with overlapping CIDRs", - ccc: makeClusterCIDR("ipv4-ccc-overlap", "10.2.0.0/24", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid IPv6 ClusterCIDR with no NodeSelector", - ccc: makeClusterCIDR("ipv6-ccc", "", "fd00:1::/112", 8, nil), - wantErr: false, - }, - { - name: "valid IPv6 ClusterCIDR with NodeSelector", - ccc: makeClusterCIDR("ipv6-ccc-label", "", "fd00:2::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid IPv6 ClusterCIDR with overlapping CIDRs", - ccc: makeClusterCIDR("ipv6-ccc-overlap", "", "fd00:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid Dualstack ClusterCIDR with no NodeSelector", - ccc: makeClusterCIDR("dual-ccc", "10.2.0.0/16", "fd00:1::/112", 8, nil), - wantErr: false, - }, - { - name: "valid DualStack ClusterCIDR with NodeSelector", - ccc: makeClusterCIDR("dual-ccc-label", "10.3.0.0/16", "fd00:2::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid Dualstack ClusterCIDR with overlapping CIDRs", - ccc: makeClusterCIDR("dual-ccc-overlap", "10.2.0.0/16", "fd00:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - // invalid ClusterCIDRs. - { - name: "invalid ClusterCIDR with both IPv4 and IPv6 CIDRs nil", - ccc: makeClusterCIDR("invalid-ccc", "", "", 0, nil), - wantErr: true, - }, - { - name: "invalid IPv4 ClusterCIDR", - ccc: makeClusterCIDR("invalid-ipv4-ccc", "1000.2.0.0/16", "", 8, nil), - wantErr: true, - }, - { - name: "invalid IPv6 ClusterCIDR", - ccc: makeClusterCIDR("invalid-ipv6-ccc", "", "aaaaa:1:1::/112", 8, nil), - wantErr: true, - }, - { - name: "invalid dualstack ClusterCIDR", - ccc: makeClusterCIDR("invalid-dual-ccc", "10.2.0.0/16", "aaaaa:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: true, - }, - } - _, ctx := ktesting.NewTestContext(t) - client, cccController := newController(ctx) - for _, tc := range tests { - cccController.clusterCIDRStore.Add(tc.ccc) - err := cccController.syncClusterCIDR(ctx, tc.ccc.Name) - if tc.wantErr { - assert.Error(t, err) - continue - } - assert.NoError(t, err) - expectActions(t, client.Actions(), 1, "create", "clustercidrs") - - createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), tc.ccc.Name, metav1.GetOptions{}) - assert.Nil(t, err, "Expected no error getting clustercidr object") - assert.Equal(t, tc.ccc.Spec, createdCCC.Spec) - assert.Equal(t, []string{clusterCIDRFinalizer}, createdCCC.Finalizers) - } -} - -// Ensure syncClusterCIDR for ClusterCIDR delete removes the ClusterCIDR. 
-func TestSyncClusterCIDRDelete(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - _, cccController := newController(ctx) - - testCCC := makeClusterCIDR("testing-1", "10.1.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})) - - cccController.clusterCIDRStore.Add(testCCC) - err := cccController.syncClusterCIDR(ctx, testCCC.Name) - assert.NoError(t, err) - - deletionTimestamp := metav1.Now() - testCCC.DeletionTimestamp = &deletionTimestamp - cccController.clusterCIDRStore.Update(testCCC) - err = cccController.syncClusterCIDR(ctx, testCCC.Name) - assert.NoError(t, err) -} - -// Ensure syncClusterCIDR for ClusterCIDR delete does not remove ClusterCIDR -// if a node is associated with the ClusterCIDR. -func TestSyncClusterCIDRDeleteWithNodesAssociated(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - client, cccController := newController(ctx) - - testCCC := makeClusterCIDR("testing-1", "10.1.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})) - - cccController.clusterCIDRStore.Add(testCCC) - err := cccController.syncClusterCIDR(ctx, testCCC.Name) - assert.NoError(t, err) - - // Mock the IPAM controller behavior associating node with ClusterCIDR. - nodeSelectorKey, _ := cccController.nodeSelectorKey(testCCC) - clusterCIDRs, _ := cccController.cidrMap[nodeSelectorKey] - clusterCIDRs[0].AssociatedNodes["test-node"] = true - - createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), testCCC.Name, metav1.GetOptions{}) - assert.Nil(t, err, "Expected no error getting clustercidr object") - - deletionTimestamp := metav1.Now() - createdCCC.DeletionTimestamp = &deletionTimestamp - cccController.clusterCIDRStore.Update(createdCCC) - err = cccController.syncClusterCIDR(ctx, createdCCC.Name) - assert.Error(t, err, fmt.Sprintf("ClusterCIDR %s marked as terminating, won't be deleted until all associated nodes are deleted", createdCCC.Name)) -} - -func expectActions(t *testing.T, actions []k8stesting.Action, num int, verb, resource string) { - t.Helper() - // if actions are less, the below logic will panic. - if num > len(actions) { - t.Fatalf("len of actions %v is unexpected. Expected to be at least %v", len(actions), num+1) - } - - for i := 0; i < num; i++ { - relativePos := len(actions) - i - 1 - assert.Equal(t, verb, actions[relativePos].GetVerb(), "Expected action -%d verb to be %s", i, verb) - assert.Equal(t, resource, actions[relativePos].GetResource().Resource, "Expected action -%d resource to be %s", i, resource) - } -} - -func makeNodeSelector(key string, op v1.NodeSelectorOperator, values []string) *v1.NodeSelector { - return &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: key, - Operator: op, - Values: values, - }, - }, - }, - }, - } -} - -// makeClusterCIDR returns a mock ClusterCIDR object. 
-func makeClusterCIDR(cccName, ipv4CIDR, ipv6CIDR string, perNodeHostBits int32, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR { - testCCC := &networkingv1alpha1.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: cccName}, - Spec: networkingv1alpha1.ClusterCIDRSpec{}, - } - - testCCC.Spec.PerNodeHostBits = perNodeHostBits - - if ipv4CIDR != "" { - testCCC.Spec.IPv4 = ipv4CIDR - } - - if ipv6CIDR != "" { - testCCC.Spec.IPv6 = ipv6CIDR - } - - if nodeSelector != nil { - testCCC.Spec.NodeSelector = nodeSelector - } - - return testCCC -} diff --git a/pkg/controller/nodeipam/node_ipam_controller.go b/pkg/controller/nodeipam/node_ipam_controller.go index cf547f81d75c9..c98671b1d5132 100644 --- a/pkg/controller/nodeipam/node_ipam_controller.go +++ b/pkg/controller/nodeipam/node_ipam_controller.go @@ -24,7 +24,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" coreinformers "k8s.io/client-go/informers/core/v1" - networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" corelisters "k8s.io/client-go/listers/core/v1" @@ -83,7 +82,6 @@ type Controller struct { func NewNodeIpamController( ctx context.Context, nodeInformer coreinformers.NodeInformer, - clusterCIDRInformer networkinginformers.ClusterCIDRInformer, cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterCIDRs []*net.IPNet, @@ -139,7 +137,7 @@ func NewNodeIpamController( NodeCIDRMaskSizes: nodeCIDRMaskSizes, } - ic.cidrAllocator, err = ipam.New(ctx, kubeClient, cloud, nodeInformer, clusterCIDRInformer, ic.allocatorType, allocatorParams) + ic.cidrAllocator, err = ipam.New(ctx, kubeClient, cloud, nodeInformer, ic.allocatorType, allocatorParams) if err != nil { return nil, err } diff --git a/pkg/controller/nodeipam/node_ipam_controller_test.go b/pkg/controller/nodeipam/node_ipam_controller_test.go index f15cf6b73362a..0c11c5220af5b 100644 --- a/pkg/controller/nodeipam/node_ipam_controller_test.go +++ b/pkg/controller/nodeipam/node_ipam_controller_test.go @@ -28,14 +28,11 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" - featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" "k8s.io/kubernetes/pkg/controller/testutil" - "k8s.io/kubernetes/pkg/features" "k8s.io/legacy-cloud-providers/gce" netutils "k8s.io/utils/net" ) @@ -51,7 +48,6 @@ func newTestNodeIpamController(ctx context.Context, clusterCIDR []*net.IPNet, se fakeClient := &fake.Clientset{} fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0) fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes() - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() for _, node := range fakeNodeHandler.Existing { fakeNodeInformer.Informer().GetStore().Add(node) @@ -60,7 +56,7 @@ func newTestNodeIpamController(ctx context.Context, clusterCIDR []*net.IPNet, se fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) return NewNodeIpamController( ctx, - fakeNodeInformer, fakeClusterCIDRInformer, fakeGCE, clientSet, + fakeNodeInformer, fakeGCE, clientSet, clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSizes, allocatorType, ) } @@ -120,42 +116,3 @@ func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) { }) } } - -// MultiCIDRRangeAllocatorType need enable 
feature gate -func TestNewNodeIpamControllerWithCIDRMasks2(t *testing.T) { - emptyServiceCIDR := "" - for _, tc := range []struct { - desc string - clusterCIDR string - serviceCIDR string - secondaryServiceCIDR string - maskSize []int - allocatorType ipam.CIDRAllocatorType - }{ - {"valid_multi_cidr_range_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.MultiCIDRRangeAllocatorType}, - {"valid_multi_cidr_range_allocator_dualstack", "10.0.0.0/21,2000::/48", "10.1.0.0/21", emptyServiceCIDR, []int{24, 64}, ipam.MultiCIDRRangeAllocatorType}, - } { - test := tc - _, ctx := ktesting.NewTestContext(t) - t.Run(test.desc, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - - clusterCidrs, err := netutils.ParseCIDRs(strings.Split(test.clusterCIDR, ",")) - if err != nil { - clusterCidrs = nil - } - _, serviceCIDRIpNet, err := netutils.ParseCIDRSloppy(test.serviceCIDR) - if err != nil { - serviceCIDRIpNet = nil - } - _, secondaryServiceCIDRIpNet, err := netutils.ParseCIDRSloppy(test.secondaryServiceCIDR) - if err != nil { - secondaryServiceCIDRIpNet = nil - } - _, err = newTestNodeIpamController(ctx, clusterCidrs, serviceCIDRIpNet, secondaryServiceCIDRIpNet, test.maskSize, test.allocatorType) - if err != nil { - t.Errorf("Test %s, got error %v", test.desc, err) - } - }) - } -} diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go index c5083902b2c82..65e669e45b58a 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -31,16 +31,17 @@ import ( "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/util/feature" - corev1apply "k8s.io/client-go/applyconfigurations/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" + apipod "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/apis/core/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" + utilpod "k8s.io/kubernetes/pkg/util/pod" "k8s.io/klog/v2" ) @@ -55,9 +56,6 @@ const ( UpdateWorkerSize = 8 podUpdateChannelSize = 1 retries = 5 - - // fieldManager used to add pod disruption condition when evicting pods due to NoExecute taint - fieldManager = "TaintManager" ) type nodeUpdateItem struct { @@ -127,16 +125,17 @@ func addConditionAndDeletePod(ctx context.Context, c clientset.Interface, name, if err != nil { return err } - podApply := corev1apply.Pod(pod.Name, pod.Namespace).WithStatus(corev1apply.PodStatus()) - podApply.Status.WithConditions(corev1apply.PodCondition(). - WithType(v1.DisruptionTarget). - WithStatus(v1.ConditionTrue). - WithReason("DeletionByTaintManager"). - WithMessage("Taint manager: deleting due to NoExecute taint"). 
- WithLastTransitionTime(metav1.Now()), - ) - if _, err := c.CoreV1().Pods(pod.Namespace).ApplyStatus(ctx, podApply, metav1.ApplyOptions{FieldManager: fieldManager, Force: true}); err != nil { - return err + newStatus := pod.Status.DeepCopy() + updated := apipod.UpdatePodCondition(newStatus, &v1.PodCondition{ + Type: v1.DisruptionTarget, + Status: v1.ConditionTrue, + Reason: "DeletionByTaintManager", + Message: "Taint manager: deleting due to NoExecute taint", + }) + if updated { + if _, _, _, err := utilpod.PatchPodStatus(ctx, c, pod.Namespace, pod.Name, pod.UID, pod.Status, *newStatus); err != nil { + return err + } } } return c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{}) diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 858405fe60f3f..a814a6c8069a8 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -534,7 +534,7 @@ func (a *HorizontalController) reconcileKey(ctx context.Context, key string) (de // computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType. func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicas int32, timestamp time.Time, metricName string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) { - if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType { + if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType && metricSpec.Object.Target.Value != nil { replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector) if err != nil { condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err) @@ -554,7 +554,7 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status }, } return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil - } else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType { + } else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType && metricSpec.Object.Target.AverageValue != nil { replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector) if err != nil { condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err) diff --git a/pkg/controller/podautoscaler/replica_calculator.go b/pkg/controller/podautoscaler/replica_calculator.go index db2bd16c3d84b..1acd9c439e803 100644 --- a/pkg/controller/podautoscaler/replica_calculator.go +++ b/pkg/controller/podautoscaler/replica_calculator.go @@ -425,7 +425,14 @@ func calculatePodRequests(pods []*v1.Pod, container string, resource v1.Resource requests := make(map[string]int64, len(pods)) for _, pod := range pods { podSum := int64(0) - for _, c := range pod.Spec.Containers { + // Calculate all 
regular containers and restartable init containers requests. + containers := append([]v1.Container{}, pod.Spec.Containers...) + for _, c := range pod.Spec.InitContainers { + if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways { + containers = append(containers, c) + } + } + for _, c := range containers { if container == "" || container == c.Name { if containerRequest, ok := c.Resources.Requests[resource]; ok { podSum += containerRequest.MilliValue() diff --git a/pkg/controller/podautoscaler/replica_calculator_test.go b/pkg/controller/podautoscaler/replica_calculator_test.go index e82efe64307f4..1789ad1f2cb05 100644 --- a/pkg/controller/podautoscaler/replica_calculator_test.go +++ b/pkg/controller/podautoscaler/replica_calculator_test.go @@ -1999,3 +1999,112 @@ func TestGroupPods(t *testing.T) { }) } } + +func TestCalculatePodRequests(t *testing.T) { + containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways + testPod := "test-pod" + + tests := []struct { + name string + pods []*v1.Pod + container string + resource v1.ResourceName + expectedRequests map[string]int64 + expectedError error + }{ + { + name: "void", + pods: []*v1.Pod{}, + container: "", + resource: v1.ResourceCPU, + expectedRequests: map[string]int64{}, + expectedError: nil, + }, + { + name: "pod with regular containers", + pods: []*v1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPod, + Namespace: testNamespace, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + {Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}}, + {Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}}, + }, + }, + }}, + container: "", + resource: v1.ResourceCPU, + expectedRequests: map[string]int64{testPod: 150}, + expectedError: nil, + }, + { + name: "calculate requests with special container", + pods: []*v1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPod, + Namespace: testNamespace, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + {Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}}, + {Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}}, + }, + }, + }}, + container: "container1", + resource: v1.ResourceCPU, + expectedRequests: map[string]int64{testPod: 100}, + expectedError: nil, + }, + { + name: "container missing requests", + pods: []*v1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPod, + Namespace: testNamespace, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + {Name: "container1"}, + }, + }, + }}, + container: "", + resource: v1.ResourceCPU, + expectedRequests: nil, + expectedError: fmt.Errorf("missing request for %s in container %s of Pod %s", v1.ResourceCPU, "container1", testPod), + }, + { + name: "pod with restartable init containers", + pods: []*v1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPod, + Namespace: testNamespace, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + {Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}}, + }, + InitContainers: []v1.Container{ + {Name: "init-container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: 
*resource.NewMilliQuantity(20, resource.DecimalSI)}}}, + {Name: "restartable-container1", RestartPolicy: &containerRestartPolicyAlways, Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}}, + }, + }, + }}, + container: "", + resource: v1.ResourceCPU, + expectedRequests: map[string]int64{testPod: 150}, + expectedError: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + requests, err := calculatePodRequests(tc.pods, tc.container, tc.resource) + assert.Equal(t, tc.expectedRequests, requests, "requests should be as expected") + assert.Equal(t, tc.expectedError, err, "error should be as expected") + }) + } +} diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go index 81288dfd6ae9c..476c764bdc0d7 100644 --- a/pkg/controller/podgc/gc_controller.go +++ b/pkg/controller/podgc/gc_controller.go @@ -30,17 +30,18 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" - corev1apply "k8s.io/client-go/applyconfigurations/core/v1" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + apipod "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller/podgc/metrics" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/eviction" nodeutil "k8s.io/kubernetes/pkg/util/node" + utilpod "k8s.io/kubernetes/pkg/util/pod" "k8s.io/kubernetes/pkg/util/taints" ) @@ -50,9 +51,6 @@ const ( // quarantineTime defines how long Orphaned GC waits for nodes to show up // in an informer before issuing a GET call to check if they are truly gone quarantineTime = 40 * time.Second - - // field manager used to add pod failure condition and change the pod phase - fieldManager = "PodGC" ) type PodGCController struct { @@ -249,12 +247,12 @@ func (gcc *PodGCController) gcOrphaned(ctx context.Context, pods []*v1.Pod, node continue } logger.V(2).Info("Found orphaned Pod assigned to the Node, deleting", "pod", klog.KObj(pod), "node", klog.KRef("", pod.Spec.NodeName)) - condition := corev1apply.PodCondition(). - WithType(v1.DisruptionTarget). - WithStatus(v1.ConditionTrue). - WithReason("DeletionByPodGC"). - WithMessage("PodGC: node no longer exists"). - WithLastTransitionTime(metav1.Now()) + condition := &v1.PodCondition{ + Type: v1.DisruptionTarget, + Status: v1.ConditionTrue, + Reason: "DeletionByPodGC", + Message: "PodGC: node no longer exists", + } if err := gcc.markFailedAndDeletePodWithCondition(ctx, pod, condition); err != nil { utilruntime.HandleError(err) metrics.DeletingPodsErrorTotal.WithLabelValues(pod.Namespace, metrics.PodGCReasonOrphaned).Inc() @@ -341,7 +339,7 @@ func (gcc *PodGCController) markFailedAndDeletePod(ctx context.Context, pod *v1. return gcc.markFailedAndDeletePodWithCondition(ctx, pod, nil) } -func (gcc *PodGCController) markFailedAndDeletePodWithCondition(ctx context.Context, pod *v1.Pod, condition *corev1apply.PodConditionApplyConfiguration) error { +func (gcc *PodGCController) markFailedAndDeletePodWithCondition(ctx context.Context, pod *v1.Pod, condition *v1.PodCondition) error { logger := klog.FromContext(ctx) logger.Info("PodGC is force deleting Pod", "pod", klog.KObj(pod)) // Patch the pod to make sure it is transitioned to the Failed phase before deletion. 
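
Context for the hunks around this point: both the taint-manager change above and the PodGC change below replace server-side apply (`ApplyStatus` with a dedicated field manager) with a plain status patch built from a deep-copied `PodStatus`. The sketch below is illustrative only, not code from this PR: the package and helper names are made up, it assumes the standard client-go/apimachinery packages, and it simplifies condition handling by appending the condition, whereas the PR uses `apipod.UpdatePodCondition` to update an existing condition in place.

```go
// Package podgcsketch is a minimal, illustrative sketch (not part of the PR) of the
// "deep-copy status, set condition, patch the status subresource" pattern that the
// taint manager and PodGC now use instead of server-side apply.
package podgcsketch

import (
	"context"
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	clientset "k8s.io/client-go/kubernetes"
)

// markFailedWithDisruptionTarget deep-copies the pod status, forces the Failed phase,
// adds a DisruptionTarget condition, and sends a strategic-merge patch against the
// "status" subresource -- roughly what utilpod.PatchPodStatus does in the real code.
func markFailedWithDisruptionTarget(ctx context.Context, c clientset.Interface, pod *v1.Pod) error {
	newStatus := pod.Status.DeepCopy()
	newStatus.Phase = v1.PodFailed
	// Simplification: the PR uses apipod.UpdatePodCondition, which replaces an
	// existing DisruptionTarget condition instead of blindly appending one.
	newStatus.Conditions = append(newStatus.Conditions, v1.PodCondition{
		Type:               v1.DisruptionTarget,
		Status:             v1.ConditionTrue,
		Reason:             "DeletionByPodGC",
		Message:            "PodGC: node no longer exists",
		LastTransitionTime: metav1.Now(),
	})

	// Build a two-way strategic merge patch between the old and new status.
	oldData, err := json.Marshal(v1.Pod{Status: pod.Status})
	if err != nil {
		return err
	}
	newData, err := json.Marshal(v1.Pod{Status: *newStatus})
	if err != nil {
		return err
	}
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{})
	if err != nil {
		return fmt.Errorf("failed to create status patch for pod %q: %v", pod.Name, err)
	}

	// Patch only the status subresource; deletion of the pod happens separately.
	_, err = c.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name,
		types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
	return err
}
```

One practical consequence of this switch, visible in the test updates further down, is ordering: a strategic-merge status patch preserves the position of pre-existing conditions, so the expected condition list in `TestGCInspectingPatchedPodBeforeDeletion` is reordered accordingly.
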
@@ -354,17 +352,12 @@ func (gcc *PodGCController) markFailedAndDeletePodWithCondition(ctx context.Cont // is orphaned, in which case the pod would remain in the Running phase // forever as there is no kubelet running to change the phase. if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed { - podApply := corev1apply.Pod(pod.Name, pod.Namespace).WithStatus(corev1apply.PodStatus()) - // we don't need to extract the pod apply configuration and can send - // only phase and the DisruptionTarget condition as PodGC would not - // own other fields. If the DisruptionTarget condition is owned by - // PodGC it means that it is in the Failed phase, so sending the - // condition will not be re-attempted. - podApply.Status.WithPhase(v1.PodFailed) + newStatus := pod.Status.DeepCopy() + newStatus.Phase = v1.PodFailed if condition != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { - podApply.Status.WithConditions(condition) + apipod.UpdatePodCondition(newStatus, condition) } - if _, err := gcc.kubeClient.CoreV1().Pods(pod.Namespace).ApplyStatus(ctx, podApply, metav1.ApplyOptions{FieldManager: fieldManager, Force: true}); err != nil { + if _, _, _, err := utilpod.PatchPodStatus(ctx, gcc.kubeClient, pod.Namespace, pod.Name, pod.UID, pod.Status, *newStatus); err != nil { return err } } diff --git a/pkg/controller/podgc/gc_controller_test.go b/pkg/controller/podgc/gc_controller_test.go index 4bc112556132f..d78879b030795 100644 --- a/pkg/controller/podgc/gc_controller_test.go +++ b/pkg/controller/podgc/gc_controller_test.go @@ -714,16 +714,16 @@ func TestGCInspectingPatchedPodBeforeDeletion(t *testing.T) { Status: v1.PodStatus{ Phase: v1.PodFailed, Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, { Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "DeletionByPodGC", Message: "PodGC: node no longer exists", }, - { - Type: v1.PodReady, - Status: v1.ConditionTrue, - }, }, }, }, diff --git a/pkg/controller/replicaset/metrics/metrics.go b/pkg/controller/replicaset/metrics/metrics.go index 64b3a6dcd3f08..8fe43fce23c8e 100644 --- a/pkg/controller/replicaset/metrics/metrics.go +++ b/pkg/controller/replicaset/metrics/metrics.go @@ -26,8 +26,8 @@ var SortingDeletionAgeRatio = metrics.NewHistogram( &metrics.HistogramOpts{ Subsystem: ReplicaSetControllerSubsystem, Name: "sorting_deletion_age_ratio", - Help: "The ratio of chosen deleted pod's ages to the current youngest pod's age (at the time). Should be <2." + - "The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate's effect on" + + Help: "The ratio of chosen deleted pod's ages to the current youngest pod's age (at the time). Should be <2. " + + "The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate's effect on " + "the sorting (and deletion) of pods when a replicaset scales down. 
This only considers Ready pods when calculating and reporting.", Buckets: metrics.ExponentialBuckets(0.25, 2, 6), StabilityLevel: metrics.ALPHA, diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 8b6292772c9ce..8eee7ef48c43d 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -151,16 +151,15 @@ func NewController(ctx context.Context, options *ControllerOptions) (*Controller ) if options.DiscoveryFunc != nil { - qm := &QuotaMonitor{ - informersStarted: options.InformersStarted, - informerFactory: options.InformerFactory, - ignoredResources: options.IgnoredResourcesFunc(), - resourceChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_quota_controller_resource_changes"), - resyncPeriod: options.ReplenishmentResyncPeriod, - replenishmentFunc: rq.replenishQuota, - registry: rq.registry, - updateFilter: options.UpdateFilter, - } + qm := NewMonitor( + options.InformersStarted, + options.InformerFactory, + options.IgnoredResourcesFunc(), + options.ReplenishmentResyncPeriod, + rq.replenishQuota, + rq.registry, + options.UpdateFilter, + ) rq.quotaMonitor = qm diff --git a/pkg/controller/resourcequota/resource_quota_monitor.go b/pkg/controller/resourcequota/resource_quota_monitor.go index 34fba8a78ae72..fc2c6b6382f35 100644 --- a/pkg/controller/resourcequota/resource_quota_monitor.go +++ b/pkg/controller/resourcequota/resource_quota_monitor.go @@ -103,6 +103,20 @@ type QuotaMonitor struct { updateFilter UpdateFilter } +// NewMonitor creates a new instance of a QuotaMonitor +func NewMonitor(informersStarted <-chan struct{}, informerFactory informerfactory.InformerFactory, ignoredResources map[schema.GroupResource]struct{}, resyncPeriod controller.ResyncPeriodFunc, replenishmentFunc ReplenishmentFunc, registry quota.Registry, updateFilter UpdateFilter) *QuotaMonitor { + return &QuotaMonitor{ + informersStarted: informersStarted, + informerFactory: informerFactory, + ignoredResources: ignoredResources, + resourceChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_quota_controller_resource_changes"), + resyncPeriod: resyncPeriod, + replenishmentFunc: replenishmentFunc, + registry: registry, + updateFilter: updateFilter, + } +} + // monitor runs a Controller with a local stop channel. 
type monitor struct { controller cache.Controller diff --git a/pkg/controller/statefulset/stateful_set_control.go b/pkg/controller/statefulset/stateful_set_control.go index f55b7d1d67c30..f0803bd68ebc7 100644 --- a/pkg/controller/statefulset/stateful_set_control.go +++ b/pkg/controller/statefulset/stateful_set_control.go @@ -337,10 +337,11 @@ func computeReplicaStatus(pods []*v1.Pod, minReadySeconds int32, currentRevision // count the number of current and update replicas if isCreated(pod) && !isTerminating(pod) { - if getPodRevision(pod) == currentRevision.Name { + revision := getPodRevision(pod) + if revision == currentRevision.Name { status.currentReplicas++ } - if getPodRevision(pod) == updateRevision.Name { + if revision == updateRevision.Name { status.updatedReplicas++ } } diff --git a/pkg/controller/statefulset/stateful_set_control_test.go b/pkg/controller/statefulset/stateful_set_control_test.go index d6bf9a78b14c2..08af86bedc40e 100644 --- a/pkg/controller/statefulset/stateful_set_control_test.go +++ b/pkg/controller/statefulset/stateful_set_control_test.go @@ -2429,6 +2429,12 @@ func (rt *requestTracker) reset() { rt.delay = 0 } +func (rt *requestTracker) getErr() error { + rt.Lock() + defer rt.Unlock() + return rt.err +} + func newRequestTracker(requests int, err error, after int) requestTracker { return requestTracker{ requests: requests, @@ -2474,7 +2480,7 @@ func (om *fakeObjectManager) CreatePod(ctx context.Context, pod *v1.Pod) error { defer om.createPodTracker.inc() if om.createPodTracker.errorReady() { defer om.createPodTracker.reset() - return om.createPodTracker.err + return om.createPodTracker.getErr() } pod.SetUID(types.UID(pod.Name + "-uid")) return om.podsIndexer.Update(pod) @@ -2492,7 +2498,7 @@ func (om *fakeObjectManager) DeletePod(pod *v1.Pod) error { defer om.deletePodTracker.inc() if om.deletePodTracker.errorReady() { defer om.deletePodTracker.reset() - return om.deletePodTracker.err + return om.deletePodTracker.getErr() } if key, err := controller.KeyFunc(pod); err != nil { return err @@ -2927,6 +2933,7 @@ func TestParallelScale(t *testing.T) { } { t.Run(tc.desc, func(t *testing.T) { set := burst(newStatefulSet(0)) + set.Spec.VolumeClaimTemplates[0].ObjectMeta.Labels = map[string]string{"test": "test"} parallelScale(t, set, tc.replicas, tc.desiredReplicas, assertBurstInvariants) }) } diff --git a/pkg/controller/statefulset/stateful_set_utils.go b/pkg/controller/statefulset/stateful_set_utils.go index 6898aabe56ed2..f46cbed61baf0 100644 --- a/pkg/controller/statefulset/stateful_set_utils.go +++ b/pkg/controller/statefulset/stateful_set_utils.go @@ -339,8 +339,8 @@ func getPersistentVolumeClaims(set *apps.StatefulSet, pod *v1.Pod) map[string]v1 templates := set.Spec.VolumeClaimTemplates claims := make(map[string]v1.PersistentVolumeClaim, len(templates)) for i := range templates { - claim := templates[i] - claim.Name = getPersistentVolumeClaimName(set, &claim, ordinal) + claim := templates[i].DeepCopy() + claim.Name = getPersistentVolumeClaimName(set, claim, ordinal) claim.Namespace = set.Namespace if claim.Labels != nil { for key, value := range set.Spec.Selector.MatchLabels { @@ -349,7 +349,7 @@ func getPersistentVolumeClaims(set *apps.StatefulSet, pod *v1.Pod) map[string]v1 } else { claim.Labels = set.Spec.Selector.MatchLabels } - claims[templates[i].Name] = claim + claims[templates[i].Name] = *claim } return claims } @@ -582,8 +582,9 @@ func inconsistentStatus(set *apps.StatefulSet, status *apps.StatefulSetStatus) b // are set to 0. 
func completeRollingUpdate(set *apps.StatefulSet, status *apps.StatefulSetStatus) { if set.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType && - status.UpdatedReplicas == status.Replicas && - status.ReadyReplicas == status.Replicas { + status.UpdatedReplicas == *set.Spec.Replicas && + status.ReadyReplicas == *set.Spec.Replicas && + status.Replicas == *set.Spec.Replicas { status.CurrentReplicas = status.UpdatedReplicas status.CurrentRevision = status.UpdateRevision } diff --git a/pkg/controller/volume/attachdetach/config/types.go b/pkg/controller/volume/attachdetach/config/types.go index b941c6a039a3d..aa3306933f5bc 100644 --- a/pkg/controller/volume/attachdetach/config/types.go +++ b/pkg/controller/volume/attachdetach/config/types.go @@ -27,6 +27,6 @@ type AttachDetachControllerConfiguration struct { // This flag enables or disables reconcile. Is false by default, and thus enabled. DisableAttachDetachReconcilerSync bool // ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop - // wait between successive executions. Is set to 5 sec by default. + // wait between successive executions. Is set to 60 sec by default. ReconcilerSyncLoopPeriod metav1.Duration } diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go index 02995c0d4bdb5..8e1aec04ed771 100644 --- a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go +++ b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go @@ -750,17 +750,15 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) { // The first detach will be triggered after at least 50ms (maxWaitForUnmountDuration in test). // Right before detach operation is performed, the volume will be first removed from being reported // as attached on node status (RemoveVolumeFromReportAsAttached). After detach operation which is expected to fail, - // controller then added the volume back as attached. + // controller then treats the attachment as Uncertain. // Here it sleeps 100ms so that detach should be triggered already at this point. - // verifyVolumeReportedAsAttachedToNode will check volume is in the list of volume attached that needs to be updated - // in node status. By calling this function (GetVolumesToReportAttached), node status should be updated, and the volume - // will not need to be updated until new changes are applied (detach is triggered again) time.Sleep(100 * time.Millisecond) - verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw, volumeAttachedCheckTimeout) + verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateUncertain, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, false, asw, volumeAttachedCheckTimeout) // Add a second pod which tries to attach the volume to the same node. - // After adding pod to the same node, detach will not be triggered any more. + // After adding pod to the same node, detach will not be triggered any more, + // the volume gets attached and reported as attached to the node. generatedVolumeName, podAddErr = dsw.AddPod(types.UniquePodName(podName2), controllervolumetesting.NewPod(podName2, podName2), volumeSpec, nodeName1) if podAddErr != nil { t.Fatalf("AddPod failed. 
Expected: Actual: <%v>", podAddErr) @@ -768,6 +766,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) { // Sleep 1s to verify no detach are triggered after second pod is added in the future. time.Sleep(1000 * time.Millisecond) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw, volumeAttachedCheckTimeout) // verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw) // Add a third pod which tries to attach the volume to a different node. diff --git a/pkg/controller/volume/attachdetach/util/util_test.go b/pkg/controller/volume/attachdetach/util/util_test.go index b8f99a70bc3fe..b075189c48a0e 100644 --- a/pkg/controller/volume/attachdetach/util/util_test.go +++ b/pkg/controller/volume/attachdetach/util/util_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/client-go/informers" csitrans "k8s.io/csi-translation-lib" "k8s.io/klog/v2/ktesting" - fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" "k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/fc" @@ -282,7 +282,7 @@ func Test_CreateVolumeSpec(t *testing.T) { } } -func setup(nodeName string, t *testing.T) (*volume.VolumePluginMgr, csimigration.PluginManager, csitrans.CSITranslator, fakeframework.PersistentVolumeLister, fakeframework.PersistentVolumeClaimLister) { +func setup(nodeName string, t *testing.T) (*volume.VolumePluginMgr, csimigration.PluginManager, csitrans.CSITranslator, tf.PersistentVolumeLister, tf.PersistentVolumeClaimLister) { tmpDir, err := utiltesting.MkTmpdir("csi-test") if err != nil { t.Fatalf("can't make a temp dir: %v", err) @@ -313,7 +313,7 @@ func setup(nodeName string, t *testing.T) (*volume.VolumePluginMgr, csimigration plugMgr.Host = fakeAttachDetachVolumeHost - pvLister := fakeframework.PersistentVolumeLister{ + pvLister := tf.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: migratedVolume}, Spec: v1.PersistentVolumeSpec{ @@ -339,7 +339,7 @@ func setup(nodeName string, t *testing.T) (*volume.VolumePluginMgr, csimigration }, } - pvcLister := fakeframework.PersistentVolumeClaimLister{ + pvcLister := tf.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "migrated-pvc", Namespace: "default"}, Spec: v1.PersistentVolumeClaimSpec{VolumeName: migratedVolume}, diff --git a/pkg/controlplane/apiserver/apiextensions.go b/pkg/controlplane/apiserver/apiextensions.go index 4b58dd76d6177..2b4fb0ade0b27 100644 --- a/pkg/controlplane/apiserver/apiextensions.go +++ b/pkg/controlplane/apiserver/apiextensions.go @@ -50,7 +50,6 @@ func CreateAPIExtensionsConfig( // we assume that the etcd options have been completed already. avoid messing with anything outside // of changes to StorageConfig as that may lead to unexpected behavior when the options are applied. etcdOptions := *commandOptions.Etcd - etcdOptions.StorageConfig.Paging = true // this is where the true decodable levels come from. 
etcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion) // prefer the more compact serialization (v1beta1) for storage until https://issue.k8s.io/82292 is resolved for objects whose v1 serialization is too big but whose v1beta1 serialization can be stored diff --git a/pkg/controlplane/apiserver/config.go b/pkg/controlplane/apiserver/config.go index 824469107ea4d..52eed984f4334 100644 --- a/pkg/controlplane/apiserver/config.go +++ b/pkg/controlplane/apiserver/config.go @@ -152,7 +152,7 @@ func BuildGenericConfig( lastErr = fmt.Errorf("invalid authorization config: %v", err) return } - if !sets.NewString(s.Authorization.Modes...).Has(modes.ModeRBAC) { + if s.Authorization != nil && !sets.NewString(s.Authorization.Modes...).Has(modes.ModeRBAC) { genericConfig.DisabledPostStartHooks.Insert(rbacrest.PostStartHookName) } @@ -172,9 +172,15 @@ func BuildGenericConfig( return } -// BuildAuthorizer constructs the authorizer +// BuildAuthorizer constructs the authorizer. If authorization is not set in s, it returns nil, nil, nil func BuildAuthorizer(s controlplaneapiserver.CompletedOptions, EgressSelector *egressselector.EgressSelector, versionedInformers clientgoinformers.SharedInformerFactory) (authorizer.Authorizer, authorizer.RuleResolver, error) { - authorizationConfig := s.Authorization.ToAuthorizationConfig(versionedInformers) + authorizationConfig, err := s.Authorization.ToAuthorizationConfig(versionedInformers) + if err != nil { + return nil, nil, err + } + if authorizationConfig == nil { + return nil, nil, nil + } if EgressSelector != nil { egressDialer, err := EgressSelector.Lookup(egressselector.ControlPlane.AsNetworkContext()) @@ -196,7 +202,6 @@ func BuildPriorityAndFairness(s controlplaneapiserver.CompletedOptions, extclien versionedInformer, extclient.FlowcontrolV1beta3(), s.GenericServerRunOptions.MaxRequestsInFlight+s.GenericServerRunOptions.MaxMutatingRequestsInFlight, - s.GenericServerRunOptions.RequestTimeout/4, ), nil } diff --git a/pkg/controlplane/apiserver/options/options.go b/pkg/controlplane/apiserver/options/options.go index d3e19ddce9c5a..a3a7b03442dd5 100644 --- a/pkg/controlplane/apiserver/options/options.go +++ b/pkg/controlplane/apiserver/options/options.go @@ -42,6 +42,8 @@ import ( "k8s.io/kubernetes/pkg/serviceaccount" ) +// Options define the flags and validation for a generic controlplane. If the +// structs are nil, the options are not added to the command line and not validated. 
type Options struct { GenericServerRunOptions *genericoptions.ServerRunOptions Etcd *genericoptions.EtcdOptions diff --git a/pkg/controlplane/apiserver/options/options_test.go b/pkg/controlplane/apiserver/options/options_test.go index f11ee47f676c2..fb928449f9699 100644 --- a/pkg/controlplane/apiserver/options/options_test.go +++ b/pkg/controlplane/apiserver/options/options_test.go @@ -143,7 +143,6 @@ func TestAddFlags(t *testing.T) { CertFile: "/var/run/kubernetes/etcdce.crt", TracerProvider: oteltrace.NewNoopTracerProvider(), }, - Paging: true, Prefix: "/registry", CompactionInterval: storagebackend.DefaultCompactInterval, CountMetricPollPeriod: time.Minute, @@ -283,6 +282,12 @@ func TestAddFlags(t *testing.T) { expected.Authentication.OIDC.UsernameClaim = "sub" expected.Authentication.OIDC.SigningAlgs = []string{"RS256"} + if !s.Authorization.AreLegacyFlagsSet() { + t.Errorf("expected legacy authorization flags to be set") + } + // setting the method to nil since methods can't be compared with reflect.DeepEqual + s.Authorization.AreLegacyFlagsSet = nil + if !reflect.DeepEqual(expected, s) { t.Errorf("Got different run options than expected.\nDifference detected on:\n%s", cmp.Diff(expected, s, cmpopts.IgnoreUnexported(admission.Plugins{}, kubeoptions.OIDCAuthenticationOptions{}))) } diff --git a/pkg/controlplane/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go b/pkg/controlplane/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go index dcfb67d3887b2..e816ed2436434 100644 --- a/pkg/controlplane/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go +++ b/pkg/controlplane/controller/clusterauthenticationtrust/cluster_authentication_trust_controller.go @@ -106,11 +106,11 @@ func NewClusterAuthenticationTrustController(requiredAuthenticationData ClusterA kubeSystemConfigMapInformer.AddEventHandler(cache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { if cast, ok := obj.(*corev1.ConfigMap); ok { - return cast.Name == configMapName + return cast.Namespace == configMapNamespace && cast.Name == configMapName } if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { if cast, ok := tombstone.Obj.(*corev1.ConfigMap); ok { - return cast.Name == configMapName + return cast.Namespace == configMapNamespace && cast.Name == configMapName } } return true // always return true just in case. The checks are fairly cheap diff --git a/pkg/controlplane/instance.go b/pkg/controlplane/instance.go index 723d61dd152c2..268095d012165 100644 --- a/pkg/controlplane/instance.go +++ b/pkg/controlplane/instance.go @@ -575,11 +575,6 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) if utilfeature.DefaultFeatureGate.Enabled(apiserverfeatures.APIServerIdentity) { m.GenericAPIServer.AddPostStartHookOrDie("start-kube-apiserver-identity-lease-controller", func(hookContext genericapiserver.PostStartHookContext) error { - kubeClient, err := kubernetes.NewForConfig(hookContext.LoopbackClientConfig) - if err != nil { - return err - } - // generate a context from stopCh. 
This is to avoid modifying files which are relying on apiserver // TODO: See if we can pass ctx to the current method ctx := wait.ContextForChannel(hookContext.StopCh) @@ -591,7 +586,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) // must replace ':,[]' in [ip:port] to be able to store this as a valid label value controller := lease.NewController( clock.RealClock{}, - kubeClient, + clientset, holderIdentity, int32(IdentityLeaseDurationSeconds), nil, @@ -605,12 +600,8 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) }) // TODO: move this into generic apiserver and make the lease identity value configurable m.GenericAPIServer.AddPostStartHookOrDie("start-kube-apiserver-identity-lease-garbage-collector", func(hookContext genericapiserver.PostStartHookContext) error { - kubeClient, err := kubernetes.NewForConfig(hookContext.LoopbackClientConfig) - if err != nil { - return err - } go apiserverleasegc.NewAPIServerLeaseGC( - kubeClient, + clientset, IdentityLeaseGCPeriod, metav1.NamespaceSystem, KubeAPIServerIdentityLeaseLabelSelector, @@ -620,11 +611,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) } m.GenericAPIServer.AddPostStartHookOrDie("start-legacy-token-tracking-controller", func(hookContext genericapiserver.PostStartHookContext) error { - kubeClient, err := kubernetes.NewForConfig(hookContext.LoopbackClientConfig) - if err != nil { - return err - } - go legacytokentracking.NewController(kubeClient).Run(hookContext.StopCh) + go legacytokentracking.NewController(clientset).Run(hookContext.StopCh) return nil }) diff --git a/pkg/credentialprovider/keyring_test.go b/pkg/credentialprovider/keyring_test.go index 07a63baa45a89..8535a7b7271f4 100644 --- a/pkg/credentialprovider/keyring_test.go +++ b/pkg/credentialprovider/keyring_test.go @@ -88,6 +88,22 @@ func TestURLsMatch(t *testing.T) { targetURL: "kubernetes.io", matchExpected: false, }, + { + globURL: "*kubernetes.io", + targetURL: "a.kubernetes.io", + matchExpected: false, + }, + // match when number of parts match + { + globURL: "*kubernetes.io", + targetURL: "kubernetes.io", + matchExpected: true, + }, + { + globURL: "*.*.*.kubernetes.io", + targetURL: "a.b.c.kubernetes.io", + matchExpected: true, + }, // no match when some parts mismatch { globURL: "kubernetes.io", diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 2548fe6f2b2ae..b1ff6aaf14b19 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -142,7 +142,8 @@ const ( // owner: @mfordjody // alpha: v1.26 // - // Skip validation Enable in next version + // Bypasses obsolete validation that GCP volumes are read-only when used in + // Deployments. SkipReadOnlyValidationGCE featuregate.Feature = "SkipReadOnlyValidationGCE" // owner: @trierra @@ -158,12 +159,6 @@ const ( // Enables the RBD in-tree driver to RBD CSI Driver migration feature. CSIMigrationRBD featuregate.Feature = "CSIMigrationRBD" - // owner: @divyenpatel - // beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u2, HW Version: VM version 15) - // GA: 1.26 - // Enables the vSphere in-tree driver to vSphere CSI Driver migration feature. - CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere" - // owner: @humblec, @zhucan // kep: https://kep.k8s.io/3171 // alpha: v1.25 @@ -178,6 +173,14 @@ const ( // Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it. 
CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth" + // owner: @seans3 + // kep: http://kep.k8s.io/4006 + // alpha: v1.29 + // + // Enables StreamTranslator proxy to handle WebSockets upgrade requests for the + // version of the RemoteCommand subprotocol that supports the "close" signal. + TranslateStreamCloseWebsocketRequests featuregate.Feature = "TranslateStreamCloseWebsocketRequests" + // owner: @nckturner // kep: http://kep.k8s.io/2699 // alpha: v1.27 @@ -202,15 +205,6 @@ const ( // Set the scheduled time as an annotation in the job. CronJobsScheduledAnnotation featuregate.Feature = "CronJobsScheduledAnnotation" - // owner: @deejross, @soltysh - // kep: https://kep.k8s.io/3140 - // alpha: v1.24 - // beta: v1.25 - // GA: 1.27 - // - // Enables support for time zones in CronJobs. - CronJobTimeZone featuregate.Feature = "CronJobTimeZone" - // owner: @thockin // deprecated: v1.28 // @@ -223,6 +217,7 @@ const ( // owner: @elezar // kep: http://kep.k8s.io/4009 // alpha: v1.28 + // beta: v1.29 // // Add support for CDI Device IDs in the Device Plugin API. DevicePluginCDIDevices featuregate.Feature = "DevicePluginCDIDevices" @@ -241,14 +236,6 @@ const ( // Disable in-tree functionality in kubelet to authenticate to cloud provider container registries for image pull credentials. DisableKubeletCloudCredentialProviders featuregate.Feature = "DisableKubeletCloudCredentialProviders" - // owner: @derekwaynecarr - // alpha: v1.20 - // beta: v1.21 (off by default until 1.22) - // ga: v1.27 - // - // Enables usage of hugepages- in downward API. - DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages" - // owner: @pohly // kep: http://kep.k8s.io/3063 // alpha: v1.26 @@ -290,15 +277,6 @@ const ( // This flag used to be needed for dockershim CRI and currently does nothing. ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting" - // owner: @yuzhiquan, @bowei, @PxyUp, @SergeyKanzhelev - // kep: https://kep.k8s.io/2727 - // alpha: v1.23 - // beta: v1.24 - // stable: v1.27 - // - // Enables GRPC probe method for {Liveness,Readiness,Startup}Probe. - GRPCContainerProbe featuregate.Feature = "GRPCContainerProbe" - // owner: @bobbypage // alpha: v1.20 // beta: v1.21 @@ -395,19 +373,11 @@ const ( // owner: @mimowo // kep: https://kep.k8s.io/3850 // alpha: v1.28 + // beta: v1.29 // // Allows users to specify counting of failed pods per index. JobBackoffLimitPerIndex featuregate.Feature = "JobBackoffLimitPerIndex" - // owner: @ahg - // beta: v1.23 - // stable: v1.27 - // - // Allow updating node scheduling directives in the pod template of jobs. Specifically, - // node affinity, selector and tolerations. This is allowed only for suspended jobs - // that have never been unsuspended before. - JobMutableNodeSchedulingDirectives featuregate.Feature = "JobMutableNodeSchedulingDirectives" - // owner: @mimowo // kep: https://kep.k8s.io/3329 // alpha: v1.25 @@ -431,17 +401,6 @@ const ( // Track the number of pods with Ready condition in the Job status. JobReadyPods featuregate.Feature = "JobReadyPods" - // owner: @alculquicondor - // alpha: v1.22 - // beta: v1.23 - // stable: v1.26 - // - // Track Job completion without relying on Pod remaining in the cluster - // indefinitely. Pod finalizers, in addition to a field in the Job status - // allow the Job controller to keep track of Pods that it didn't account for - // yet. 
- JobTrackingWithFinalizers featuregate.Feature = "JobTrackingWithFinalizers" - // owner: @marquiz // kep: http://kep.k8s.io/4033 // alpha: v1.28 @@ -504,14 +463,6 @@ const ( // `externalTrafficPolicy: Cluster` services. KubeProxyDrainingTerminatingNodes featuregate.Feature = "KubeProxyDrainingTerminatingNodes" - // owner: @zshihang - // kep: https://kep.k8s.io/2800 - // beta: v1.24 - // ga: v1.26 - // - // Stop auto-generation of secret-based service account tokens. - LegacyServiceAccountTokenNoAutoGeneration featuregate.Feature = "LegacyServiceAccountTokenNoAutoGeneration" - // owner: @zshihang // kep: http://kep.k8s.io/2800 // alpha: v1.26 @@ -541,6 +492,13 @@ const ( // Enables scaling down replicas via logarithmic comparison of creation/ready timestamps LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown" + // owner: @sanposhiho + // kep: https://kep.k8s.io/3633 + // alpha: v1.29 + // + // Enables the MatchLabelKeys and MismatchLabelKeys in PodAffinity and PodAntiAffinity. + MatchLabelKeysInPodAffinity featuregate.Feature = "MatchLabelKeysInPodAffinity" + // owner: @denkensk // kep: https://kep.k8s.io/3243 // alpha: v1.25 @@ -584,13 +542,6 @@ const ( // Enables new performance-improving code in kube-proxy iptables mode MinimizeIPTablesRestore featuregate.Feature = "MinimizeIPTablesRestore" - // owner: @sarveshr7 - // kep: https://kep.k8s.io/2593 - // alpha: v1.25 - // - // Enables the MultiCIDR Range allocator. - MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator" - // owner: @aojea // kep: https://kep.k8s.io/1880 // alpha: v1.27 @@ -674,8 +625,9 @@ const ( // Set pod completion index as a pod label for Indexed Jobs. PodIndexLabel featuregate.Feature = "PodIndexLabel" - // owner: @ddebroy + // owner: @ddebroy, @kannon92 // alpha: v1.25 + // beta: v1.29 // // Enables reporting of PodReadyToStartContainersCondition condition in pod status after pod // sandbox creation and network configuration completes successfully @@ -688,6 +640,13 @@ const ( // Adds pod.status.hostIPs and downward API PodHostIPs featuregate.Feature = "PodHostIPs" + // owner: @AxeZhan + // kep: http://kep.k8s.io/3960 + // alpha: v1.29 + // + // Enables SleepAction in container lifecycle hooks + PodLifecycleSleepAction featuregate.Feature = "PodLifecycleSleepAction" + // owner: @Huang-Wei // kep: https://kep.k8s.io/3521 // alpha: v1.26 @@ -696,14 +655,6 @@ const ( // Enable users to specify when a Pod is ready for scheduling. PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness" - // owner: @rphillips - // alpha: v1.21 - // beta: v1.22 - // ga: v1.28 - // - // Allows user to override pod-level terminationGracePeriod for probes - ProbeTerminationGracePeriod featuregate.Feature = "ProbeTerminationGracePeriod" - // owner: @jessfraz // alpha: v1.12 // @@ -730,6 +681,7 @@ const ( // kep: https://kep.k8s.io/2485 // alpha: v1.22 // beta: v1.27 + // GA: v1.29 // // Enables usage of the ReadWriteOncePod PersistentVolume access mode. 
ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod" @@ -741,15 +693,6 @@ const ( // Allow users to recover from volume expansion failure RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure" - // owner: @RomanBednar - // kep: https://kep.k8s.io/3333 - // alpha: v1.25 - // beta: 1.26 - // stable: v1.28 - // - // Allow assigning StorageClass to unbound PVCs retroactively - RetroactiveDefaultStorageClass featuregate.Feature = "RetroactiveDefaultStorageClass" - // owner: @mikedanese // alpha: v1.7 // beta: v1.12 @@ -769,22 +712,13 @@ const ( ElasticIndexedJob featuregate.Feature = "ElasticIndexedJob" // owner: @sanposhiho - // kep: http://kep.k8s.io/3063 + // kep: http://kep.k8s.io/4247 // beta: v1.28 // // Enables the scheduler's enhancement called QueueingHints, // which benefits to reduce the useless requeueing. SchedulerQueueingHints featuregate.Feature = "SchedulerQueueingHints" - // owner: @saschagrunert - // kep: https://kep.k8s.io/2413 - // alpha: v1.22 - // beta: v1.25 - // ga: v1.27 - // - // Enables the use of `RuntimeDefault` as the default seccomp profile for all workloads. - SeccompDefault featuregate.Feature = "SeccompDefault" - // owner: @mtardy // alpha: v1.0 // @@ -848,14 +782,6 @@ const ( // Enables topology aware hints for EndpointSlices TopologyAwareHints featuregate.Feature = "TopologyAwareHints" - // owner: @lmdaly, @swatisehgal (for GA graduation) - // alpha: v1.16 - // beta: v1.18 - // GA: v1.27 - // - // Enable resource managers to make NUMA aligned decisions - TopologyManager featuregate.Feature = "TopologyManager" - // owner: @PiotrProkop // kep: https://kep.k8s.io/3545 // alpha: v1.26 @@ -951,6 +877,13 @@ const ( // alpha: v1.29 // LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service LoadBalancerIPMode featuregate.Feature = "LoadBalancerIPMode" + + // owner: @haircommander + // kep: http://kep.k8s.io/4210 + // alpha: v1.29 + // ImageMaximumGCAge enables the Kubelet configuration field of the same name, allowing an admin + // to specify the age after which an image will be garbage collected. 
+ ImageMaximumGCAge featuregate.Feature = "ImageMaximumGCAge" ) func init() { @@ -994,13 +927,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CSIMigrationRBD: {Default: false, PreRelease: featuregate.Deprecated}, // deprecated in 1.28, remove in 1.31 - CSIMigrationvSphere: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - CSINodeExpandSecret: {Default: true, PreRelease: featuregate.Beta}, CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha}, - SkipReadOnlyValidationGCE: {Default: false, PreRelease: featuregate.Alpha}, + SkipReadOnlyValidationGCE: {Default: true, PreRelease: featuregate.Deprecated}, // remove in 1.31 + + TranslateStreamCloseWebsocketRequests: {Default: false, PreRelease: featuregate.Alpha}, CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha}, @@ -1010,17 +943,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CronJobsScheduledAnnotation: {Default: true, PreRelease: featuregate.Beta}, - CronJobTimeZone: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - DefaultHostNetworkHostPortsInPodTemplates: {Default: false, PreRelease: featuregate.Deprecated}, DisableCloudProviders: {Default: true, PreRelease: featuregate.Beta}, DisableKubeletCloudCredentialProviders: {Default: true, PreRelease: featuregate.Beta}, - DevicePluginCDIDevices: {Default: false, PreRelease: featuregate.Alpha}, - - DownwardAPIHugePages: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.29 + DevicePluginCDIDevices: {Default: true, PreRelease: featuregate.Beta}, DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha}, @@ -1032,8 +961,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.30 - GRPCContainerProbe: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, //remove in 1.29 - GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta}, GracefulNodeShutdownBasedOnPodPriority: {Default: true, PreRelease: featuregate.Beta}, @@ -1060,17 +987,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS IPTablesOwnershipCleanup: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 - JobBackoffLimitPerIndex: {Default: false, PreRelease: featuregate.Alpha}, - - JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + JobBackoffLimitPerIndex: {Default: true, PreRelease: featuregate.Beta}, JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta}, JobPodReplacementPolicy: {Default: false, PreRelease: featuregate.Alpha}, - JobReadyPods: {Default: true, PreRelease: featuregate.Beta}, - - JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 + JobReadyPods: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 KubeletCgroupDriverFromCRI: {Default: false, PreRelease: featuregate.Alpha}, @@ -1088,8 +1011,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS KubeProxyDrainingTerminatingNodes: {Default: false, PreRelease: featuregate.Alpha}, - LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove 
in 1.29 - LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 LegacyServiceAccountTokenCleanUp: {Default: false, PreRelease: featuregate.Alpha}, @@ -1098,6 +1019,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS LogarithmicScaleDown: {Default: true, PreRelease: featuregate.Beta}, + MatchLabelKeysInPodAffinity: {Default: false, PreRelease: featuregate.Alpha}, + MatchLabelKeysInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta}, MaxUnavailableStatefulSet: {Default: false, PreRelease: featuregate.Alpha}, @@ -1110,8 +1033,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS MinimizeIPTablesRestore: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 - MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha}, - MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha}, NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.Beta}, @@ -1132,13 +1053,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta}, - PodReadyToStartContainersCondition: {Default: false, PreRelease: featuregate.Alpha}, + PodReadyToStartContainersCondition: {Default: true, PreRelease: featuregate.Beta}, PodHostIPs: {Default: false, PreRelease: featuregate.Alpha}, - PodSchedulingReadiness: {Default: true, PreRelease: featuregate.Beta}, + PodLifecycleSleepAction: {Default: false, PreRelease: featuregate.Alpha}, - ProbeTerminationGracePeriod: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + PodSchedulingReadiness: {Default: true, PreRelease: featuregate.Beta}, ProcMountType: {Default: false, PreRelease: featuregate.Alpha}, @@ -1146,20 +1067,16 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS QOSReserved: {Default: false, PreRelease: featuregate.Alpha}, - ReadWriteOncePod: {Default: true, PreRelease: featuregate.Beta}, + ReadWriteOncePod: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha}, - RetroactiveDefaultStorageClass: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta}, ElasticIndexedJob: {Default: true, PreRelease: featuregate.Beta}, SchedulerQueueingHints: {Default: true, PreRelease: featuregate.Beta}, - SeccompDefault: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - SecurityContextDeny: {Default: false, PreRelease: featuregate.Alpha}, ServiceNodePortStaticSubrange: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.29; remove in 1.31 @@ -1176,8 +1093,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta}, - TopologyManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.27; remove in 1.29 - TopologyManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha}, TopologyManagerPolicyBetaOptions: {Default: true, PreRelease: featuregate.Beta}, @@ -1206,6 +1121,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS LoadBalancerIPMode: {Default: false, PreRelease: 
featuregate.Alpha}, + ImageMaximumGCAge: {Default: false, PreRelease: featuregate.Alpha}, + // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: @@ -1225,12 +1142,14 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + genericfeatures.StructuredAuthorizationConfiguration: {Default: false, PreRelease: featuregate.Alpha}, + + genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, + // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index cfa982d2fb002..42d251c74f442 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -552,6 +552,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/core/v1.ServiceSpec": schema_k8sio_api_core_v1_ServiceSpec(ref), "k8s.io/api/core/v1.ServiceStatus": schema_k8sio_api_core_v1_ServiceStatus(ref), "k8s.io/api/core/v1.SessionAffinityConfig": schema_k8sio_api_core_v1_SessionAffinityConfig(ref), + "k8s.io/api/core/v1.SleepAction": schema_k8sio_api_core_v1_SleepAction(ref), "k8s.io/api/core/v1.StorageOSPersistentVolumeSource": schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref), "k8s.io/api/core/v1.StorageOSVolumeSource": schema_k8sio_api_core_v1_StorageOSVolumeSource(ref), "k8s.io/api/core/v1.Sysctl": schema_k8sio_api_core_v1_Sysctl(ref), @@ -738,9 +739,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/networking/v1.NetworkPolicyPort": schema_k8sio_api_networking_v1_NetworkPolicyPort(ref), "k8s.io/api/networking/v1.NetworkPolicySpec": schema_k8sio_api_networking_v1_NetworkPolicySpec(ref), "k8s.io/api/networking/v1.ServiceBackendPort": schema_k8sio_api_networking_v1_ServiceBackendPort(ref), - "k8s.io/api/networking/v1alpha1.ClusterCIDR": schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref), - "k8s.io/api/networking/v1alpha1.ClusterCIDRList": schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref), - "k8s.io/api/networking/v1alpha1.ClusterCIDRSpec": schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref), "k8s.io/api/networking/v1alpha1.IPAddress": schema_k8sio_api_networking_v1alpha1_IPAddress(ref), "k8s.io/api/networking/v1alpha1.IPAddressList": schema_k8sio_api_networking_v1alpha1_IPAddressList(ref), "k8s.io/api/networking/v1alpha1.IPAddressSpec": schema_k8sio_api_networking_v1alpha1_IPAddressSpec(ref), @@ -4761,7 +4759,6 @@ func schema_k8sio_api_apiserverinternal_v1alpha1_StorageVersionCondition(ref com "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -4948,7 +4945,6 @@ func schema_k8sio_api_apps_v1_ControllerRevision(ref common.ReferenceCallback) c 
"data": { SchemaProps: spec.SchemaProps{ Description: "Data is the serialized representation of the state.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -5096,7 +5092,6 @@ func schema_k8sio_api_apps_v1_DaemonSetCondition(ref common.ReferenceCallback) c "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -5432,14 +5427,12 @@ func schema_k8sio_api_apps_v1_DeploymentCondition(ref common.ReferenceCallback) "lastUpdateTime": { SchemaProps: spec.SchemaProps{ Description: "The last time this condition was updated.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -5782,7 +5775,6 @@ func schema_k8sio_api_apps_v1_ReplicaSetCondition(ref common.ReferenceCallback) "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "The last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -6133,7 +6125,6 @@ func schema_k8sio_api_apps_v1_StatefulSetCondition(ref common.ReferenceCallback) "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -6519,7 +6510,6 @@ func schema_k8sio_api_apps_v1beta1_ControllerRevision(ref common.ReferenceCallba "data": { SchemaProps: spec.SchemaProps{ Description: "data is the serialized representation of the state.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -6667,14 +6657,12 @@ func schema_k8sio_api_apps_v1beta1_DeploymentCondition(ref common.ReferenceCallb "lastUpdateTime": { SchemaProps: spec.SchemaProps{ Description: "The last time this condition was updated.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -7272,7 +7260,6 @@ func schema_k8sio_api_apps_v1beta1_StatefulSetCondition(ref common.ReferenceCall "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -7654,7 +7641,6 @@ func schema_k8sio_api_apps_v1beta2_ControllerRevision(ref common.ReferenceCallba "data": { SchemaProps: spec.SchemaProps{ Description: "Data is the serialized representation of the state.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -7802,7 +7788,6 @@ func schema_k8sio_api_apps_v1beta2_DaemonSetCondition(ref common.ReferenceCallba "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, 
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -8137,14 +8122,12 @@ func schema_k8sio_api_apps_v1beta2_DeploymentCondition(ref common.ReferenceCallb "lastUpdateTime": { SchemaProps: spec.SchemaProps{ Description: "The last time this condition was updated.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -8486,7 +8469,6 @@ func schema_k8sio_api_apps_v1beta2_ReplicaSetCondition(ref common.ReferenceCallb "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "The last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -8956,7 +8938,6 @@ func schema_k8sio_api_apps_v1beta2_StatefulSetCondition(ref common.ReferenceCall "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -9526,7 +9507,6 @@ func schema_k8sio_api_authentication_v1_TokenRequestStatus(ref common.ReferenceC "expirationTimestamp": { SchemaProps: spec.SchemaProps{ Description: "ExpirationTimestamp is the time of expiration of the returned token.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -11421,7 +11401,6 @@ func schema_k8sio_api_autoscaling_v1_ContainerResourceMetricStatus(ref common.Re "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -11550,7 +11529,6 @@ func schema_k8sio_api_autoscaling_v1_ExternalMetricStatus(ref common.ReferenceCa "currentValue": { SchemaProps: spec.SchemaProps{ Description: "currentValue is the current value of the metric (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -11645,7 +11623,6 @@ func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerCondition(ref common "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -11955,7 +11932,6 @@ func schema_k8sio_api_autoscaling_v1_ObjectMetricSource(ref common.ReferenceCall "targetValue": { SchemaProps: spec.SchemaProps{ Description: "targetValue is the target value of the metric (as a quantity).", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -12005,7 +11981,6 @@ func schema_k8sio_api_autoscaling_v1_ObjectMetricStatus(ref common.ReferenceCall "currentValue": { SchemaProps: spec.SchemaProps{ Description: "currentValue is the current value of the metric (as a quantity).", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -12048,7 +12023,6 @@ func schema_k8sio_api_autoscaling_v1_PodsMetricSource(ref common.ReferenceCallba "targetAverageValue": { SchemaProps: spec.SchemaProps{ Description: "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -12085,7 +12059,6 @@ func schema_k8sio_api_autoscaling_v1_PodsMetricStatus(ref common.ReferenceCallba "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -12166,7 +12139,6 @@ func schema_k8sio_api_autoscaling_v1_ResourceMetricStatus(ref common.ReferenceCa "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -12642,7 +12614,6 @@ func schema_k8sio_api_autoscaling_v2_HorizontalPodAutoscalerCondition(ref common "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -13351,7 +13322,6 @@ func schema_k8sio_api_autoscaling_v2beta1_ContainerResourceMetricStatus(ref comm "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -13475,7 +13445,6 @@ func schema_k8sio_api_autoscaling_v2beta1_ExternalMetricStatus(ref common.Refere "currentValue": { SchemaProps: spec.SchemaProps{ Description: "currentValue is the current value of the metric (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -13570,7 +13539,6 @@ func schema_k8sio_api_autoscaling_v2beta1_HorizontalPodAutoscalerCondition(ref c "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -13906,7 +13874,6 @@ func schema_k8sio_api_autoscaling_v2beta1_ObjectMetricSource(ref common.Referenc "targetValue": { SchemaProps: spec.SchemaProps{ Description: "targetValue is the target value of the metric (as a quantity).", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -13956,7 +13923,6 @@ func schema_k8sio_api_autoscaling_v2beta1_ObjectMetricStatus(ref common.Referenc "currentValue": { SchemaProps: spec.SchemaProps{ Description: "currentValue is the current value of the metric (as a quantity).", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -13999,7 +13965,6 @@ func schema_k8sio_api_autoscaling_v2beta1_PodsMetricSource(ref common.ReferenceC "targetAverageValue": { SchemaProps: spec.SchemaProps{ Description: "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -14036,7 +14001,6 @@ func schema_k8sio_api_autoscaling_v2beta1_PodsMetricStatus(ref common.ReferenceC "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -14117,7 +14081,6 @@ func schema_k8sio_api_autoscaling_v2beta1_ResourceMetricStatus(ref common.Refere "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value 
(instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -14489,7 +14452,6 @@ func schema_k8sio_api_autoscaling_v2beta2_HorizontalPodAutoscalerCondition(ref c "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -15404,14 +15366,12 @@ func schema_k8sio_api_batch_v1_JobCondition(ref common.ReferenceCallback) common "lastProbeTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition was checked.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transit from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -15532,14 +15492,14 @@ func schema_k8sio_api_batch_v1_JobSpec(ref common.ReferenceCallback) common.Open }, "backoffLimitPerIndex": { SchemaProps: spec.SchemaProps{ - Description: "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + Description: "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", Type: []string{"integer"}, Format: "int32", }, }, "maxFailedIndexes": { SchemaProps: spec.SchemaProps{ - Description: "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + Description: "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. 
It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", Type: []string{"integer"}, Format: "int32", }, @@ -15680,7 +15640,7 @@ func schema_k8sio_api_batch_v1_JobStatus(ref common.ReferenceCallback) common.Op }, "failedIndexes": { SchemaProps: spec.SchemaProps{ - Description: "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + Description: "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", Type: []string{"string"}, Format: "", }, @@ -15859,7 +15819,7 @@ func schema_k8sio_api_batch_v1_PodFailurePolicyRule(ref common.ReferenceCallback Properties: map[string]spec.Schema{ "action": { SchemaProps: spec.SchemaProps{ - Description: "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.\n\nPossible enum values:\n - `\"Count\"` This is an action which might be taken on a pod failure - the pod failure is handled in the default way - the counter towards .backoffLimit, represented by the job's .status.failed field, is incremented.\n - `\"FailIndex\"` This is an action which might be taken on a pod failure - mark the Job's index as failed to avoid restarts within this index. 
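Editor's note, not part of this diff: the hunks above promote the JobBackoffLimitPerIndex wording from alpha to beta. As a hedged illustration of how the fields described there fit together (all values hypothetical), a JobSpec built against k8s.io/api/batch/v1 that combines backoffLimitPerIndex, maxFailedIndexes and a FailIndex pod-failure-policy rule could look like:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// int32Ptr is a small helper for the pointer-typed Job fields used below.
func int32Ptr(i int32) *int32 { return &i }

// indexedJobSpec sketches the fields whose descriptions are updated above:
// backoffLimitPerIndex and maxFailedIndexes require completionMode=Indexed
// and restartPolicy=Never, and the FailIndex action is only valid when
// backoffLimitPerIndex is set.
func indexedJobSpec() batchv1.JobSpec {
	completionMode := batchv1.IndexedCompletion
	return batchv1.JobSpec{
		Completions:          int32Ptr(10),
		Parallelism:          int32Ptr(5),
		CompletionMode:       &completionMode,
		BackoffLimitPerIndex: int32Ptr(2), // retries per index before that index is failed
		MaxFailedIndexes:     int32Ptr(3), // entire Job fails once more than 3 indexes fail
		PodFailurePolicy: &batchv1.PodFailurePolicy{
			Rules: []batchv1.PodFailurePolicyRule{{
				Action: batchv1.PodFailurePolicyActionFailIndex, // fail only the index, not the Job
				OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
					Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
					Values:   []int32{42}, // hypothetical exit code
				},
			}},
		},
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				RestartPolicy: corev1.RestartPolicyNever,
				Containers:    []corev1.Container{{Name: "worker", Image: "busybox"}},
			},
		},
	}
}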
This action can only be used when backoffLimitPerIndex is set.\n - `\"FailJob\"` This is an action which might be taken on a pod failure - mark the pod's job as Failed and terminate all running pods.\n - `\"Ignore\"` This is an action which might be taken on a pod failure - the counter towards .backoffLimit, represented by the job's .status.failed field, is not incremented and a replacement pod is created.", + Description: "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.\n\nPossible enum values:\n - `\"Count\"` This is an action which might be taken on a pod failure - the pod failure is handled in the default way - the counter towards .backoffLimit, represented by the job's .status.failed field, is incremented.\n - `\"FailIndex\"` This is an action which might be taken on a pod failure - mark the Job's index as failed to avoid restarts within this index. This action can only be used when backoffLimitPerIndex is set.\n - `\"FailJob\"` This is an action which might be taken on a pod failure - mark the pod's job as Failed and terminate all running pods.\n - `\"Ignore\"` This is an action which might be taken on a pod failure - the counter towards .backoffLimit, represented by the job's .status.failed field, is not incremented and a replacement pod is created.", Default: "", Type: []string{"string"}, Format: "", @@ -16292,14 +16252,12 @@ func schema_k8sio_api_certificates_v1_CertificateSigningRequestCondition(ref com "lastUpdateTime": { SchemaProps: spec.SchemaProps{ Description: "lastUpdateTime is the time of the last update to this condition", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -16742,14 +16700,12 @@ func schema_k8sio_api_certificates_v1beta1_CertificateSigningRequestCondition(re "lastUpdateTime": { SchemaProps: spec.SchemaProps{ Description: "timestamp for the last update to this condition", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the time the condition last transitioned from one status to another. 
If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -18962,7 +18918,6 @@ func schema_k8sio_api_core_v1_ContainerStateRunning(ref common.ReferenceCallback "startedAt": { SchemaProps: spec.SchemaProps{ Description: "Time at which the container was last (re-)started", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -19013,14 +18968,12 @@ func schema_k8sio_api_core_v1_ContainerStateTerminated(ref common.ReferenceCallb "startedAt": { SchemaProps: spec.SchemaProps{ Description: "Time at which previous execution of the container started", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "finishedAt": { SchemaProps: spec.SchemaProps{ Description: "Time at which the container last terminated", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -19150,8 +19103,7 @@ func schema_k8sio_api_core_v1_ContainerStatus(ref common.ReferenceCallback) comm Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -19411,7 +19363,7 @@ func schema_k8sio_api_core_v1_EndpointPort(ref common.ReferenceCallback) common. }, "appProtocol": { SchemaProps: spec.SchemaProps{ - Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", Type: []string{"string"}, Format: "", }, @@ -20331,14 +20283,12 @@ func schema_k8sio_api_core_v1_Event(ref common.ReferenceCallback) common.OpenAPI "firstTimestamp": { SchemaProps: spec.SchemaProps{ Description: "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTimestamp": { SchemaProps: spec.SchemaProps{ Description: "The time at which the most recent occurrence of this event was recorded.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -20359,7 +20309,6 @@ func schema_k8sio_api_core_v1_Event(ref common.ReferenceCallback) common.OpenAPI "eventTime": { SchemaProps: spec.SchemaProps{ Description: "Time when this Event was first observed.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -20475,7 +20424,6 @@ func schema_k8sio_api_core_v1_EventSeries(ref common.ReferenceCallback) common.O "lastObservedTime": { SchemaProps: spec.SchemaProps{ Description: "Time of the last occurrence observed", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -20959,7 +20907,6 @@ func schema_k8sio_api_core_v1_HTTPGetAction(ref common.ReferenceCallback) common "port": { SchemaProps: spec.SchemaProps{ Description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. 
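Editor's note, not part of this diff: the appProtocol descriptions in the surrounding hunks change the kubernetes.io/h2c reference from RFC 7540 to the RFC 9113 "prior knowledge" wording. Purely as an illustration (port name and number are hypothetical), a ServicePort declaring HTTP/2 over cleartext with the Kubernetes-defined prefix would be written as:

package example

import corev1 "k8s.io/api/core/v1"

// h2cPort sketches a ServicePort whose appProtocol uses the Kubernetes-defined
// prefix for HTTP/2 prior knowledge over cleartext, as described above.
func h2cPort() corev1.ServicePort {
	h2c := "kubernetes.io/h2c"
	return corev1.ServicePort{
		Name:        "grpc", // hypothetical port name
		Port:        8080,   // hypothetical port number
		AppProtocol: &h2c,
	}
}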
Name must be an IANA_SVC_NAME.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, @@ -21411,11 +21358,17 @@ func schema_k8sio_api_core_v1_LifecycleHandler(ref common.ReferenceCallback) com Ref: ref("k8s.io/api/core/v1.TCPSocketAction"), }, }, + "sleep": { + SchemaProps: spec.SchemaProps{ + Description: "Sleep represents the duration that the container should sleep before being terminated.", + Ref: ref("k8s.io/api/core/v1.SleepAction"), + }, + }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.TCPSocketAction"}, + "k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.SleepAction", "k8s.io/api/core/v1.TCPSocketAction"}, } } @@ -21485,8 +21438,7 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -21500,8 +21452,7 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -21515,8 +21466,7 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -21530,8 +21480,7 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -21545,8 +21494,7 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -21677,8 +21625,7 @@ func schema_k8sio_api_core_v1_List(ref common.ReferenceCallback) common.OpenAPID Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -21943,8 +21890,7 @@ func schema_k8sio_api_core_v1_NamespaceCondition(ref common.ReferenceCallback) c }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "reason": { @@ -22231,14 +22177,12 @@ func schema_k8sio_api_core_v1_NodeCondition(ref common.ReferenceCallback) common "lastHeartbeatTime": { SchemaProps: spec.SchemaProps{ Description: "Last time we got an update on a given condition.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: 
spec.SchemaProps{ Description: "Last time the condition transit from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -22448,8 +22392,7 @@ func schema_k8sio_api_core_v1_NodeResources(ref common.ReferenceCallback) common Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -22691,8 +22634,7 @@ func schema_k8sio_api_core_v1_NodeStatus(ref common.ReferenceCallback) common.Op Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -22706,8 +22648,7 @@ func schema_k8sio_api_core_v1_NodeStatus(ref common.ReferenceCallback) common.Op Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -23151,14 +23092,12 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref common.Referenc "lastProbeTime": { SchemaProps: spec.SchemaProps{ Description: "lastProbeTime is the time we probed the condition.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -23351,8 +23290,7 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -23386,8 +23324,7 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -23694,8 +23631,7 @@ func schema_k8sio_api_core_v1_PersistentVolumeSpec(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -24085,7 +24021,7 @@ func schema_k8sio_api_core_v1_PodAffinityTerm(ref common.ReferenceCallback) comm Properties: map[string]spec.Schema{ "labelSelector": { SchemaProps: spec.SchemaProps{ - Description: "A label query over a set of resources, in this case pods.", + Description: "A label query over a set of resources, in this case pods. 
If it's null, this PodAffinityTerm matches with no Pods.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, @@ -24118,6 +24054,46 @@ func schema_k8sio_api_core_v1_PodAffinityTerm(ref common.ReferenceCallback) comm Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, + "matchLabelKeys": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "mismatchLabelKeys": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"topologyKey"}, }, @@ -24258,14 +24234,12 @@ func schema_k8sio_api_core_v1_PodCondition(ref common.ReferenceCallback) common. 
"lastProbeTime": { SchemaProps: spec.SchemaProps{ Description: "Last time we probed the condition.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -25283,8 +25257,7 @@ func schema_k8sio_api_core_v1_PodSpec(ref common.ReferenceCallback) common.OpenA Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -25854,7 +25827,6 @@ func schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref common.ReferenceCallback) "evictionTime": { SchemaProps: spec.SchemaProps{ Description: "Time at which this entry was added to the list.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -26414,7 +26386,6 @@ func schema_k8sio_api_core_v1_ReplicationControllerCondition(ref common.Referenc "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "The last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -26667,7 +26638,6 @@ func schema_k8sio_api_core_v1_ResourceFieldSelector(ref common.ReferenceCallback "divisor": { SchemaProps: spec.SchemaProps{ Description: "Specifies the output format of the exposed resources, defaults to \"1\"", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -26801,8 +26771,7 @@ func schema_k8sio_api_core_v1_ResourceQuotaSpec(ref common.ReferenceCallback) co Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -26852,8 +26821,7 @@ func schema_k8sio_api_core_v1_ResourceQuotaStatus(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -26867,8 +26835,7 @@ func schema_k8sio_api_core_v1_ResourceQuotaStatus(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -26897,8 +26864,7 @@ func schema_k8sio_api_core_v1_ResourceRequirements(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -26912,8 +26878,7 @@ func schema_k8sio_api_core_v1_ResourceRequirements(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -28027,7 +27992,7 @@ func schema_k8sio_api_core_v1_ServicePort(ref 
common.ReferenceCallback) common.O }, "appProtocol": { SchemaProps: spec.SchemaProps{ - Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", Type: []string{"string"}, Format: "", }, @@ -28043,7 +28008,6 @@ func schema_k8sio_api_core_v1_ServicePort(ref common.ReferenceCallback) common.O "targetPort": { SchemaProps: spec.SchemaProps{ Description: "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. 
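Editor's note, not part of this diff: a few hunks above, matchLabelKeys and mismatchLabelKeys are added to PodAffinityTerm (alpha, gated by MatchLabelKeysInPodAffinity). As a hedged sketch only, with the pod-template-hash key chosen by convention rather than taken from this diff, a term restricting co-location to peers of the same ReplicaSet generation could look like:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// sameGenerationAffinityTerm sketches the new PodAffinityTerm fields:
// MatchLabelKeys merges the incoming pod's label values into the selector as
// `key in (value)`; MismatchLabelKeys would merge them as `key notin (value)`.
// A key may not appear in both MatchLabelKeys and the LabelSelector.
func sameGenerationAffinityTerm() corev1.PodAffinityTerm {
	return corev1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"}, // hypothetical app label
		},
		TopologyKey: "kubernetes.io/hostname",
		// Only consider peers carrying the same pod-template-hash value as the incoming pod.
		MatchLabelKeys: []string{"pod-template-hash"},
	}
}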
More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, @@ -28390,6 +28354,28 @@ func schema_k8sio_api_core_v1_SessionAffinityConfig(ref common.ReferenceCallback } } +func schema_k8sio_api_core_v1_SleepAction(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SleepAction describes a \"sleep\" action.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "seconds": { + SchemaProps: spec.SchemaProps{ + Description: "Seconds is the number of seconds to sleep.", + Default: 0, + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + Required: []string{"seconds"}, + }, + }, + } +} + func schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -28528,7 +28514,6 @@ func schema_k8sio_api_core_v1_TCPSocketAction(ref common.ReferenceCallback) comm "port": { SchemaProps: spec.SchemaProps{ Description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, @@ -29252,8 +29237,7 @@ func schema_k8sio_api_core_v1_VolumeResourceRequirements(ref common.ReferenceCal Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -29267,8 +29251,7 @@ func schema_k8sio_api_core_v1_VolumeResourceRequirements(ref common.ReferenceCal Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -29755,7 +29738,7 @@ func schema_k8sio_api_discovery_v1_EndpointPort(ref common.ReferenceCallback) co Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ - Description: "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + Description: "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", Type: []string{"string"}, Format: "", }, @@ -29777,7 +29760,7 @@ func schema_k8sio_api_discovery_v1_EndpointPort(ref common.ReferenceCallback) co }, "appProtocol": { SchemaProps: spec.SchemaProps{ - Description: "The application protocol for this port. 
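Editor's note, not part of this diff: the SleepAction schema added above pairs with the "sleep" handler added to LifecycleHandler earlier in this file. As an illustrative sketch (image and duration are hypothetical), a container whose preStop hook simply sleeps before termination would be declared as:

package example

import corev1 "k8s.io/api/core/v1"

// sleepingContainer sketches the new lifecycle sleep handler: rather than
// exec'ing a `sleep` binary, the handler itself waits the requested number
// of seconds before termination proceeds.
func sleepingContainer() corev1.Container {
	return corev1.Container{
		Name:  "web",
		Image: "nginx", // hypothetical image
		Lifecycle: &corev1.Lifecycle{
			PreStop: &corev1.LifecycleHandler{
				Sleep: &corev1.SleepAction{Seconds: 5},
			},
		},
	}
}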
This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", Type: []string{"string"}, Format: "", }, @@ -30112,7 +30095,7 @@ func schema_k8sio_api_discovery_v1beta1_EndpointPort(ref common.ReferenceCallbac Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ - Description: "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + Description: "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", Type: []string{"string"}, Format: "", }, @@ -30332,7 +30315,6 @@ func schema_k8sio_api_events_v1_Event(ref common.ReferenceCallback) common.OpenA "eventTime": { SchemaProps: spec.SchemaProps{ Description: "eventTime is the time when this Event was first observed. 
It is required.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -30407,14 +30389,12 @@ func schema_k8sio_api_events_v1_Event(ref common.ReferenceCallback) common.OpenA "deprecatedFirstTimestamp": { SchemaProps: spec.SchemaProps{ Description: "deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "deprecatedLastTimestamp": { SchemaProps: spec.SchemaProps{ Description: "deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -30503,7 +30483,6 @@ func schema_k8sio_api_events_v1_EventSeries(ref common.ReferenceCallback) common "lastObservedTime": { SchemaProps: spec.SchemaProps{ Description: "lastObservedTime is the time when last Event from the series was seen before last heartbeat.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -30547,7 +30526,6 @@ func schema_k8sio_api_events_v1beta1_Event(ref common.ReferenceCallback) common. "eventTime": { SchemaProps: spec.SchemaProps{ Description: "eventTime is the time when this Event was first observed. It is required.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -30622,14 +30600,12 @@ func schema_k8sio_api_events_v1beta1_Event(ref common.ReferenceCallback) common. "deprecatedFirstTimestamp": { SchemaProps: spec.SchemaProps{ Description: "deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "deprecatedLastTimestamp": { SchemaProps: spec.SchemaProps{ Description: "deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -30718,7 +30694,6 @@ func schema_k8sio_api_events_v1beta1_EventSeries(ref common.ReferenceCallback) c "lastObservedTime": { SchemaProps: spec.SchemaProps{ Description: "lastObservedTime is the time when last Event from the series was seen before last heartbeat.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -30807,7 +30782,6 @@ func schema_k8sio_api_extensions_v1beta1_DaemonSetCondition(ref common.Reference "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -31149,14 +31123,12 @@ func schema_k8sio_api_extensions_v1beta1_DeploymentCondition(ref common.Referenc "lastUpdateTime": { SchemaProps: spec.SchemaProps{ Description: "The last time this condition was updated.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -31660,7 +31632,6 @@ func schema_k8sio_api_extensions_v1beta1_IngressBackend(ref common.ReferenceCall "servicePort": { 
SchemaProps: spec.SchemaProps{ Description: "Specifies the port of the referenced service.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, @@ -32397,7 +32368,6 @@ func schema_k8sio_api_extensions_v1beta1_ReplicaSetCondition(ref common.Referenc "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "The last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -32906,7 +32876,6 @@ func schema_k8sio_api_flowcontrol_v1beta1_FlowSchemaCondition(ref common.Referen "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "`lastTransitionTime` is the last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -33149,7 +33118,7 @@ func schema_k8sio_api_flowcontrol_v1beta1_LimitedPriorityLevelConfiguration(ref Properties: map[string]spec.Schema{ "assuredConcurrencyShares": { SchemaProps: spec.SchemaProps{ - Description: "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", + Description: "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). 
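Editor's note, not part of this diff: the assuredConcurrencyShares description above carries the formula ACV(l) = ceil( SCL * ACS(l) / sum[priority levels k] ACS(k) ). A small helper, illustrative only and not part of the API machinery, makes the arithmetic concrete:

package example

import "math"

// assuredConcurrencyValue computes ACV(l) for one priority level from the
// server's concurrency limit (SCL), the level's shares (ACS), and the shares
// of all concurrency-controlled levels, following the quoted formula.
func assuredConcurrencyValue(serverConcurrencyLimit, shares int, allShares []int) int {
	total := 0
	for _, s := range allShares {
		total += s
	}
	return int(math.Ceil(float64(serverConcurrencyLimit) * float64(shares) / float64(total)))
}

// Example: SCL=600 divided among shares {30, 30, 40} gives the level with
// ACS=30 an ACV of ceil(600*30/100) = 180.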
This field has a default value of 30.", Default: 0, Type: []string{"integer"}, Format: "int32", @@ -33385,7 +33354,6 @@ func schema_k8sio_api_flowcontrol_v1beta1_PriorityLevelConfigurationCondition(re "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "`lastTransitionTime` is the last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -33939,7 +33907,6 @@ func schema_k8sio_api_flowcontrol_v1beta2_FlowSchemaCondition(ref common.Referen "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "`lastTransitionTime` is the last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -34182,7 +34149,7 @@ func schema_k8sio_api_flowcontrol_v1beta2_LimitedPriorityLevelConfiguration(ref Properties: map[string]spec.Schema{ "assuredConcurrencyShares": { SchemaProps: spec.SchemaProps{ - Description: "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", + Description: "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). 
This field has a default value of 30.", Default: 0, Type: []string{"integer"}, Format: "int32", @@ -34418,7 +34385,6 @@ func schema_k8sio_api_flowcontrol_v1beta2_PriorityLevelConfigurationCondition(re "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "`lastTransitionTime` is the last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -34972,7 +34938,6 @@ func schema_k8sio_api_flowcontrol_v1beta3_FlowSchemaCondition(ref common.Referen "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "`lastTransitionTime` is the last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -35453,7 +35418,6 @@ func schema_k8sio_api_flowcontrol_v1beta3_PriorityLevelConfigurationCondition(re "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "`lastTransitionTime` is the last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -37128,146 +37092,6 @@ func schema_k8sio_api_networking_v1_ServiceBackendPort(ref common.ReferenceCallb } } -func schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "spec is the desired state of the ClusterCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/networking/v1alpha1.ClusterCIDRSpec"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/networking/v1alpha1.ClusterCIDRSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterCIDRList contains a list of ClusterCIDR.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "items is the list of ClusterCIDRs.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/networking/v1alpha1.ClusterCIDR"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/networking/v1alpha1.ClusterCIDR", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", - Ref: ref("k8s.io/api/core/v1.NodeSelector"), - }, - }, - "perNodeHostBits": { - SchemaProps: spec.SchemaProps{ - Description: "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). 
This field is immutable.", - Default: 0, - Type: []string{"integer"}, - Format: "int32", - }, - }, - "ipv4": { - SchemaProps: spec.SchemaProps{ - Description: "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "ipv6": { - SchemaProps: spec.SchemaProps{ - Description: "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"perNodeHostBits"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelector"}, - } -} - func schema_k8sio_api_networking_v1alpha1_IPAddress(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -37418,13 +37242,6 @@ func schema_k8sio_api_networking_v1alpha1_ParentReference(ref common.ReferenceCa Format: "", }, }, - "uid": { - SchemaProps: spec.SchemaProps{ - Description: "UID is the uid of the object being referenced.", - Type: []string{"string"}, - Format: "", - }, - }, }, }, }, @@ -37565,7 +37382,6 @@ func schema_k8sio_api_networking_v1beta1_IngressBackend(ref common.ReferenceCall "servicePort": { SchemaProps: spec.SchemaProps{ Description: "servicePort Specifies the port of the referenced service.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, @@ -38097,8 +37913,7 @@ func schema_k8sio_api_node_v1_Overhead(ref common.ReferenceCallback) common.Open Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -38290,8 +38105,7 @@ func schema_k8sio_api_node_v1alpha1_Overhead(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -38506,8 +38320,7 @@ func schema_k8sio_api_node_v1beta1_Overhead(ref common.ReferenceCallback) common Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -38895,8 +38708,7 @@ func schema_k8sio_api_policy_v1_PodDisruptionBudgetStatus(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, }, @@ -39173,8 +38985,7 @@ func schema_k8sio_api_policy_v1beta1_PodDisruptionBudgetStatus(ref common.Refere Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, }, @@ -42446,7 +42257,7 @@ func schema_k8sio_api_storage_v1_CSIDriverSpec(ref common.ReferenceCallback) com }, "podInfoOnMount": { SchemaProps: spec.SchemaProps{ - Description: "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) 
during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + Description: "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", Type: []string{"boolean"}, Format: "", }, @@ -43261,7 +43072,6 @@ func schema_k8sio_api_storage_v1_VolumeError(ref common.ReferenceCallback) commo "time": { SchemaProps: spec.SchemaProps{ Description: "time represents the time the error was encountered.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -43653,7 +43463,6 @@ func schema_k8sio_api_storage_v1alpha1_VolumeError(ref common.ReferenceCallback) "time": { SchemaProps: spec.SchemaProps{ Description: "time represents the time the error was encountered.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -43783,7 +43592,7 @@ func schema_k8sio_api_storage_v1beta1_CSIDriverSpec(ref common.ReferenceCallback }, "podInfoOnMount": { SchemaProps: spec.SchemaProps{ - Description: "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + Description: "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. 
It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", Type: []string{"boolean"}, Format: "", }, @@ -44592,7 +44401,6 @@ func schema_k8sio_api_storage_v1beta1_VolumeError(ref common.ReferenceCallback) "time": { SchemaProps: spec.SchemaProps{ Description: "time represents the time the error was encountered.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -44661,8 +44469,7 @@ func schema_pkg_apis_apiextensions_v1_ConversionRequest(ref common.ReferenceCall Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -44699,8 +44506,7 @@ func schema_pkg_apis_apiextensions_v1_ConversionResponse(ref common.ReferenceCal Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -44929,7 +44735,6 @@ func schema_pkg_apis_apiextensions_v1_CustomResourceDefinitionCondition(ref comm "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -45547,8 +45352,7 @@ func schema_pkg_apis_apiextensions_v1_JSONSchemaProps(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), }, }, }, @@ -45669,8 +45473,7 @@ func schema_pkg_apis_apiextensions_v1_JSONSchemaProps(ref common.ReferenceCallba Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSONSchemaPropsOrStringArray"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSONSchemaPropsOrStringArray"), }, }, }, @@ -46026,8 +45829,7 @@ func schema_pkg_apis_apiextensions_v1beta1_ConversionRequest(ref common.Referenc Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -46064,8 +45866,7 @@ func schema_pkg_apis_apiextensions_v1beta1_ConversionResponse(ref common.Referen Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -46309,7 +46110,6 @@ func schema_pkg_apis_apiextensions_v1beta1_CustomResourceDefinitionCondition(ref "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime last time 
the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -46960,8 +46760,7 @@ func schema_pkg_apis_apiextensions_v1beta1_JSONSchemaProps(ref common.ReferenceC Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.JSON"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.JSON"), }, }, }, @@ -47082,8 +46881,7 @@ func schema_pkg_apis_apiextensions_v1beta1_JSONSchemaProps(ref common.ReferenceC Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.JSONSchemaPropsOrStringArray"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.JSONSchemaPropsOrStringArray"), }, }, }, @@ -47852,7 +47650,6 @@ func schema_pkg_apis_meta_v1_Condition(ref common.ReferenceCallback) common.Open "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -48408,8 +48205,7 @@ func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDe Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -48695,7 +48491,6 @@ func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.Ope "creationTimestamp": { SchemaProps: spec.SchemaProps{ Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -49477,7 +49272,6 @@ func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenA "object": { SchemaProps: spec.SchemaProps{ Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. 
The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -49676,7 +49470,6 @@ func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.Ope "object": { SchemaProps: spec.SchemaProps{ Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -50038,14 +49831,12 @@ func schema_pkg_apis_audit_v1_Event(ref common.ReferenceCallback) common.OpenAPI "requestReceivedTimestamp": { SchemaProps: spec.SchemaProps{ Description: "Time the request reached the apiserver.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, "stageTimestamp": { SchemaProps: spec.SchemaProps{ Description: "Time the request reached current audit stage.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -50139,7 +49930,7 @@ func schema_pkg_apis_audit_v1_GroupResources(ref common.ReferenceCallback) commo }, "resources": { SchemaProps: spec.SchemaProps{ - Description: "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' matches pods. 'pods/log' matches the log subresource of pods. '*' matches all resources and their subresources. 'pods/*' matches all subresources of pods. '*/scale' matches all scale subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nAn empty list implies all resources and subresources in this API groups apply.", + Description: "Resources is a list of resources this rule applies to.\n\nFor example: - `pods` matches pods. - `pods/log` matches the log subresource of pods. - `*` matches all resources and their subresources. - `pods/*` matches all subresources of pods. - `*/scale` matches all scale subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nAn empty list implies all resources and subresources in this API groups apply.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -50449,7 +50240,7 @@ func schema_pkg_apis_audit_v1_PolicyRule(ref common.ReferenceCallback) common.Op }, "nonResourceURLs": { SchemaProps: spec.SchemaProps{ - Description: "NonResourceURLs is a set of URL paths that should be audited. *s are allowed, but only as the full, final step in the path. Examples:\n \"/metrics\" - Log requests for apiserver metrics\n \"/healthz*\" - Log all health checks", + Description: "NonResourceURLs is a set of URL paths that should be audited. `*`s are allowed, but only as the full, final step in the path. 
Examples: - `/metrics` - Log requests for apiserver metrics - `/healthz*` - Log all health checks", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -50551,7 +50342,6 @@ func schema_pkg_apis_clientauthentication_v1_Cluster(ref common.ReferenceCallbac "config": { SchemaProps: spec.SchemaProps{ Description: "Config holds additional config data that is specific to the exec plugin with regards to the cluster being authenticated to.\n\nThis data is sourced from the clientcmd Cluster object's extensions[client.authentication.k8s.io/exec] field:\n\nclusters: - name: my-cluster\n cluster:\n ...\n extensions:\n - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config\n extension:\n audience: 06e3fbd18de8 # arbitrary config\n\nIn some environments, the user config may be exactly the same across many clusters (i.e. call this exec plugin) minus some details that are specific to each cluster such as the audience. This field allows the per cluster config to be directly specified with the cluster info. Using this field to store secret data is not recommended as one of the prime benefits of exec plugins is that no secrets need to be stored directly in the kubeconfig.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -50736,7 +50526,6 @@ func schema_pkg_apis_clientauthentication_v1beta1_Cluster(ref common.ReferenceCa "config": { SchemaProps: spec.SchemaProps{ Description: "Config holds additional config data that is specific to the exec plugin with regards to the cluster being authenticated to.\n\nThis data is sourced from the clientcmd Cluster object's extensions[client.authentication.k8s.io/exec] field:\n\nclusters: - name: my-cluster\n cluster:\n ...\n extensions:\n - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config\n extension:\n audience: 06e3fbd18de8 # arbitrary config\n\nIn some environments, the user config may be exactly the same across many clusters (i.e. call this exec plugin) minus some details that are specific to each cluster such as the audience. This field allows the per cluster config to be directly specified with the cluster info. 
Using this field to store secret data is not recommended as one of the prime benefits of exec plugins is that no secrets need to be stored directly in the kubeconfig.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -50915,7 +50704,6 @@ func schema_k8sio_cloud_provider_config_v1alpha1_CloudControllerManagerConfigura "NodeStatusUpdateFrequency": { SchemaProps: spec.SchemaProps{ Description: "NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -51006,14 +50794,12 @@ func schema_k8sio_cloud_provider_config_v1alpha1_KubeCloudSharedConfiguration(re "RouteReconciliationPeriod": { SchemaProps: spec.SchemaProps{ Description: "routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider..", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "NodeMonitorPeriod": { SchemaProps: spec.SchemaProps{ Description: "nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -51059,7 +50845,6 @@ func schema_k8sio_cloud_provider_config_v1alpha1_KubeCloudSharedConfiguration(re "NodeSyncPeriod": { SchemaProps: spec.SchemaProps{ Description: "nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer periods will result in fewer calls to cloud provider, but may delay addition of new nodes to cluster.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -51157,7 +50942,6 @@ func schema_k8sio_controller_manager_config_v1alpha1_GenericControllerManagerCon "MinResyncPeriod": { SchemaProps: spec.SchemaProps{ Description: "minResyncPeriod is the resync period in reflectors; will be random between minResyncPeriod and 2*minResyncPeriod.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -51171,7 +50955,6 @@ func schema_k8sio_controller_manager_config_v1alpha1_GenericControllerManagerCon "ControllerStartInterval": { SchemaProps: spec.SchemaProps{ Description: "How long to wait between starting controller managers", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -51459,7 +51242,6 @@ func schema_pkg_apis_apiregistration_v1_APIServiceCondition(ref common.Reference "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -51585,7 +51367,7 @@ func schema_pkg_apis_apiregistration_v1_APIServiceSpec(ref common.ReferenceCallb }, "groupPriorityMinimum": { SchemaProps: spec.SchemaProps{ - Description: "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. 
(v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + Description: "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", Default: 0, Type: []string{"integer"}, Format: "int32", @@ -51757,7 +51539,6 @@ func schema_pkg_apis_apiregistration_v1beta1_APIServiceCondition(ref common.Refe "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -51883,7 +51664,7 @@ func schema_pkg_apis_apiregistration_v1beta1_APIServiceSpec(ref common.Reference }, "groupPriorityMinimum": { SchemaProps: spec.SchemaProps{ - Description: "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + Description: "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", Default: 0, Type: []string{"integer"}, Format: "int32", @@ -51996,8 +51777,7 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_AttachDetachController }, "ReconcilerSyncLoopPeriod": { SchemaProps: spec.SchemaProps{ - Description: "ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop wait between successive executions. Is set to 5 sec by default.", - Default: 0, + Description: "ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop wait between successive executions. 
Is set to 60 sec by default.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52094,7 +51874,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_CSRSigningControllerCo "ClusterSigningDuration": { SchemaProps: spec.SchemaProps{ Description: "clusterSigningDuration is the max length of duration signed certificates will be given. Individual CSRs may request shorter certs by setting spec.expirationSeconds.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52202,7 +51981,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_EndpointControllerConf "EndpointUpdatesBatchPeriod": { SchemaProps: spec.SchemaProps{ Description: "EndpointUpdatesBatchPeriod describes the length of endpoint updates batching period. Processing of pod changes will be delayed by this duration to join them with potential upcoming updates and reduce the overall number of endpoints updates.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52241,7 +52019,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_EndpointSliceControlle "EndpointUpdatesBatchPeriod": { SchemaProps: spec.SchemaProps{ Description: "EndpointUpdatesBatchPeriod describes the length of endpoint updates batching period. Processing of pod changes will be delayed by this duration to join them with potential upcoming updates and reduce the overall number of endpoints updates.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52280,7 +52057,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_EndpointSliceMirroring "MirroringEndpointUpdatesBatchPeriod": { SchemaProps: spec.SchemaProps{ Description: "mirroringEndpointUpdatesBatchPeriod can be used to batch EndpointSlice updates. All updates triggered by EndpointSlice changes will be delayed by up to 'mirroringEndpointUpdatesBatchPeriod'. If other addresses in the same Endpoints resource change in that period, they will be batched to a single EndpointSlice update. 
Default 0 value means that each Endpoints update triggers an EndpointSlice update.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52408,28 +52184,24 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_HPAControllerConfigura "HorizontalPodAutoscalerSyncPeriod": { SchemaProps: spec.SchemaProps{ Description: "HorizontalPodAutoscalerSyncPeriod is the period for syncing the number of pods in horizontal pod autoscaler.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "HorizontalPodAutoscalerUpscaleForbiddenWindow": { SchemaProps: spec.SchemaProps{ Description: "HorizontalPodAutoscalerUpscaleForbiddenWindow is a period after which next upscale allowed.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "HorizontalPodAutoscalerDownscaleStabilizationWindow": { SchemaProps: spec.SchemaProps{ Description: "HorizontalPodAutoscalerDowncaleStabilizationWindow is a period for which autoscaler will look backwards and not scale down below any recommendation it made during that period.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "HorizontalPodAutoscalerDownscaleForbiddenWindow": { SchemaProps: spec.SchemaProps{ Description: "HorizontalPodAutoscalerDownscaleForbiddenWindow is a period after which next downscale allowed.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52444,14 +52216,12 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_HPAControllerConfigura "HorizontalPodAutoscalerCPUInitializationPeriod": { SchemaProps: spec.SchemaProps{ Description: "HorizontalPodAutoscalerCPUInitializationPeriod is the period after pod start when CPU samples might be skipped.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "HorizontalPodAutoscalerInitialReadinessDelay": { SchemaProps: spec.SchemaProps{ Description: "HorizontalPodAutoscalerInitialReadinessDelay is period after pod start during which readiness changes are treated as readiness being set for the first time. 
The only effect of this is that HPA will disregard CPU samples from unready pods that had last readiness change during that period.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52729,7 +52499,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_LegacySATokenCleanerCo "CleanUpPeriod": { SchemaProps: spec.SchemaProps{ Description: "CleanUpPeriod is the period of time since the last usage of an auto-generated service account token before it can be deleted.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52752,7 +52521,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_NamespaceControllerCon "NamespaceSyncPeriod": { SchemaProps: spec.SchemaProps{ Description: "namespaceSyncPeriod is the period for syncing namespace life-cycle updates.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52853,21 +52621,18 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_NodeLifecycleControlle "NodeStartupGracePeriod": { SchemaProps: spec.SchemaProps{ Description: "nodeStartupGracePeriod is the amount of time which we allow starting a node to be unresponsive before marking it unhealthy.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "NodeMonitorGracePeriod": { SchemaProps: spec.SchemaProps{ Description: "nodeMontiorGracePeriod is the amount of time which we allow a running node to be unresponsive before marking it unhealthy. Must be N times more than kubelet's nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet to post node status.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "PodEvictionTimeout": { SchemaProps: spec.SchemaProps{ Description: "podEvictionTimeout is the grace period for deleting pods on failed nodes.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -52906,7 +52671,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_PersistentVolumeBinder "PVClaimBinderSyncPeriod": { SchemaProps: spec.SchemaProps{ Description: "pvClaimBinderSyncPeriod is the period for syncing persistent volumes and persistent volume claims.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -53094,7 +52858,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_ResourceQuotaControlle "ResourceQuotaSyncPeriod": { SchemaProps: spec.SchemaProps{ Description: "resourceQuotaSyncPeriod is the period for syncing quota usage status in the system.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -53273,7 +53036,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_DetectLocalConfiguration(ref common Properties: map[string]spec.Schema{ "bridgeInterface": { SchemaProps: spec.SchemaProps{ - Description: "BridgeInterface is a string argument which represents a single bridge interface name. Kube-proxy considers traffic as local if originating from this given bridge. This argument should be set if DetectLocalMode is set to LocalModeBridgeInterface.", + Description: "bridgeInterface is a bridge interface name. 
When DetectLocalMode is set to LocalModeBridgeInterface, kube-proxy will consider traffic to be local if it originates from this bridge.", Default: "", Type: []string{"string"}, Format: "", @@ -53281,7 +53044,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_DetectLocalConfiguration(ref common }, "interfaceNamePrefix": { SchemaProps: spec.SchemaProps{ - Description: "InterfaceNamePrefix is a string argument which represents a single interface prefix name. Kube-proxy considers traffic as local if originating from one or more interfaces which match the given prefix. This argument should be set if DetectLocalMode is set to LocalModeInterfaceNamePrefix.", + Description: "interfaceNamePrefix is an interface name prefix. When DetectLocalMode is set to LocalModeInterfaceNamePrefix, kube-proxy will consider traffic to be local if it originates from any interface whose name begins with this prefix.", Default: "", Type: []string{"string"}, Format: "", @@ -53331,9 +53094,31 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R }, }, }, + "clientConnection": { + SchemaProps: spec.SchemaProps{ + Description: "clientConnection specifies the kubeconfig file and client connection settings for the proxy server to use when communicating with the apiserver.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/component-base/config/v1alpha1.ClientConnectionConfiguration"), + }, + }, + "logging": { + SchemaProps: spec.SchemaProps{ + Description: "logging specifies the options of logging. Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/component-base/logs/api/v1.LoggingConfiguration"), + }, + }, + "hostnameOverride": { + SchemaProps: spec.SchemaProps{ + Description: "hostnameOverride, if non-empty, will be used as the name of the Node that kube-proxy is running on. If unset, the node name is assumed to be the same as the node's hostname.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, "bindAddress": { SchemaProps: spec.SchemaProps{ - Description: "bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)", + Description: "bindAddress can be used to override kube-proxy's idea of what its node's primary IP is. 
Note that the name is a historical artifact, and kube-proxy does not actually bind any sockets to this IP.", Default: "", Type: []string{"string"}, Format: "", @@ -53341,7 +53126,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R }, "healthzBindAddress": { SchemaProps: spec.SchemaProps{ - Description: "healthzBindAddress is the IP address and port for the health check server to serve on, defaulting to 0.0.0.0:10256", + Description: "healthzBindAddress is the IP address and port for the health check server to serve on, defaulting to \"0.0.0.0:10256\" (if bindAddress is unset or IPv4), or \"[::]:10256\" (if bindAddress is IPv6).", Default: "", Type: []string{"string"}, Format: "", @@ -53349,7 +53134,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R }, "metricsBindAddress": { SchemaProps: spec.SchemaProps{ - Description: "metricsBindAddress is the IP address and port for the metrics server to serve on, defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)", + Description: "metricsBindAddress is the IP address and port for the metrics server to serve on, defaulting to \"127.0.0.1:10249\" (if bindAddress is unset or IPv4), or \"[::1]:10249\" (if bindAddress is IPv6). (Set to \"0.0.0.0:10249\" / \"[::]:10249\" to bind on all interfaces.)", Default: "", Type: []string{"string"}, Format: "", @@ -53357,7 +53142,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R }, "bindAddressHardFail": { SchemaProps: spec.SchemaProps{ - Description: "bindAddressHardFail, if true, kube-proxy will treat failure to bind to a port as fatal and exit", + Description: "bindAddressHardFail, if true, tells kube-proxy to treat failure to bind to a port as fatal and exit", Default: false, Type: []string{"boolean"}, Format: "", @@ -53371,29 +53156,22 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R Format: "", }, }, - "clusterCIDR": { + "showHiddenMetricsForVersion": { SchemaProps: spec.SchemaProps{ - Description: "clusterCIDR is the CIDR range of the pods in the cluster. It is used to bridge traffic coming from outside of the cluster. If not provided, no off-cluster bridging will be performed.", + Description: "showHiddenMetricsForVersion is the version for which you want to show hidden metrics.", Default: "", Type: []string{"string"}, Format: "", }, }, - "hostnameOverride": { + "mode": { SchemaProps: spec.SchemaProps{ - Description: "hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.", + Description: "mode specifies which proxy mode to use.", Default: "", Type: []string{"string"}, Format: "", }, }, - "clientConnection": { - SchemaProps: spec.SchemaProps{ - Description: "clientConnection specifies the kubeconfig file and client connection settings for the proxy server to use when communicating with the apiserver.", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/component-base/config/v1alpha1.ClientConnectionConfiguration"), - }, - }, "iptables": { SchemaProps: spec.SchemaProps{ Description: "iptables contains iptables-related configuration options.", @@ -53408,46 +53186,39 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R Ref: ref("k8s.io/kube-proxy/config/v1alpha1.KubeProxyIPVSConfiguration"), }, }, - "oomScoreAdj": { - SchemaProps: spec.SchemaProps{ - Description: "oomScoreAdj is the oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000]", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "mode": { + "winkernel": { SchemaProps: spec.SchemaProps{ - Description: "mode specifies which proxy mode to use.", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "winkernel contains winkernel-related configuration options.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/kube-proxy/config/v1alpha1.KubeProxyWinkernelConfiguration"), }, }, - "portRange": { + "detectLocalMode": { SchemaProps: spec.SchemaProps{ - Description: "portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.", + Description: "detectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR", Default: "", Type: []string{"string"}, Format: "", }, }, - "conntrack": { + "detectLocal": { SchemaProps: spec.SchemaProps{ - Description: "conntrack contains conntrack-related configuration options.", + Description: "detectLocal contains optional configuration settings related to DetectLocalMode.", Default: map[string]interface{}{}, - Ref: ref("k8s.io/kube-proxy/config/v1alpha1.KubeProxyConntrackConfiguration"), + Ref: ref("k8s.io/kube-proxy/config/v1alpha1.DetectLocalConfiguration"), }, }, - "configSyncPeriod": { + "clusterCIDR": { SchemaProps: spec.SchemaProps{ - Description: "configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater than 0.", - Default: 0, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "clusterCIDR is the CIDR range of the pods in the cluster. (For dual-stack clusters, this can be a comma-separated dual-stack pair of CIDR ranges.). When DetectLocalMode is set to LocalModeClusterCIDR, kube-proxy will consider traffic to be local if its source IP is in this range. (Otherwise it is not used.)", + Default: "", + Type: []string{"string"}, + Format: "", }, }, "nodePortAddresses": { SchemaProps: spec.SchemaProps{ - Description: "nodePortAddresses is the --nodeport-addresses value for kube-proxy process. Values must be valid IP blocks. These values are as a parameter to select the interfaces where nodeport works. In case someone would like to expose a service on localhost for local visit and some other interfaces for particular purpose, a list of IP blocks would do that. If set it to \"127.0.0.0/8\", kube-proxy will only select the loopback interface for NodePort. If set it to a non-zero IP block, kube-proxy will filter that down to just the IPs that applied to the node. An empty string slice is meant to select all network interfaces.", + Description: "nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If unset, NodePort connections will be accepted on all local IPs.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -53460,45 +53231,36 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R }, }, }, - "winkernel": { + "oomScoreAdj": { SchemaProps: spec.SchemaProps{ - Description: "winkernel contains winkernel-related configuration options.", + Description: "oomScoreAdj is the oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000]", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "conntrack": { + SchemaProps: spec.SchemaProps{ + Description: "conntrack contains conntrack-related configuration options.", Default: map[string]interface{}{}, - Ref: ref("k8s.io/kube-proxy/config/v1alpha1.KubeProxyWinkernelConfiguration"), + Ref: ref("k8s.io/kube-proxy/config/v1alpha1.KubeProxyConntrackConfiguration"), }, }, - "showHiddenMetricsForVersion": { + "configSyncPeriod": { SchemaProps: spec.SchemaProps{ - Description: "ShowHiddenMetricsForVersion is the version for which you want to show hidden metrics.", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater than 0.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, - "detectLocalMode": { + "portRange": { SchemaProps: spec.SchemaProps{ - Description: "DetectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR", + Description: "portRange was previously used to configure the userspace proxy, but is now unused.", Default: "", Type: []string{"string"}, Format: "", }, }, - "detectLocal": { - SchemaProps: spec.SchemaProps{ - Description: "DetectLocal contains optional configuration settings related to DetectLocalMode.", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/kube-proxy/config/v1alpha1.DetectLocalConfiguration"), - }, - }, - "logging": { - SchemaProps: spec.SchemaProps{ - Description: "logging specifies the options of logging. Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/component-base/logs/api/v1.LoggingConfiguration"), - }, - }, }, - Required: []string{"bindAddress", "healthzBindAddress", "metricsBindAddress", "bindAddressHardFail", "enableProfiling", "clusterCIDR", "hostnameOverride", "clientConnection", "iptables", "ipvs", "oomScoreAdj", "mode", "portRange", "conntrack", "configSyncPeriod", "nodePortAddresses", "winkernel", "showHiddenMetricsForVersion", "detectLocalMode", "detectLocal"}, + Required: []string{"clientConnection", "hostnameOverride", "bindAddress", "healthzBindAddress", "metricsBindAddress", "bindAddressHardFail", "enableProfiling", "showHiddenMetricsForVersion", "mode", "iptables", "ipvs", "winkernel", "detectLocalMode", "detectLocal", "clusterCIDR", "nodePortAddresses", "oomScoreAdj", "conntrack", "configSyncPeriod", "portRange"}, }, }, Dependencies: []string{ @@ -53522,7 +53284,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConntrackConfiguration(ref }, "min": { SchemaProps: spec.SchemaProps{ - Description: "min is the minimum value of connect-tracking records to allocate, regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is).", + Description: "min is the minimum value of connect-tracking records to allocate, regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is).", Type: []string{"integer"}, Format: "int32", }, @@ -53539,8 +53301,20 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConntrackConfiguration(ref Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, + "udpTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "udpTimeout is how long an idle UDP conntrack entry in UNREPLIED state will remain in the conntrack table (e.g. '30s'). 
Must be greater than 0 to set.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "udpStreamTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "udpStreamTimeout is how long an idle UDP conntrack entry in ASSURED state will remain in the conntrack table (e.g. '300s'). Must be greater than 0 to set.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, }, - Required: []string{"maxPerCore", "min", "tcpEstablishedTimeout", "tcpCloseWaitTimeout"}, + Required: []string{"maxPerCore", "min", "tcpEstablishedTimeout", "tcpCloseWaitTimeout", "udpTimeout", "udpStreamTimeout"}, }, }, Dependencies: []string{ @@ -53557,14 +53331,14 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyIPTablesConfiguration(ref Properties: map[string]spec.Schema{ "masqueradeBit": { SchemaProps: spec.SchemaProps{ - Description: "masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using the pure iptables proxy mode. Values must be within the range [0, 31].", + Description: "masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using the iptables or ipvs proxy mode. Values must be within the range [0, 31].", Type: []string{"integer"}, Format: "int32", }, }, "masqueradeAll": { SchemaProps: spec.SchemaProps{ - Description: "masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.", + Description: "masqueradeAll tells kube-proxy to SNAT all traffic sent to Service cluster IPs, when using the iptables or ipvs proxy mode. This may be required with some CNI plugins.", Default: false, Type: []string{"boolean"}, Format: "", @@ -53572,22 +53346,20 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyIPTablesConfiguration(ref }, "localhostNodePorts": { SchemaProps: spec.SchemaProps{ - Description: "LocalhostNodePorts tells kube-proxy to allow service NodePorts to be accessed via localhost (iptables mode only)", + Description: "localhostNodePorts, if false, tells kube-proxy to disable the legacy behavior of allowing NodePort services to be accessed via localhost. (Applies only to iptables mode and IPv4; localhost NodePorts are never allowed with other proxy modes or with IPv6.)", Type: []string{"boolean"}, Format: "", }, }, "syncPeriod": { SchemaProps: spec.SchemaProps{ - Description: "syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.", - Default: 0, + Description: "syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently various re-synchronizing and cleanup operations are performed. Must be greater than 0.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "minSyncPeriod": { SchemaProps: spec.SchemaProps{ - Description: "minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', '2h22m').", - Default: 0, + Description: "minSyncPeriod is the minimum period between iptables rule resyncs (e.g. '5s', '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will result in an immediate iptables resync.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -53609,21 +53381,19 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyIPVSConfiguration(ref comm Properties: map[string]spec.Schema{ "syncPeriod": { SchemaProps: spec.SchemaProps{ - Description: "syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.", - Default: 0, + Description: "syncPeriod is an interval (e.g. 
'5s', '1m', '2h22m') indicating how frequently various re-synchronizing and cleanup operations are performed. Must be greater than 0.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "minSyncPeriod": { SchemaProps: spec.SchemaProps{ - Description: "minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', '2h22m').", - Default: 0, + Description: "minSyncPeriod is the minimum period between IPVS rule resyncs (e.g. '5s', '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will result in an immediate IPVS resync.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "scheduler": { SchemaProps: spec.SchemaProps{ - Description: "ipvs scheduler", + Description: "scheduler is the IPVS scheduler to use", Default: "", Type: []string{"string"}, Format: "", @@ -53631,7 +53401,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyIPVSConfiguration(ref comm }, "excludeCIDRs": { SchemaProps: spec.SchemaProps{ - Description: "excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch when cleaning up ipvs services.", + Description: "excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch when cleaning up ipvs services.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -53646,7 +53416,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyIPVSConfiguration(ref comm }, "strictARP": { SchemaProps: spec.SchemaProps{ - Description: "strict ARP configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface", + Description: "strictARP configures arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface", Default: false, Type: []string{"boolean"}, Format: "", @@ -53655,21 +53425,18 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyIPVSConfiguration(ref comm "tcpTimeout": { SchemaProps: spec.SchemaProps{ Description: "tcpTimeout is the timeout value used for idle IPVS TCP sessions. The default value is 0, which preserves the current timeout value on the system.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "tcpFinTimeout": { SchemaProps: spec.SchemaProps{ Description: "tcpFinTimeout is the timeout value used for IPVS TCP sessions after receiving a FIN. The default value is 0, which preserves the current timeout value on the system.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "udpTimeout": { SchemaProps: spec.SchemaProps{ Description: "udpTimeout is the timeout value used for IPVS UDP packets. 
The default value is 0, which preserves the current timeout value on the system.", - Default: 0, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -53699,7 +53466,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyWinkernelConfiguration(ref }, "sourceVip": { SchemaProps: spec.SchemaProps{ - Description: "sourceVip is the IP address of the source VIP endoint used for NAT when loadbalancing", + Description: "sourceVip is the IP address of the source VIP endpoint used for NAT when loadbalancing", Default: "", Type: []string{"string"}, Format: "", @@ -53715,7 +53482,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyWinkernelConfiguration(ref }, "rootHnsEndpointName": { SchemaProps: spec.SchemaProps{ - Description: "RootHnsEndpointName is the name of hnsendpoint that is attached to l2bridge for root network namespace", + Description: "rootHnsEndpointName is the name of hnsendpoint that is attached to l2bridge for root network namespace", Default: "", Type: []string{"string"}, Format: "", @@ -53723,7 +53490,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyWinkernelConfiguration(ref }, "forwardHealthCheckVip": { SchemaProps: spec.SchemaProps{ - Description: "ForwardHealthCheckVip forwards service VIP for health check port on Windows", + Description: "forwardHealthCheckVip forwards service VIP for health check port on Windows", Default: false, Type: []string{"boolean"}, Format: "", @@ -54443,7 +54210,6 @@ func schema_k8sio_kube_scheduler_config_v1_PluginConfig(ref common.ReferenceCall "args": { SchemaProps: spec.SchemaProps{ Description: "Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -55699,6 +55465,12 @@ func schema_k8sio_kubelet_config_v1beta1_KubeletConfiguration(ref common.Referen Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, + "imageMaximumGCAge": { + SchemaProps: spec.SchemaProps{ + Description: "imageMaximumGCAge is the maximum age an image can be unused before it is garbage collected. The default of this field is \"0s\", which disables this field--meaning images won't be garbage collected based on being unused for too long. Default: \"0s\" (disabled)", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, "imageGCHighThresholdPercent": { SchemaProps: spec.SchemaProps{ Description: "imageGCHighThresholdPercent is the percent of disk usage after which image garbage collection is always run. The percent is calculated by dividing this field value by 100, so this field must be between 0 and 100, inclusive. When specified, the value must be greater than imageGCLowThresholdPercent. 
Default: 85", @@ -56467,8 +56239,7 @@ func schema_k8sio_kubelet_config_v1beta1_MemoryReservation(ref common.ReferenceC Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -56747,7 +56518,6 @@ func schema_pkg_apis_custom_metrics_v1beta1_MetricValue(ref common.ReferenceCall "timestamp": { SchemaProps: spec.SchemaProps{ Description: "indicates the time at which the metrics were produced", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -56761,7 +56531,6 @@ func schema_pkg_apis_custom_metrics_v1beta1_MetricValue(ref common.ReferenceCall "value": { SchemaProps: spec.SchemaProps{ Description: "the value of the metric for this", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -56938,7 +56707,6 @@ func schema_pkg_apis_custom_metrics_v1beta2_MetricValue(ref common.ReferenceCall "timestamp": { SchemaProps: spec.SchemaProps{ Description: "indicates the time at which the metrics were produced", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -56952,7 +56720,6 @@ func schema_pkg_apis_custom_metrics_v1beta2_MetricValue(ref common.ReferenceCall "value": { SchemaProps: spec.SchemaProps{ Description: "the value of the metric for this", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -57063,7 +56830,6 @@ func schema_pkg_apis_external_metrics_v1beta1_ExternalMetricValue(ref common.Ref "timestamp": { SchemaProps: spec.SchemaProps{ Description: "indicates the time at which the metrics were produced", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -57077,7 +56843,6 @@ func schema_pkg_apis_external_metrics_v1beta1_ExternalMetricValue(ref common.Ref "value": { SchemaProps: spec.SchemaProps{ Description: "the value of the metric", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -57163,8 +56928,7 @@ func schema_pkg_apis_metrics_v1alpha1_ContainerMetrics(ref common.ReferenceCallb Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -57210,14 +56974,12 @@ func schema_pkg_apis_metrics_v1alpha1_NodeMetrics(ref common.ReferenceCallback) "timestamp": { SchemaProps: spec.SchemaProps{ Description: "The following fields define time interval from which metrics were collected from the interval [Timestamp-Window, Timestamp].", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "window": { SchemaProps: spec.SchemaProps{ - Default: 0, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "usage": { @@ -57228,8 +56990,7 @@ func schema_pkg_apis_metrics_v1alpha1_NodeMetrics(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -57326,14 +57087,12 @@ func schema_pkg_apis_metrics_v1alpha1_PodMetrics(ref 
common.ReferenceCallback) c "timestamp": { SchemaProps: spec.SchemaProps{ Description: "The following fields define time interval from which metrics were collected from the interval [Timestamp-Window, Timestamp].", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "window": { SchemaProps: spec.SchemaProps{ - Default: 0, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "containers": { @@ -57433,8 +57192,7 @@ func schema_pkg_apis_metrics_v1beta1_ContainerMetrics(ref common.ReferenceCallba Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -57480,14 +57238,12 @@ func schema_pkg_apis_metrics_v1beta1_NodeMetrics(ref common.ReferenceCallback) c "timestamp": { SchemaProps: spec.SchemaProps{ Description: "The following fields define time interval from which metrics were collected from the interval [Timestamp-Window, Timestamp].", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "window": { SchemaProps: spec.SchemaProps{ - Default: 0, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "usage": { @@ -57498,8 +57254,7 @@ func schema_pkg_apis_metrics_v1beta1_NodeMetrics(ref common.ReferenceCallback) c Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -57596,14 +57351,12 @@ func schema_pkg_apis_metrics_v1beta1_PodMetrics(ref common.ReferenceCallback) co "timestamp": { SchemaProps: spec.SchemaProps{ Description: "The following fields define time interval from which metrics were collected from the interval [Timestamp-Window, Timestamp].", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "window": { SchemaProps: spec.SchemaProps{ - Default: 0, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, "containers": { diff --git a/pkg/kubeapiserver/authenticator/config.go b/pkg/kubeapiserver/authenticator/config.go index 5f77d2babc126..9d32d00d9bae4 100644 --- a/pkg/kubeapiserver/authenticator/config.go +++ b/pkg/kubeapiserver/authenticator/config.go @@ -41,6 +41,7 @@ import ( "k8s.io/apiserver/plugin/pkg/authenticator/token/oidc" "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook" typedv1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" // Initialize all known client auth plugins. @@ -89,10 +90,11 @@ type Config struct { // New returns an authenticator.Request or an error that supports the standard // Kubernetes authentication mechanisms. 
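A note on the authenticator change that follows: Config.New now hands back OpenAPI v3 security schemes (spec3.SecuritySchemes) alongside the existing v2 SecurityDefinitions, so the v3 document no longer has to reuse the v2 structures. As a rough, illustrative rendering only (not part of this patch), the BearerToken scheme registered below would surface in the served OpenAPI v3 document under components roughly as:

components:
  securitySchemes:
    BearerToken:
      type: apiKey
      name: authorization
      in: header
      description: Bearer Token authentication
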
-func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, error) { +func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, spec3.SecuritySchemes, error) { var authenticators []authenticator.Request var tokenAuthenticators []authenticator.Token - securityDefinitions := spec.SecurityDefinitions{} + securityDefinitionsV2 := spec.SecurityDefinitions{} + securitySchemesV3 := spec3.SecuritySchemes{} // front-proxy, BasicAuth methods, local first, then remote // Add the front proxy authenticator if requested @@ -117,21 +119,21 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er if len(config.TokenAuthFile) > 0 { tokenAuth, err := newAuthenticatorFromTokenFile(config.TokenAuthFile) if err != nil { - return nil, nil, err + return nil, nil, nil, err } tokenAuthenticators = append(tokenAuthenticators, authenticator.WrapAudienceAgnosticToken(config.APIAudiences, tokenAuth)) } if len(config.ServiceAccountKeyFiles) > 0 { serviceAccountAuth, err := newLegacyServiceAccountAuthenticator(config.ServiceAccountKeyFiles, config.ServiceAccountLookup, config.APIAudiences, config.ServiceAccountTokenGetter, config.SecretsWriter) if err != nil { - return nil, nil, err + return nil, nil, nil, err } tokenAuthenticators = append(tokenAuthenticators, serviceAccountAuth) } if len(config.ServiceAccountIssuers) > 0 { serviceAccountAuth, err := newServiceAccountAuthenticator(config.ServiceAccountIssuers, config.ServiceAccountKeyFiles, config.APIAudiences, config.ServiceAccountTokenGetter) if err != nil { - return nil, nil, err + return nil, nil, nil, err } tokenAuthenticators = append(tokenAuthenticators, serviceAccountAuth) } @@ -153,7 +155,7 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er var oidcCAError error oidcCAContent, oidcCAError = dynamiccertificates.NewStaticCAContent("oidc-authenticator", []byte(jwtAuthenticator.Issuer.CertificateAuthority)) if oidcCAError != nil { - return nil, nil, oidcCAError + return nil, nil, nil, oidcCAError } } oidcAuth, err := oidc.New(oidc.Options{ @@ -162,7 +164,7 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er SupportedSigningAlgs: config.OIDCSigningAlgs, }) if err != nil { - return nil, nil, err + return nil, nil, nil, err } tokenAuthenticators = append(tokenAuthenticators, authenticator.WrapAudienceAgnosticToken(config.APIAudiences, oidcAuth)) } @@ -171,7 +173,7 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er if len(config.WebhookTokenAuthnConfigFile) > 0 { webhookTokenAuth, err := newWebhookTokenAuthenticator(config) if err != nil { - return nil, nil, err + return nil, nil, nil, err } tokenAuthenticators = append(tokenAuthenticators, webhookTokenAuth) @@ -185,7 +187,8 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er tokenAuth = tokencache.New(tokenAuth, true, config.TokenSuccessCacheTTL, config.TokenFailureCacheTTL) } authenticators = append(authenticators, bearertoken.New(tokenAuth), websocket.NewProtocolAuthenticator(tokenAuth)) - securityDefinitions["BearerToken"] = &spec.SecurityScheme{ + + securityDefinitionsV2["BearerToken"] = &spec.SecurityScheme{ SecuritySchemeProps: spec.SecuritySchemeProps{ Type: "apiKey", Name: "authorization", @@ -193,13 +196,21 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er Description: "Bearer Token authentication", }, } + securitySchemesV3["BearerToken"] = &spec3.SecurityScheme{ + 
SecuritySchemeProps: spec3.SecuritySchemeProps{ + Type: "apiKey", + Name: "authorization", + In: "header", + Description: "Bearer Token authentication", + }, + } } if len(authenticators) == 0 { if config.Anonymous { - return anonymous.NewAuthenticator(), &securityDefinitions, nil + return anonymous.NewAuthenticator(), &securityDefinitionsV2, securitySchemesV3, nil } - return nil, &securityDefinitions, nil + return nil, &securityDefinitionsV2, securitySchemesV3, nil } authenticator := union.New(authenticators...) @@ -212,7 +223,7 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er authenticator = union.NewFailOnError(authenticator, anonymous.NewAuthenticator()) } - return authenticator, &securityDefinitions, nil + return authenticator, &securityDefinitionsV2, securitySchemesV3, nil } // IsValidServiceAccountKeyFile returns true if a valid public RSA key can be read from the given file diff --git a/pkg/kubeapiserver/authorizer/config.go b/pkg/kubeapiserver/authorizer/config.go index f9c6aeabc8638..c33e2fa24d3dd 100644 --- a/pkg/kubeapiserver/authorizer/config.go +++ b/pkg/kubeapiserver/authorizer/config.go @@ -19,10 +19,9 @@ package authorizer import ( "errors" "fmt" - "time" - utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" + authzconfig "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" @@ -40,8 +39,6 @@ import ( // Config contains the data on how to authorize a request to the Kube API Server type Config struct { - AuthorizationModes []string - // Options for ModeABAC // Path to an ABAC policy file. @@ -49,14 +46,6 @@ type Config struct { // Options for ModeWebhook - // Kubeconfig file for Webhook authorization plugin. - WebhookConfigFile string - // API version of subject access reviews to send to the webhook (e.g. "v1", "v1beta1") - WebhookVersion string - // TTL for caching of authorized responses from the webhook server. - WebhookCacheAuthorizedTTL time.Duration - // TTL for caching of unauthorized responses from the webhook server. - WebhookCacheUnauthorizedTTL time.Duration // WebhookRetryBackoff specifies the backoff parameters for the authorization webhook retry logic. // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. @@ -66,12 +55,16 @@ type Config struct { // Optional field, custom dial function used to connect to webhook CustomDial utilnet.DialFunc + + // AuthorizationConfiguration stores the configuration for the Authorizer chain + // It will deprecate most of the above flags when GA + AuthorizationConfiguration *authzconfig.AuthorizationConfiguration } // New returns the right sort of union of multiple authorizer.Authorizer objects // based on the authorizationMode or an error. 
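The authorizer construction that follows is driven by an AuthorizationConfiguration object rather than a flat list of mode strings, so each webhook entry carries its own kubeconfig path, TTLs, timeout, and failure policy. As a sketch only, a file passed to kube-apiserver via --authorization-config might look roughly like this (the webhook name, file path, and TTL values are illustrative; non-webhook authorizers use the lower-cased mode name, which the options validation later in this patch enforces):

apiVersion: apiserver.config.k8s.io/v1alpha1
kind: AuthorizationConfiguration
authorizers:
  - type: Node
    name: node
  - type: RBAC
    name: rbac
  - type: Webhook
    name: example-webhook
    webhook:
      timeout: 3s
      subjectAccessReviewVersion: v1
      failurePolicy: NoOpinion
      authorizedTTL: 5m
      unauthorizedTTL: 30s
      connectionInfo:
        type: KubeConfigFile
        kubeConfigFile: /etc/kubernetes/authz-webhook.kubeconfig

When only the legacy flags are used, the options code further down builds an equivalent structure itself: the webhook authorizer is named "default", the other mode names are lower-cased, and the webhook timeout and failure policy default to 30s and NoOpinion.
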
func (config Config) New() (authorizer.Authorizer, authorizer.RuleResolver, error) { - if len(config.AuthorizationModes) == 0 { + if len(config.AuthorizationConfiguration.Authorizers) == 0 { return nil, nil, fmt.Errorf("at least one authorization mode must be passed") } @@ -84,10 +77,10 @@ func (config Config) New() (authorizer.Authorizer, authorizer.RuleResolver, erro superuserAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) authorizers = append(authorizers, superuserAuthorizer) - for _, authorizationMode := range config.AuthorizationModes { + for _, configuredAuthorizer := range config.AuthorizationConfiguration.Authorizers { // Keep cases in sync with constant list in k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go. - switch authorizationMode { - case modes.ModeNode: + switch configuredAuthorizer.Type { + case authzconfig.AuthorizerType(modes.ModeNode): node.RegisterMetrics() graph := node.NewGraph() node.AddGraphEventHandlers( @@ -101,33 +94,33 @@ func (config Config) New() (authorizer.Authorizer, authorizer.RuleResolver, erro authorizers = append(authorizers, nodeAuthorizer) ruleResolvers = append(ruleResolvers, nodeAuthorizer) - case modes.ModeAlwaysAllow: + case authzconfig.AuthorizerType(modes.ModeAlwaysAllow): alwaysAllowAuthorizer := authorizerfactory.NewAlwaysAllowAuthorizer() authorizers = append(authorizers, alwaysAllowAuthorizer) ruleResolvers = append(ruleResolvers, alwaysAllowAuthorizer) - case modes.ModeAlwaysDeny: + case authzconfig.AuthorizerType(modes.ModeAlwaysDeny): alwaysDenyAuthorizer := authorizerfactory.NewAlwaysDenyAuthorizer() authorizers = append(authorizers, alwaysDenyAuthorizer) ruleResolvers = append(ruleResolvers, alwaysDenyAuthorizer) - case modes.ModeABAC: + case authzconfig.AuthorizerType(modes.ModeABAC): abacAuthorizer, err := abac.NewFromFile(config.PolicyFile) if err != nil { return nil, nil, err } authorizers = append(authorizers, abacAuthorizer) ruleResolvers = append(ruleResolvers, abacAuthorizer) - case modes.ModeWebhook: + case authzconfig.AuthorizerType(modes.ModeWebhook): if config.WebhookRetryBackoff == nil { return nil, nil, errors.New("retry backoff parameters for authorization webhook has not been specified") } - clientConfig, err := webhookutil.LoadKubeconfig(config.WebhookConfigFile, config.CustomDial) + clientConfig, err := webhookutil.LoadKubeconfig(*configuredAuthorizer.Webhook.ConnectionInfo.KubeConfigFile, config.CustomDial) if err != nil { return nil, nil, err } webhookAuthorizer, err := webhook.New(clientConfig, - config.WebhookVersion, - config.WebhookCacheAuthorizedTTL, - config.WebhookCacheUnauthorizedTTL, + configuredAuthorizer.Webhook.SubjectAccessReviewVersion, + configuredAuthorizer.Webhook.AuthorizedTTL.Duration, + configuredAuthorizer.Webhook.UnauthorizedTTL.Duration, *config.WebhookRetryBackoff, ) if err != nil { @@ -135,7 +128,7 @@ func (config Config) New() (authorizer.Authorizer, authorizer.RuleResolver, erro } authorizers = append(authorizers, webhookAuthorizer) ruleResolvers = append(ruleResolvers, webhookAuthorizer) - case modes.ModeRBAC: + case authzconfig.AuthorizerType(modes.ModeRBAC): rbacAuthorizer := rbac.New( &rbac.RoleGetter{Lister: config.VersionedInformerFactory.Rbac().V1().Roles().Lister()}, &rbac.RoleBindingLister{Lister: config.VersionedInformerFactory.Rbac().V1().RoleBindings().Lister()}, @@ -145,7 +138,7 @@ func (config Config) New() (authorizer.Authorizer, authorizer.RuleResolver, erro authorizers = append(authorizers, rbacAuthorizer) ruleResolvers = 
append(ruleResolvers, rbacAuthorizer) default: - return nil, nil, fmt.Errorf("unknown authorization mode %s specified", authorizationMode) + return nil, nil, fmt.Errorf("unknown authorization mode %s specified", configuredAuthorizer.Type) } } diff --git a/pkg/kubeapiserver/default_storage_factory_builder.go b/pkg/kubeapiserver/default_storage_factory_builder.go index f1946415d85c2..b8cd6413cef1c 100644 --- a/pkg/kubeapiserver/default_storage_factory_builder.go +++ b/pkg/kubeapiserver/default_storage_factory_builder.go @@ -71,7 +71,6 @@ func NewStorageFactoryConfig() *StorageFactoryConfig { // apisstorage.Resource("csistoragecapacities").WithVersion("v1beta1"), admissionregistration.Resource("validatingadmissionpolicies").WithVersion("v1beta1"), admissionregistration.Resource("validatingadmissionpolicybindings").WithVersion("v1beta1"), - networking.Resource("clustercidrs").WithVersion("v1alpha1"), networking.Resource("ipaddresses").WithVersion("v1alpha1"), certificates.Resource("clustertrustbundles").WithVersion("v1alpha1"), } diff --git a/pkg/kubeapiserver/options/admission.go b/pkg/kubeapiserver/options/admission.go index 68b31cc58decb..c58c2e9080a32 100644 --- a/pkg/kubeapiserver/options/admission.go +++ b/pkg/kubeapiserver/options/admission.go @@ -67,6 +67,9 @@ func NewAdmissionOptions() *AdmissionOptions { // AddFlags adds flags related to admission for kube-apiserver to the specified FlagSet func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) { + if a == nil { + return + } fs.StringSliceVar(&a.PluginNames, "admission-control", a.PluginNames, ""+ "Admission is divided into two phases. "+ "In the first phase, only mutating admission plugins run. "+ diff --git a/pkg/kubeapiserver/options/authentication.go b/pkg/kubeapiserver/options/authentication.go index fe7753ab0d1e5..b7fce0e08eba6 100644 --- a/pkg/kubeapiserver/options/authentication.go +++ b/pkg/kubeapiserver/options/authentication.go @@ -210,6 +210,10 @@ func (o *BuiltInAuthenticationOptions) WithWebHook() *BuiltInAuthenticationOptio // Validate checks invalid config combination func (o *BuiltInAuthenticationOptions) Validate() []error { + if o == nil { + return nil + } + var allErrors []error allErrors = append(allErrors, o.validateOIDCOptions()...) @@ -270,6 +274,10 @@ func (o *BuiltInAuthenticationOptions) Validate() []error { // AddFlags returns flags of authentication for a API Server func (o *BuiltInAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + fs.StringSliceVar(&o.APIAudiences, "api-audiences", o.APIAudiences, ""+ "Identifiers of the API. The service account token authenticator will validate that "+ "tokens used against the API are bound to at least one of these audiences. If the "+ @@ -416,8 +424,13 @@ func (o *BuiltInAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { } } -// ToAuthenticationConfig convert BuiltInAuthenticationOptions to kubeauthenticator.Config +// ToAuthenticationConfig convert BuiltInAuthenticationOptions to kubeauthenticator.Config. Returns +// an empty config if o is nil. func (o *BuiltInAuthenticationOptions) ToAuthenticationConfig() (kubeauthenticator.Config, error) { + if o == nil { + return kubeauthenticator.Config{}, nil + } + ret := kubeauthenticator.Config{ TokenSuccessCacheTTL: o.TokenSuccessCacheTTL, TokenFailureCacheTTL: o.TokenFailureCacheTTL, @@ -556,7 +569,7 @@ func (o *BuiltInAuthenticationOptions) ToAuthenticationConfig() (kubeauthenticat } // ApplyTo requires already applied OpenAPIConfig and EgressSelector if present. 
-func (o *BuiltInAuthenticationOptions) ApplyTo(authInfo *genericapiserver.AuthenticationInfo, secureServing *genericapiserver.SecureServingInfo, egressSelector *egressselector.EgressSelector, openAPIConfig *openapicommon.Config, openAPIV3Config *openapicommon.Config, extclient kubernetes.Interface, versionedInformer informers.SharedInformerFactory) error { +func (o *BuiltInAuthenticationOptions) ApplyTo(authInfo *genericapiserver.AuthenticationInfo, secureServing *genericapiserver.SecureServingInfo, egressSelector *egressselector.EgressSelector, openAPIConfig *openapicommon.Config, openAPIV3Config *openapicommon.OpenAPIV3Config, extclient kubernetes.Interface, versionedInformer informers.SharedInformerFactory) error { if o == nil { return nil } @@ -609,14 +622,16 @@ func (o *BuiltInAuthenticationOptions) ApplyTo(authInfo *genericapiserver.Authen authenticatorConfig.CustomDial = egressDialer } - authInfo.Authenticator, openAPIConfig.SecurityDefinitions, err = authenticatorConfig.New() - if openAPIV3Config != nil { - openAPIV3Config.SecurityDefinitions = openAPIConfig.SecurityDefinitions - } + // var openAPIV3SecuritySchemes spec3.SecuritySchemes + authenticator, openAPIV2SecurityDefinitions, openAPIV3SecuritySchemes, err := authenticatorConfig.New() if err != nil { return err } - + authInfo.Authenticator = authenticator + openAPIConfig.SecurityDefinitions = openAPIV2SecurityDefinitions + if openAPIV3Config != nil { + openAPIV3Config.SecuritySchemes = openAPIV3SecuritySchemes + } return nil } diff --git a/pkg/kubeapiserver/options/authorization.go b/pkg/kubeapiserver/options/authorization.go index 778ca1210d70f..4e9e24fb729ea 100644 --- a/pkg/kubeapiserver/options/authorization.go +++ b/pkg/kubeapiserver/options/authorization.go @@ -21,16 +21,38 @@ import ( "strings" "time" + "k8s.io/apiserver/pkg/apis/apiserver/load" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + authzconfig "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/apis/apiserver/validation" genericoptions "k8s.io/apiserver/pkg/server/options" versionedinformers "k8s.io/client-go/informers" + "k8s.io/kubernetes/pkg/kubeapiserver/authorizer" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" ) +const ( + defaultWebhookName = "default" + authorizationModeFlag = "authorization-mode" + authorizationWebhookConfigFileFlag = "authorization-webhook-config-file" + authorizationWebhookVersionFlag = "authorization-webhook-version" + authorizationWebhookAuthorizedTTLFlag = "authorization-webhook-cache-authorized-ttl" + authorizationWebhookUnauthorizedTTLFlag = "authorization-webhook-cache-unauthorized-ttl" + authorizationPolicyFileFlag = "authorization-policy-file" + authorizationConfigFlag = "authorization-config" +) + +// RepeatableAuthorizerTypes is the list of Authorizer that can be repeated in the Authorization Config +var repeatableAuthorizerTypes = []string{authzmodes.ModeWebhook} + // BuiltInAuthorizationOptions contains all build-in authorization options for API Server type BuiltInAuthorizationOptions struct { Modes []string @@ -43,6 +65,16 @@ type BuiltInAuthorizationOptions struct { // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. 
WebhookRetryBackoff *wait.Backoff + + // AuthorizationConfigurationFile is mutually exclusive with all of: + // - Modes + // - WebhookConfigFile + // - WebHookVersion + // - WebhookCacheAuthorizedTTL + // - WebhookCacheUnauthorizedTTL + AuthorizationConfigurationFile string + + AreLegacyFlagsSet func() bool } // NewBuiltInAuthorizationOptions create a BuiltInAuthorizationOptions with default value @@ -63,6 +95,53 @@ func (o *BuiltInAuthorizationOptions) Validate() []error { } var allErrors []error + // if --authorization-config is set, check if + // - the feature flag is set + // - legacyFlags are not set + // - the config file can be loaded + // - the config file represents a valid configuration + if o.AuthorizationConfigurationFile != "" { + if !utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StructuredAuthorizationConfiguration) { + return append(allErrors, fmt.Errorf("--%s cannot be used without enabling StructuredAuthorizationConfiguration feature flag", authorizationConfigFlag)) + } + + // error out if legacy flags are defined + if o.AreLegacyFlagsSet != nil && o.AreLegacyFlagsSet() { + return append(allErrors, fmt.Errorf("--%s can not be specified when --%s or --authorization-webhook-* flags are defined", authorizationConfigFlag, authorizationModeFlag)) + } + + // load the file and check for errors + config, err := load.LoadFromFile(o.AuthorizationConfigurationFile) + if err != nil { + return append(allErrors, fmt.Errorf("failed to load AuthorizationConfiguration from file: %v", err)) + } + + // validate the file and return any error + if errors := validation.ValidateAuthorizationConfiguration(nil, config, + sets.NewString(authzmodes.AuthorizationModeChoices...), + sets.NewString(repeatableAuthorizerTypes...), + ); len(errors) != 0 { + allErrors = append(allErrors, errors.ToAggregate().Errors()...) + } + + // test to check if the authorizer names passed conform to the authorizers for type!=Webhook + // this test is only for kube-apiserver and hence checked here + // it preserves compatibility with o.buildAuthorizationConfiguration + for _, authorizer := range config.Authorizers { + if string(authorizer.Type) == authzmodes.ModeWebhook { + continue + } + + expectedName := getNameForAuthorizerMode(string(authorizer.Type)) + if expectedName != authorizer.Name { + allErrors = append(allErrors, fmt.Errorf("expected name %s for authorizer %s instead of %s", expectedName, authorizer.Type, authorizer.Name)) + } + } + + return allErrors + } + + // validate the legacy flags using the legacy mode if --authorization-config is not passed if len(o.Modes) == 0 { allErrors = append(allErrors, fmt.Errorf("at least one authorization-mode must be passed")) } @@ -101,39 +180,149 @@ func (o *BuiltInAuthorizationOptions) Validate() []error { // AddFlags returns flags of authorization for a API Server func (o *BuiltInAuthorizationOptions) AddFlags(fs *pflag.FlagSet) { - fs.StringSliceVar(&o.Modes, "authorization-mode", o.Modes, ""+ + if o == nil { + return + } + + fs.StringSliceVar(&o.Modes, authorizationModeFlag, o.Modes, ""+ "Ordered list of plug-ins to do authorization on secure port. 
Comma-delimited list of: "+ strings.Join(authzmodes.AuthorizationModeChoices, ",")+".") - fs.StringVar(&o.PolicyFile, "authorization-policy-file", o.PolicyFile, ""+ + fs.StringVar(&o.PolicyFile, authorizationPolicyFileFlag, o.PolicyFile, ""+ "File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port.") - fs.StringVar(&o.WebhookConfigFile, "authorization-webhook-config-file", o.WebhookConfigFile, ""+ + fs.StringVar(&o.WebhookConfigFile, authorizationWebhookConfigFileFlag, o.WebhookConfigFile, ""+ "File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. "+ "The API server will query the remote service to determine access on the API server's secure port.") - fs.StringVar(&o.WebhookVersion, "authorization-webhook-version", o.WebhookVersion, ""+ + fs.StringVar(&o.WebhookVersion, authorizationWebhookVersionFlag, o.WebhookVersion, ""+ "The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook.") - fs.DurationVar(&o.WebhookCacheAuthorizedTTL, "authorization-webhook-cache-authorized-ttl", + fs.DurationVar(&o.WebhookCacheAuthorizedTTL, authorizationWebhookAuthorizedTTLFlag, o.WebhookCacheAuthorizedTTL, "The duration to cache 'authorized' responses from the webhook authorizer.") fs.DurationVar(&o.WebhookCacheUnauthorizedTTL, - "authorization-webhook-cache-unauthorized-ttl", o.WebhookCacheUnauthorizedTTL, + authorizationWebhookUnauthorizedTTLFlag, o.WebhookCacheUnauthorizedTTL, "The duration to cache 'unauthorized' responses from the webhook authorizer.") + + fs.StringVar(&o.AuthorizationConfigurationFile, authorizationConfigFlag, o.AuthorizationConfigurationFile, ""+ + "File with Authorization Configuration to configure the authorizer chain."+ + "Note: This feature is in Alpha since v1.29."+ + "--feature-gate=StructuredAuthorizationConfiguration=true feature flag needs to be set to true for enabling the functionality."+ + "This feature is mutually exclusive with the other --authorization-mode and --authorization-webhook-* flags.") + + // preserves compatibility with any method set during initialization + oldAreLegacyFlagsSet := o.AreLegacyFlagsSet + o.AreLegacyFlagsSet = func() bool { + if oldAreLegacyFlagsSet != nil && oldAreLegacyFlagsSet() { + return true + } + + return fs.Changed(authorizationModeFlag) || + fs.Changed(authorizationWebhookConfigFileFlag) || + fs.Changed(authorizationWebhookVersionFlag) || + fs.Changed(authorizationWebhookAuthorizedTTLFlag) || + fs.Changed(authorizationWebhookUnauthorizedTTLFlag) + } } // ToAuthorizationConfig convert BuiltInAuthorizationOptions to authorizer.Config -func (o *BuiltInAuthorizationOptions) ToAuthorizationConfig(versionedInformerFactory versionedinformers.SharedInformerFactory) authorizer.Config { - return authorizer.Config{ - AuthorizationModes: o.Modes, - PolicyFile: o.PolicyFile, - WebhookConfigFile: o.WebhookConfigFile, - WebhookVersion: o.WebhookVersion, - WebhookCacheAuthorizedTTL: o.WebhookCacheAuthorizedTTL, - WebhookCacheUnauthorizedTTL: o.WebhookCacheUnauthorizedTTL, - VersionedInformerFactory: versionedInformerFactory, - WebhookRetryBackoff: o.WebhookRetryBackoff, +func (o *BuiltInAuthorizationOptions) ToAuthorizationConfig(versionedInformerFactory versionedinformers.SharedInformerFactory) (*authorizer.Config, error) { + if o == nil { + return nil, nil + } + + var authorizationConfiguration *authzconfig.AuthorizationConfiguration + var err error + + // if --authorization-config is set, check if + // - 
the feature flag is set + // - legacyFlags are not set + // - the config file can be loaded + // - the config file represents a valid configuration + // else, + // - build the AuthorizationConfig from the legacy flags + if o.AuthorizationConfigurationFile != "" { + if !utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StructuredAuthorizationConfiguration) { + return nil, fmt.Errorf("--%s cannot be used without enabling StructuredAuthorizationConfiguration feature flag", authorizationConfigFlag) + } + + // error out if legacy flags are defined + if o.AreLegacyFlagsSet != nil && o.AreLegacyFlagsSet() { + return nil, fmt.Errorf("--%s can not be specified when --%s or --authorization-webhook-* flags are defined", authorizationConfigFlag, authorizationModeFlag) + } + + // load the file and check for errors + authorizationConfiguration, err = load.LoadFromFile(o.AuthorizationConfigurationFile) + if err != nil { + return nil, fmt.Errorf("failed to load AuthorizationConfiguration from file: %v", err) + } + + // validate the file and return any error + if errors := validation.ValidateAuthorizationConfiguration(nil, authorizationConfiguration, + sets.NewString(authzmodes.AuthorizationModeChoices...), + sets.NewString(repeatableAuthorizerTypes...), + ); len(errors) != 0 { + return nil, fmt.Errorf(errors.ToAggregate().Error()) + } + } else { + authorizationConfiguration, err = o.buildAuthorizationConfiguration() + if err != nil { + return nil, fmt.Errorf("failed to build authorization config: %s", err) + } } + + return &authorizer.Config{ + PolicyFile: o.PolicyFile, + VersionedInformerFactory: versionedInformerFactory, + WebhookRetryBackoff: o.WebhookRetryBackoff, + + AuthorizationConfiguration: authorizationConfiguration, + }, nil +} + +// buildAuthorizationConfiguration converts existing flags to the AuthorizationConfiguration format +func (o *BuiltInAuthorizationOptions) buildAuthorizationConfiguration() (*authzconfig.AuthorizationConfiguration, error) { + var authorizers []authzconfig.AuthorizerConfiguration + + if len(o.Modes) != sets.NewString(o.Modes...).Len() { + return nil, fmt.Errorf("modes should not be repeated in --authorization-mode") + } + + for _, mode := range o.Modes { + switch mode { + case authzmodes.ModeWebhook: + authorizers = append(authorizers, authzconfig.AuthorizerConfiguration{ + Type: authzconfig.TypeWebhook, + Name: defaultWebhookName, + Webhook: &authzconfig.WebhookConfiguration{ + AuthorizedTTL: metav1.Duration{Duration: o.WebhookCacheAuthorizedTTL}, + UnauthorizedTTL: metav1.Duration{Duration: o.WebhookCacheUnauthorizedTTL}, + // Timeout and FailurePolicy are required for the new configuration. + // Setting these two implicitly to preserve backward compatibility. 
+ Timeout: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: authzconfig.FailurePolicyNoOpinion, + SubjectAccessReviewVersion: o.WebhookVersion, + ConnectionInfo: authzconfig.WebhookConnectionInfo{ + Type: authzconfig.AuthorizationWebhookConnectionInfoTypeKubeConfigFile, + KubeConfigFile: &o.WebhookConfigFile, + }, + }, + }) + default: + authorizers = append(authorizers, authzconfig.AuthorizerConfiguration{ + Type: authzconfig.AuthorizerType(mode), + Name: getNameForAuthorizerMode(mode), + }) + } + } + + return &authzconfig.AuthorizationConfiguration{Authorizers: authorizers}, nil +} + +// getNameForAuthorizerMode returns the name to be set for the mode in AuthorizationConfiguration +// For now, lower cases the mode name +func getNameForAuthorizerMode(mode string) string { + return strings.ToLower(mode) } diff --git a/pkg/kubeapiserver/options/authorization_test.go b/pkg/kubeapiserver/options/authorization_test.go index 97ef24ac2d1d5..306fa976df7cd 100644 --- a/pkg/kubeapiserver/options/authorization_test.go +++ b/pkg/kubeapiserver/options/authorization_test.go @@ -173,6 +173,13 @@ func TestBuiltInAuthorizationOptionsAddFlags(t *testing.T) { t.Fatal(err) } + if !opts.AreLegacyFlagsSet() { + t.Fatal("legacy flags should have been configured") + } + + // setting the method to nil since methods can't be compared with reflect.DeepEqual + opts.AreLegacyFlagsSet = nil + if !reflect.DeepEqual(opts, expected) { t.Error(cmp.Diff(opts, expected)) } diff --git a/pkg/kubeapiserver/options/cloudprovider.go b/pkg/kubeapiserver/options/cloudprovider.go index 9df580ac818ce..6977e8dd16bbc 100644 --- a/pkg/kubeapiserver/options/cloudprovider.go +++ b/pkg/kubeapiserver/options/cloudprovider.go @@ -17,7 +17,12 @@ limitations under the License. package options import ( + "fmt" + "github.com/spf13/pflag" + utilfeature "k8s.io/apiserver/pkg/util/feature" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/kubernetes/pkg/features" ) // CloudProviderOptions contains cloud provider config @@ -32,16 +37,42 @@ func NewCloudProviderOptions() *CloudProviderOptions { } // Validate checks invalid config -func (s *CloudProviderOptions) Validate() []error { - allErrors := []error{} - return allErrors +func (opts *CloudProviderOptions) Validate() []error { + var errs []error + + switch { + case opts.CloudProvider == "": + case opts.CloudProvider == "external": + if !utilfeature.DefaultFeatureGate.Enabled(features.DisableCloudProviders) { + errs = append(errs, fmt.Errorf("when using --cloud-provider set to '%s', "+ + "please set DisableCloudProviders feature to true", opts.CloudProvider)) + } + if !utilfeature.DefaultFeatureGate.Enabled(features.DisableKubeletCloudCredentialProviders) { + errs = append(errs, fmt.Errorf("when using --cloud-provider set to '%s', "+ + "please set DisableKubeletCloudCredentialProviders feature to true", opts.CloudProvider)) + } + case cloudprovider.IsDeprecatedInternal(opts.CloudProvider): + if utilfeature.DefaultFeatureGate.Enabled(features.DisableCloudProviders) { + errs = append(errs, fmt.Errorf("when using --cloud-provider set to '%s', "+ + "please set DisableCloudProviders feature to false", opts.CloudProvider)) + } + if utilfeature.DefaultFeatureGate.Enabled(features.DisableKubeletCloudCredentialProviders) { + errs = append(errs, fmt.Errorf("when using --cloud-provider set to '%s', "+ + "please set DisableKubeletCloudCredentialProviders feature to false", opts.CloudProvider)) + } + default: + errs = append(errs, fmt.Errorf("unknown --cloud-provider: %s", opts.CloudProvider)) + } 
+ + return errs } // AddFlags returns flags of cloud provider for a API Server func (s *CloudProviderOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.") - + fs.MarkDeprecated("cloud-provider", "will be removed in a future version") // nolint: errcheck fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") + fs.MarkDeprecated("cloud-config", "will be removed in a future version") // nolint: errcheck } diff --git a/pkg/kubelet/apis/config/fuzzer/fuzzer.go b/pkg/kubelet/apis/config/fuzzer/fuzzer.go index 79748b4b93f8d..dfa988c0d047c 100644 --- a/pkg/kubelet/apis/config/fuzzer/fuzzer.go +++ b/pkg/kubelet/apis/config/fuzzer/fuzzer.go @@ -61,6 +61,7 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { obj.HealthzPort = 10248 obj.HTTPCheckFrequency = metav1.Duration{Duration: 20 * time.Second} obj.ImageMinimumGCAge = metav1.Duration{Duration: 2 * time.Minute} + obj.ImageMaximumGCAge = metav1.Duration{} obj.ImageGCHighThresholdPercent = 85 obj.ImageGCLowThresholdPercent = 80 obj.KernelMemcgNotification = false diff --git a/pkg/kubelet/apis/config/helpers_test.go b/pkg/kubelet/apis/config/helpers_test.go index 286602099f49a..31f406b85f2f2 100644 --- a/pkg/kubelet/apis/config/helpers_test.go +++ b/pkg/kubelet/apis/config/helpers_test.go @@ -230,6 +230,7 @@ var ( "ImageGCHighThresholdPercent", "ImageGCLowThresholdPercent", "ImageMinimumGCAge.Duration", + "ImageMaximumGCAge.Duration", "KernelMemcgNotification", "KubeAPIBurst", "KubeAPIQPS", diff --git a/pkg/kubelet/apis/config/scheme/testdata/KubeletConfiguration/after/v1beta1.yaml b/pkg/kubelet/apis/config/scheme/testdata/KubeletConfiguration/after/v1beta1.yaml index ab43f865f3a25..def3f0dc8446c 100644 --- a/pkg/kubelet/apis/config/scheme/testdata/KubeletConfiguration/after/v1beta1.yaml +++ b/pkg/kubelet/apis/config/scheme/testdata/KubeletConfiguration/after/v1beta1.yaml @@ -43,6 +43,7 @@ healthzPort: 10248 httpCheckFrequency: 20s imageGCHighThresholdPercent: 85 imageGCLowThresholdPercent: 80 +imageMaximumGCAge: 0s imageMinimumGCAge: 2m0s iptablesDropBit: 15 iptablesMasqueradeBit: 14 diff --git a/pkg/kubelet/apis/config/scheme/testdata/KubeletConfiguration/roundtrip/default/v1beta1.yaml b/pkg/kubelet/apis/config/scheme/testdata/KubeletConfiguration/roundtrip/default/v1beta1.yaml index 8b877c3ad4e80..ca5cf18b98398 100644 --- a/pkg/kubelet/apis/config/scheme/testdata/KubeletConfiguration/roundtrip/default/v1beta1.yaml +++ b/pkg/kubelet/apis/config/scheme/testdata/KubeletConfiguration/roundtrip/default/v1beta1.yaml @@ -43,6 +43,7 @@ healthzPort: 10248 httpCheckFrequency: 20s imageGCHighThresholdPercent: 85 imageGCLowThresholdPercent: 80 +imageMaximumGCAge: 0s imageMinimumGCAge: 2m0s iptablesDropBit: 15 iptablesMasqueradeBit: 14 diff --git a/pkg/kubelet/apis/config/types.go b/pkg/kubelet/apis/config/types.go index 1bff0cec0cacc..a64724a58d11c 100644 --- a/pkg/kubelet/apis/config/types.go +++ b/pkg/kubelet/apis/config/types.go @@ -192,9 +192,13 @@ type KubeletConfiguration struct { NodeStatusReportFrequency metav1.Duration // nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease. NodeLeaseDurationSeconds int32 - // imageMinimumGCAge is the minimum age for an unused image before it is + // ImageMinimumGCAge is the minimum age for an unused image before it is // garbage collected. 
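The kubelet changes in this part of the patch introduce imageMaximumGCAge next to the existing imageMinimumGCAge. A minimal sketch of a KubeletConfiguration that opts in, consistent with the validation added below (the ImageMaximumGCAge feature gate must be enabled and the maximum must exceed imageMinimumGCAge; the 168h value is purely illustrative):

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  ImageMaximumGCAge: true
imageMinimumGCAge: 2m0s
imageMaximumGCAge: 168h

Leaving imageMaximumGCAge at its default of 0s keeps the new age-based garbage collection disabled.
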
ImageMinimumGCAge metav1.Duration + // ImageMaximumGCAge is the maximum age an image can be unused before it is garbage collected. + // The default of this field is "0s", which disables this field--meaning images won't be garbage + // collected based on being unused for too long. + ImageMaximumGCAge metav1.Duration // imageGCHighThresholdPercent is the percent of disk usage after which // image garbage collection is always run. The percent is calculated as // this field value out of 100. diff --git a/pkg/kubelet/apis/config/v1beta1/defaults_test.go b/pkg/kubelet/apis/config/v1beta1/defaults_test.go index b387f50b625e4..55a6068e44d3f 100644 --- a/pkg/kubelet/apis/config/v1beta1/defaults_test.go +++ b/pkg/kubelet/apis/config/v1beta1/defaults_test.go @@ -77,6 +77,7 @@ func TestSetDefaultsKubeletConfiguration(t *testing.T) { NodeStatusReportFrequency: metav1.Duration{Duration: 5 * time.Minute}, NodeLeaseDurationSeconds: 40, ImageMinimumGCAge: metav1.Duration{Duration: 2 * time.Minute}, + ImageMaximumGCAge: metav1.Duration{}, ImageGCHighThresholdPercent: utilpointer.Int32(85), ImageGCLowThresholdPercent: utilpointer.Int32(80), ContainerRuntimeEndpoint: "unix:///run/containerd/containerd.sock", diff --git a/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go b/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go index 5376913670ce6..3704be3e1a123 100644 --- a/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go +++ b/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go @@ -392,6 +392,7 @@ func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in out.NodeStatusReportFrequency = in.NodeStatusReportFrequency out.NodeLeaseDurationSeconds = in.NodeLeaseDurationSeconds out.ImageMinimumGCAge = in.ImageMinimumGCAge + out.ImageMaximumGCAge = in.ImageMaximumGCAge if err := v1.Convert_Pointer_int32_To_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil { return err } @@ -579,6 +580,7 @@ func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in out.NodeStatusReportFrequency = in.NodeStatusReportFrequency out.NodeLeaseDurationSeconds = in.NodeLeaseDurationSeconds out.ImageMinimumGCAge = in.ImageMinimumGCAge + out.ImageMaximumGCAge = in.ImageMaximumGCAge if err := v1.Convert_int32_To_Pointer_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil { return err } diff --git a/pkg/kubelet/apis/config/validation/validation.go b/pkg/kubelet/apis/config/validation/validation.go index 7d9b452b4fdcc..dd46a477cfd1e 100644 --- a/pkg/kubelet/apis/config/validation/validation.go +++ b/pkg/kubelet/apis/config/validation/validation.go @@ -83,6 +83,15 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration, featur if kc.ImageGCLowThresholdPercent >= kc.ImageGCHighThresholdPercent { allErrors = append(allErrors, fmt.Errorf("invalid configuration: imageGCLowThresholdPercent (--image-gc-low-threshold) %v must be less than imageGCHighThresholdPercent (--image-gc-high-threshold) %v", kc.ImageGCLowThresholdPercent, kc.ImageGCHighThresholdPercent)) } + if kc.ImageMaximumGCAge.Duration != 0 && !localFeatureGate.Enabled(features.ImageMaximumGCAge) { + allErrors = append(allErrors, fmt.Errorf("invalid configuration: ImageMaximumGCAge feature gate is required for Kubelet configuration option ImageMaximumGCAge")) + } + if kc.ImageMaximumGCAge.Duration < 0 { + allErrors = append(allErrors, fmt.Errorf("invalid configuration: imageMaximumGCAge %v must not be negative", 
kc.ImageMaximumGCAge.Duration)) + } + if kc.ImageMaximumGCAge.Duration > 0 && kc.ImageMaximumGCAge.Duration <= kc.ImageMinimumGCAge.Duration { + allErrors = append(allErrors, fmt.Errorf("invalid configuration: imageMaximumGCAge %v must be greater than imageMinimumGCAge %v", kc.ImageMaximumGCAge.Duration, kc.ImageMinimumGCAge.Duration)) + } if utilvalidation.IsInRange(int(kc.IPTablesDropBit), 0, 31) != nil { allErrors = append(allErrors, fmt.Errorf("invalid configuration: iptablesDropBit (--iptables-drop-bit) %v must be between 0 and 31, inclusive", kc.IPTablesDropBit)) } diff --git a/pkg/kubelet/apis/config/validation/validation_test.go b/pkg/kubelet/apis/config/validation/validation_test.go index 776a759737277..913c5dc3bdf5a 100644 --- a/pkg/kubelet/apis/config/validation/validation_test.go +++ b/pkg/kubelet/apis/config/validation/validation_test.go @@ -521,6 +521,30 @@ func TestValidateKubeletConfiguration(t *testing.T) { return conf }, errMsg: "invalid configuration: enableSystemLogHandler is required for enableSystemLogQuery", + }, { + name: "imageMaximumGCAge should not be specified without feature gate", + configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { + conf.ImageMaximumGCAge = metav1.Duration{Duration: 1} + return conf + }, + errMsg: "invalid configuration: ImageMaximumGCAge feature gate is required for Kubelet configuration option ImageMaximumGCAge", + }, { + name: "imageMaximumGCAge should not be negative", + configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { + conf.FeatureGates = map[string]bool{"ImageMaximumGCAge": true} + conf.ImageMaximumGCAge = metav1.Duration{Duration: -1} + return conf + }, + errMsg: "invalid configuration: imageMaximumGCAge -1ns must not be negative", + }, { + name: "imageMaximumGCAge should not be less than imageMinimumGCAge", + configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration { + conf.FeatureGates = map[string]bool{"ImageMaximumGCAge": true} + conf.ImageMaximumGCAge = metav1.Duration{Duration: 1} + conf.ImageMinimumGCAge = metav1.Duration{Duration: 2} + return conf + }, + errMsg: "invalid configuration: imageMaximumGCAge 1ns must be greater than imageMinimumGCAge 2ns", }} for _, tc := range cases { diff --git a/pkg/kubelet/apis/config/zz_generated.deepcopy.go b/pkg/kubelet/apis/config/zz_generated.deepcopy.go index a4af47e4a8307..e2c0cc1dd1efe 100644 --- a/pkg/kubelet/apis/config/zz_generated.deepcopy.go +++ b/pkg/kubelet/apis/config/zz_generated.deepcopy.go @@ -202,6 +202,7 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency out.NodeStatusReportFrequency = in.NodeStatusReportFrequency out.ImageMinimumGCAge = in.ImageMinimumGCAge + out.ImageMaximumGCAge = in.ImageMaximumGCAge out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod if in.CPUManagerPolicyOptions != nil { in, out := &in.CPUManagerPolicyOptions, &out.CPUManagerPolicyOptions diff --git a/pkg/kubelet/apis/podresources/grpc/ratelimit.go b/pkg/kubelet/apis/grpc/ratelimit.go similarity index 72% rename from pkg/kubelet/apis/podresources/grpc/ratelimit.go rename to pkg/kubelet/apis/grpc/ratelimit.go index b9214b13c1619..149bcf279b20d 100644 --- a/pkg/kubelet/apis/podresources/grpc/ratelimit.go +++ b/pkg/kubelet/apis/grpc/ratelimit.go @@ -27,17 +27,6 @@ import ( "google.golang.org/grpc/status" ) -const ( - // DefaultQPS is determined by empirically reviewing known consumers of the 
API. - // It's at least unlikely that there is a legitimate need to query podresources - // more than 100 times per second, the other subsystems are not guaranteed to react - // so fast in the first place. - DefaultQPS = 100 - // DefaultBurstTokens is determined by empirically reviewing known consumers of the API. - // See the documentation of DefaultQPS, same caveats apply. - DefaultBurstTokens = 10 -) - var ( ErrorLimitExceeded = status.Error(codes.ResourceExhausted, "rejected by rate limit") ) @@ -59,9 +48,10 @@ func LimiterUnaryServerInterceptor(limiter Limiter) grpc.UnaryServerInterceptor } } -func WithRateLimiter(qps, burstTokens int32) grpc.ServerOption { +// WithRateLimiter creates new rate limiter with unary interceptor. +func WithRateLimiter(serviceName string, qps, burstTokens int32) grpc.ServerOption { qpsVal := gotimerate.Limit(qps) burstVal := int(burstTokens) - klog.InfoS("Setting rate limiting for podresources endpoint", "qps", qpsVal, "burstTokens", burstVal) + klog.InfoS("Setting rate limiting for endpoint", "service", serviceName, "qps", qpsVal, "burstTokens", burstVal) return grpc.UnaryInterceptor(LimiterUnaryServerInterceptor(gotimerate.NewLimiter(qpsVal, burstVal))) } diff --git a/pkg/kubelet/apis/podresources/constants.go b/pkg/kubelet/apis/podresources/constants.go index 6cc4c6a261a3b..97c4b930fd2d3 100644 --- a/pkg/kubelet/apis/podresources/constants.go +++ b/pkg/kubelet/apis/podresources/constants.go @@ -19,4 +19,14 @@ package podresources const ( // Socket is the name of the podresources server socket Socket = "kubelet" + + // DefaultQPS is determined by empirically reviewing known consumers of the API. + // It's at least unlikely that there is a legitimate need to query podresources + // more than 100 times per second, the other subsystems are not guaranteed to react + // so fast in the first place. + DefaultQPS = 100 + + // DefaultBurstTokens is determined by empirically reviewing known consumers of the API. + // See the documentation of DefaultQPS, same caveats apply. 
+ DefaultBurstTokens = 10 ) diff --git a/pkg/kubelet/apis/podresources/server_v1_test.go b/pkg/kubelet/apis/podresources/server_v1_test.go index 4d342101d2f42..0852d7040d060 100644 --- a/pkg/kubelet/apis/podresources/server_v1_test.go +++ b/pkg/kubelet/apis/podresources/server_v1_test.go @@ -705,12 +705,13 @@ func TestGetPodResourcesV1(t *testing.T) { server := NewV1PodResourcesServer(providers) podReq := &podresourcesapi.GetPodResourcesRequest{PodName: podName, PodNamespace: podNamespace} resp, err := server.Get(context.TODO(), podReq) + if err != nil { if err.Error() != tc.err.Error() { t.Errorf("want exit = %v, got %v", tc.err, err) } } else { - if err != err { + if err != tc.err { t.Errorf("want exit = %v, got %v", tc.err, err) } else { if !equalGetResponse(tc.expectedResponse, resp) { diff --git a/pkg/kubelet/cadvisor/cadvisor_linux_test.go b/pkg/kubelet/cadvisor/cadvisor_linux_test.go index c508dccdaaeb6..144516939ce18 100644 --- a/pkg/kubelet/cadvisor/cadvisor_linux_test.go +++ b/pkg/kubelet/cadvisor/cadvisor_linux_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/google/cadvisor/container/crio" cadvisorfs "github.com/google/cadvisor/fs" ) @@ -37,7 +38,7 @@ func TestImageFsInfoLabel(t *testing.T) { expectedError error }{{ description: "LabelCrioImages should be returned", - runtimeEndpoint: CrioSocket, + runtimeEndpoint: crio.CrioSocket, expectedLabel: cadvisorfs.LabelCrioImages, expectedError: nil, }, { diff --git a/pkg/kubelet/cadvisor/helpers_linux.go b/pkg/kubelet/cadvisor/helpers_linux.go index c512d3d05101e..7851cf5376b41 100644 --- a/pkg/kubelet/cadvisor/helpers_linux.go +++ b/pkg/kubelet/cadvisor/helpers_linux.go @@ -21,6 +21,7 @@ package cadvisor import ( "fmt" + "strings" cadvisorfs "github.com/google/cadvisor/fs" ) @@ -37,7 +38,7 @@ func (i *imageFsInfoProvider) ImageFsInfoLabel() (string, error) { // This is a temporary workaround to get stats for cri-o from cadvisor // and should be removed. // Related to https://github.com/kubernetes/kubernetes/issues/51798 - if i.runtimeEndpoint == CrioSocket || i.runtimeEndpoint == "unix://"+CrioSocket { + if strings.HasSuffix(i.runtimeEndpoint, CrioSocketSuffix) { return cadvisorfs.LabelCrioImages, nil } return "", fmt.Errorf("no imagefs label for configured runtime") diff --git a/pkg/kubelet/cadvisor/util.go b/pkg/kubelet/cadvisor/util.go index d5ad3a6b580b2..24f9e5eb50a2c 100644 --- a/pkg/kubelet/cadvisor/util.go +++ b/pkg/kubelet/cadvisor/util.go @@ -17,6 +17,8 @@ limitations under the License. package cadvisor import ( + "strings" + cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapi2 "github.com/google/cadvisor/info/v2" "k8s.io/api/core/v1" @@ -25,10 +27,12 @@ import ( ) const ( - // CrioSocket is the path to the CRI-O socket. + // CrioSocketSuffix is the path to the CRI-O socket. // Please keep this in sync with the one in: // github.com/google/cadvisor/tree/master/container/crio/client.go - CrioSocket = "/var/run/crio/crio.sock" + // Note that however we only match on the suffix, as /var/run is often a + // symlink to /run, so the user can specify either path. + CrioSocketSuffix = "run/crio/crio.sock" ) // CapacityFromMachineInfo returns the capacity of the resources from the machine info. @@ -69,5 +73,5 @@ func EphemeralStorageCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceLis // be removed. 
Related issue: // https://github.com/kubernetes/kubernetes/issues/51798 func UsingLegacyCadvisorStats(runtimeEndpoint string) bool { - return runtimeEndpoint == CrioSocket || runtimeEndpoint == "unix://"+CrioSocket + return strings.HasSuffix(runtimeEndpoint, CrioSocketSuffix) } diff --git a/pkg/kubelet/cadvisor/util_test.go b/pkg/kubelet/cadvisor/util_test.go index 2fa09e54f2fbb..375b1cf4fdbe8 100644 --- a/pkg/kubelet/cadvisor/util_test.go +++ b/pkg/kubelet/cadvisor/util_test.go @@ -21,6 +21,7 @@ package cadvisor import ( "reflect" + "strings" "testing" "github.com/google/cadvisor/container/crio" @@ -54,5 +55,5 @@ func TestCapacityFromMachineInfoWithHugePagesEnable(t *testing.T) { } func TestCrioSocket(t *testing.T) { - assert.EqualValues(t, CrioSocket, crio.CrioSocket, "CrioSocket in this package must equal the one in github.com/google/cadvisor/container/crio/client.go") + assert.True(t, strings.HasSuffix(crio.CrioSocket, CrioSocketSuffix), "CrioSocketSuffix in this package must be a suffix of the one in github.com/google/cadvisor/container/crio/client.go") } diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go index f54eaa2979fd4..1cd5f662429f0 100644 --- a/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/pkg/kubelet/cm/cgroup_manager_linux.go @@ -246,7 +246,7 @@ func (m *cgroupManagerImpl) Validate(name CgroupName) error { } difference := neededControllers.Difference(enabledControllers) if difference.Len() > 0 { - return fmt.Errorf("cgroup %q has some missing controllers: %v", name, strings.Join(difference.List(), ", ")) + return fmt.Errorf("cgroup %q has some missing controllers: %v", name, strings.Join(sets.List(difference), ", ")) } return nil // valid V2 cgroup } @@ -260,7 +260,7 @@ func (m *cgroupManagerImpl) Validate(name CgroupName) error { // scoped to the set control groups it understands. this is being discussed // in https://github.com/opencontainers/runc/issues/1440 // once resolved, we can remove this code. 
- allowlistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids") + allowlistControllers := sets.New[string]("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids") if _, ok := m.subsystems.MountPoints["hugetlb"]; ok { allowlistControllers.Insert("hugetlb") @@ -322,24 +322,24 @@ func getCPUWeight(cpuShares *uint64) uint64 { } // readUnifiedControllers reads the controllers available at the specified cgroup -func readUnifiedControllers(path string) (sets.String, error) { +func readUnifiedControllers(path string) (sets.Set[string], error) { controllersFileContent, err := os.ReadFile(filepath.Join(path, "cgroup.controllers")) if err != nil { return nil, err } controllers := strings.Fields(string(controllersFileContent)) - return sets.NewString(controllers...), nil + return sets.New(controllers...), nil } var ( availableRootControllersOnce sync.Once - availableRootControllers sets.String + availableRootControllers sets.Set[string] ) // getSupportedUnifiedControllers returns a set of supported controllers when running on cgroup v2 -func getSupportedUnifiedControllers() sets.String { +func getSupportedUnifiedControllers() sets.Set[string] { // This is the set of controllers used by the Kubelet - supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb", "pids") + supportedControllers := sets.New("cpu", "cpuset", "memory", "hugetlb", "pids") // Memoize the set of controllers that are present in the root cgroup availableRootControllersOnce.Do(func() { var err error @@ -407,7 +407,7 @@ func (m *cgroupManagerImpl) maybeSetHugetlb(resourceConfig *ResourceConfig, reso } // For each page size enumerated, set that value. - pageSizes := sets.NewString() + pageSizes := sets.New[string]() for pageSize, limit := range resourceConfig.HugePageLimit { sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize) if err != nil { @@ -485,7 +485,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int { cgroupFsName := m.Name(name) // Get a list of processes that we need to kill - pidsToKill := sets.NewInt() + pidsToKill := sets.New[int]() var pids []int for _, val := range m.subsystems.MountPoints { dir := path.Join(val, cgroupFsName) @@ -526,7 +526,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int { klog.V(4).InfoS("Cgroup manager encountered error scanning pids for directory", "path", dir, "err", err) } } - return pidsToKill.List() + return sets.List(pidsToKill) } // ReduceCPULimits reduces the cgroup's cpu shares to the lowest possible value diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index 527b66bf50e98..d5ac7cbb45987 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -164,7 +164,7 @@ type NodeAllocatableConfig struct { KubeReservedCgroupName string SystemReservedCgroupName string ReservedSystemCPUs cpuset.CPUSet - EnforceNodeAllocatable sets.String + EnforceNodeAllocatable sets.Set[string] KubeReserved v1.ResourceList SystemReserved v1.ResourceList HardEvictionThresholds []evictionapi.Threshold @@ -190,7 +190,7 @@ func parsePercentage(v string) (int64, error) { return percentage, nil } -// ParseQOSReserved parses the --qos-reserve-requests option +// ParseQOSReserved parses the --qos-reserved option func ParseQOSReserved(m map[string]string) (*map[v1.ResourceName]int64, error) { reservations := make(map[v1.ResourceName]int64) for k, v := range m { diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 
18001ed61f63c..7dbfd4cc66477 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -161,7 +161,7 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) { return f, nil } - expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory") + expectedCgroups := sets.New("cpu", "cpuacct", "cpuset", "memory") for _, mountPoint := range mountPoints { if mountPoint.Type == cgroupMountType { for _, opt := range mountPoint.Opts { @@ -176,7 +176,7 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) { } if expectedCgroups.Len() > 0 { - return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, expectedCgroups.List()) + return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, sets.List(expectedCgroups)) } // Check if cpu quota is available. diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/pkg/kubelet/cm/cpumanager/cpu_assignment.go index eba774e8f624f..f0efd74e80e0c 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -453,7 +453,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C return acc.result, nil } if acc.isFailed() { - return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request") + return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request: requested=%d, available=%d", numCPUs, availableCPUs.Size()) } // Algorithm: topology-aware best-fit @@ -565,7 +565,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu return acc.result, nil } if acc.isFailed() { - return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request") + return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request: requested=%d, available=%d", numCPUs, availableCPUs.Size()) } // Get the list of NUMA nodes represented by the set of CPUs in 'availableCPUs'. diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go b/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go index 2199114d65622..63b026b1979f2 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment_test.go @@ -522,7 +522,7 @@ func commonTakeByTopologyTestCases(t *testing.T) []takeByTopologyTestCase { topoSingleSocketHT, cpuset.New(0, 2, 4, 6), 5, - "not enough cpus available to satisfy request", + "not enough cpus available to satisfy request: requested=5, available=4", cpuset.New(), }, { diff --git a/pkg/kubelet/cm/cpumanager/policy_options.go b/pkg/kubelet/cm/cpumanager/policy_options.go index 2e275254c337c..368fc63624fd1 100644 --- a/pkg/kubelet/cm/cpumanager/policy_options.go +++ b/pkg/kubelet/cm/cpumanager/policy_options.go @@ -35,14 +35,14 @@ const ( ) var ( - alphaOptions = sets.NewString( + alphaOptions = sets.New[string]( DistributeCPUsAcrossNUMAOption, AlignBySocketOption, ) - betaOptions = sets.NewString( + betaOptions = sets.New[string]( FullPCPUsOnlyOption, ) - stableOptions = sets.NewString() + stableOptions = sets.New[string]() ) // CheckPolicyOptionAvailable verifies if the given option can be used depending on the Feature Gate Settings. 
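Much of the mechanical churn across these files is the move from the deprecated sets.String to the generic sets.Set[string]. A small before/after sketch of the API, assuming only that k8s.io/apimachinery is on the module path:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Old API: sets.NewString("cpu", "memory") returning sets.String.
	// New API: a generic constructor; the element type is inferred here.
	enabled := sets.New("cpu", "cpuset", "memory", "pids")
	enabled.Insert("hugetlb")

	// Sorted listing moved from the s.List() method to a package helper.
	fmt.Println(sets.List(enabled))

	// Set algebra keeps the same method names, so checks like the
	// missing-controllers validation above translate directly.
	needed := sets.New("cpu", "memory", "misc")
	if missing := needed.Difference(enabled); missing.Len() > 0 {
		fmt.Println("missing:", sets.List(missing))
	}
}

The package-level sets.List exists because sorting needs an ordered element type, while the generic Set itself only requires a comparable one, which is why the old List() method has no generic equivalent.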
diff --git a/pkg/kubelet/cm/cpumanager/policy_static_test.go b/pkg/kubelet/cm/cpumanager/policy_static_test.go index 0dcf78d49dcf4..b0f14ae04e7f4 100644 --- a/pkg/kubelet/cm/cpumanager/policy_static_test.go +++ b/pkg/kubelet/cm/cpumanager/policy_static_test.go @@ -399,7 +399,7 @@ func TestStaticPolicyAdd(t *testing.T) { stAssignments: state.ContainerCPUAssignments{}, stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"), - expErr: fmt.Errorf("not enough cpus available to satisfy request"), + expErr: fmt.Errorf("not enough cpus available to satisfy request: requested=8, available=7"), expCPUAlloc: false, expCSet: cpuset.New(), }, @@ -429,7 +429,7 @@ func TestStaticPolicyAdd(t *testing.T) { }, stDefaultCPUSet: cpuset.New(0, 4, 5, 6, 7, 8, 9, 10, 11), pod: makePod("fakePod", "fakeContainer5", "10000m", "10000m"), - expErr: fmt.Errorf("not enough cpus available to satisfy request"), + expErr: fmt.Errorf("not enough cpus available to satisfy request: requested=10, available=8"), expCPUAlloc: false, expCSet: cpuset.New(), }, @@ -444,7 +444,7 @@ func TestStaticPolicyAdd(t *testing.T) { }, stDefaultCPUSet: cpuset.New(0, 7), pod: makePod("fakePod", "fakeContainer5", "2000m", "2000m"), - expErr: fmt.Errorf("not enough cpus available to satisfy request"), + expErr: fmt.Errorf("not enough cpus available to satisfy request: requested=2, available=1"), expCPUAlloc: false, expCSet: cpuset.New(), }, @@ -461,7 +461,7 @@ func TestStaticPolicyAdd(t *testing.T) { }, stDefaultCPUSet: cpuset.New(10, 11, 53, 37, 55, 67, 52), pod: makePod("fakePod", "fakeContainer5", "76000m", "76000m"), - expErr: fmt.Errorf("not enough cpus available to satisfy request"), + expErr: fmt.Errorf("not enough cpus available to satisfy request: requested=76, available=7"), expCPUAlloc: false, expCSet: cpuset.New(), }, @@ -981,7 +981,7 @@ func TestStaticPolicyAddWithResvList(t *testing.T) { stAssignments: state.ContainerCPUAssignments{}, stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"), - expErr: fmt.Errorf("not enough cpus available to satisfy request"), + expErr: fmt.Errorf("not enough cpus available to satisfy request: requested=8, available=7"), expCPUAlloc: false, expCSet: cpuset.New(), }, diff --git a/pkg/kubelet/cm/devicemanager/checkpoint/checkpoint.go b/pkg/kubelet/cm/devicemanager/checkpoint/checkpoint.go index a8cb24be6e5be..46d0aaa2a2528 100644 --- a/pkg/kubelet/cm/devicemanager/checkpoint/checkpoint.go +++ b/pkg/kubelet/cm/devicemanager/checkpoint/checkpoint.go @@ -62,9 +62,9 @@ func NewDevicesPerNUMA() DevicesPerNUMA { } // Devices is a function that returns all device ids for all NUMA nodes -// and represent it as sets.String -func (dev DevicesPerNUMA) Devices() sets.String { - result := sets.NewString() +// and represent it as sets.Set[string] +func (dev DevicesPerNUMA) Devices() sets.Set[string] { + result := sets.New[string]() for _, devs := range dev { result.Insert(devs...) diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go index d780ee801bdc0..f552addbfabc3 100644 --- a/pkg/kubelet/cm/devicemanager/manager.go +++ b/pkg/kubelet/cm/devicemanager/manager.go @@ -74,13 +74,13 @@ type ManagerImpl struct { allDevices ResourceDeviceInstances // healthyDevices contains all of the registered healthy resourceNames and their exported device IDs. 
- healthyDevices map[string]sets.String + healthyDevices map[string]sets.Set[string] // unhealthyDevices contains all of the unhealthy devices and their exported device IDs. - unhealthyDevices map[string]sets.String + unhealthyDevices map[string]sets.Set[string] // allocatedDevices contains allocated deviceIds, keyed by resourceName. - allocatedDevices map[string]sets.String + allocatedDevices map[string]sets.Set[string] // podDevices contains pod to allocated device mapping. podDevices *podDevices @@ -106,7 +106,7 @@ type ManagerImpl struct { // containerRunningSet identifies which container among those present in `containerMap` // was reported running by the container runtime when `containerMap` was computed. // Used to detect pods running across a restart - containerRunningSet sets.String + containerRunningSet sets.Set[string] } type endpointInfo struct { @@ -117,7 +117,7 @@ type endpointInfo struct { type sourcesReadyStub struct{} // PodReusableDevices is a map by pod name of devices to reuse. -type PodReusableDevices map[string]map[string]sets.String +type PodReusableDevices map[string]map[string]sets.Set[string] func (s *sourcesReadyStub) AddSource(source string) {} func (s *sourcesReadyStub) AllReady() bool { return true } @@ -143,9 +143,9 @@ func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffi endpoints: make(map[string]endpointInfo), allDevices: NewResourceDeviceInstances(), - healthyDevices: make(map[string]sets.String), - unhealthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + unhealthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), podDevices: newPodDevices(), numaNodes: numaNodes, topologyAffinityStore: topologyAffinityStore, @@ -236,12 +236,12 @@ func (m *ManagerImpl) PluginDisconnected(resourceName string) { m.mutex.Lock() defer m.mutex.Unlock() - if _, exists := m.endpoints[resourceName]; exists { + if ep, exists := m.endpoints[resourceName]; exists { m.markResourceUnhealthy(resourceName) - klog.V(2).InfoS("Endpoint became unhealthy", "resourceName", resourceName, "endpoint", m.endpoints[resourceName]) - } + klog.V(2).InfoS("Endpoint became unhealthy", "resourceName", resourceName, "endpoint", ep) - m.endpoints[resourceName].e.setStopTime(time.Now()) + ep.e.setStopTime(time.Now()) + } } // PluginListAndWatchReceiver receives ListAndWatchResponse from a device plugin @@ -259,8 +259,8 @@ func (m *ManagerImpl) PluginListAndWatchReceiver(resourceName string, resp *plug func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, devices []pluginapi.Device) { healthyCount := 0 m.mutex.Lock() - m.healthyDevices[resourceName] = sets.NewString() - m.unhealthyDevices[resourceName] = sets.NewString() + m.healthyDevices[resourceName] = sets.New[string]() + m.unhealthyDevices[resourceName] = sets.New[string]() m.allDevices[resourceName] = make(map[string]pluginapi.Device) for _, dev := range devices { m.allDevices[resourceName][dev.ID] = dev @@ -291,7 +291,7 @@ func (m *ManagerImpl) checkpointFile() string { // Start starts the Device Plugin Manager and start initialization of // podDevices and allocatedDevices information from checkpointed state and // starts device plugin registration service. 
-func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.String) error { +func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error { klog.V(2).InfoS("Starting Device Plugin manager") m.activePods = activePods @@ -323,7 +323,7 @@ func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error { m.setPodPendingAdmission(pod) if _, ok := m.devicesToReuse[string(pod.UID)]; !ok { - m.devicesToReuse[string(pod.UID)] = make(map[string]sets.String) + m.devicesToReuse[string(pod.UID)] = make(map[string]sets.Set[string]) } // If pod entries to m.devicesToReuse other than the current pod exist, delete them. for podUID := range m.devicesToReuse { @@ -365,13 +365,13 @@ func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, a func (m *ManagerImpl) markResourceUnhealthy(resourceName string) { klog.V(2).InfoS("Mark all resources Unhealthy for resource", "resourceName", resourceName) - healthyDevices := sets.NewString() + healthyDevices := sets.New[string]() if _, ok := m.healthyDevices[resourceName]; ok { healthyDevices = m.healthyDevices[resourceName] - m.healthyDevices[resourceName] = sets.NewString() + m.healthyDevices[resourceName] = sets.New[string]() } if _, ok := m.unhealthyDevices[resourceName]; !ok { - m.unhealthyDevices[resourceName] = sets.NewString() + m.unhealthyDevices[resourceName] = sets.New[string]() } m.unhealthyDevices[resourceName] = m.unhealthyDevices[resourceName].Union(healthyDevices) } @@ -392,7 +392,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) needsUpdateCheckpoint := false var capacity = v1.ResourceList{} var allocatable = v1.ResourceList{} - deletedResources := sets.NewString() + deletedResources := sets.New[string]() m.mutex.Lock() for resourceName, devices := range m.healthyDevices { eI, ok := m.endpoints[resourceName] @@ -492,8 +492,8 @@ func (m *ManagerImpl) readCheckpoint() error { for resource := range registeredDevs { // During start up, creates empty healthyDevices list so that the resource capacity // will stay zero till the corresponding device plugin re-registers. - m.healthyDevices[resource] = sets.NewString() - m.unhealthyDevices[resource] = sets.NewString() + m.healthyDevices[resource] = sets.New[string]() + m.unhealthyDevices[resource] = sets.New[string]() m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil} } return nil @@ -536,15 +536,15 @@ func (m *ManagerImpl) UpdateAllocatedDevices() { if len(podsToBeRemoved) <= 0 { return } - klog.V(3).InfoS("Pods to be removed", "podUIDs", podsToBeRemoved.List()) - m.podDevices.delete(podsToBeRemoved.List()) + klog.V(3).InfoS("Pods to be removed", "podUIDs", sets.List(podsToBeRemoved)) + m.podDevices.delete(sets.List(podsToBeRemoved)) // Regenerated allocatedDevices after we update pod allocation information. m.allocatedDevices = m.podDevices.devices() } // Returns list of device Ids we need to allocate with Allocate rpc call. // Returns empty list in case we don't need to issue the Allocate rpc call. 
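The devicesToAllocate flow that follows relies on a small closure that drains candidate device pools in priority order, preferring reusable (already released) devices before untouched ones. A standalone sketch of that pattern, with made-up device IDs:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	needed := 3
	allocated := sets.New[string]()

	// Drain one candidate pool at a time; report true once the request is met.
	allocateRemainingFrom := func(candidates sets.Set[string]) bool {
		for device := range candidates.Difference(allocated) {
			allocated.Insert(device)
			needed--
			if needed == 0 {
				return true
			}
		}
		return false
	}

	reusable := sets.New("dev1")
	available := sets.New("dev2", "dev3", "dev4", "dev5")

	// Reusable devices are consumed first, then anything still available.
	if allocateRemainingFrom(reusable) || allocateRemainingFrom(available) {
		fmt.Println("allocated:", sets.List(allocated))
	}
}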
-func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.String) (sets.String, error) { +func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.Set[string]) (sets.Set[string], error) { m.mutex.Lock() defer m.mutex.Unlock() needed := required @@ -552,7 +552,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // This can happen if a container restarts for example. devices := m.podDevices.containerDevices(podUID, contName, resource) if devices != nil { - klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", string(podUID), "devices", devices.List()) + klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", string(podUID), "devices", sets.List(devices)) needed = needed - devices.Len() // A pod's resource is not expected to change once admitted by the API server, // so just fail loudly here. We can revisit this part if this no longer holds. @@ -610,11 +610,11 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // Declare the list of allocated devices. // This will be populated and returned below. - allocated := sets.NewString() + allocated := sets.New[string]() // Create a closure to help with device allocation // Returns 'true' once no more devices need to be allocated. - allocateRemainingFrom := func(devices sets.String) bool { + allocateRemainingFrom := func(devices sets.Set[string]) bool { for device := range devices.Difference(allocated) { m.allocatedDevices[resource].Insert(device) allocated.Insert(device) @@ -628,7 +628,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // Needs to allocate additional devices. if m.allocatedDevices[resource] == nil { - m.allocatedDevices[resource] = sets.NewString() + m.allocatedDevices[resource] = sets.New[string]() } // Allocates from reusableDevices list first. @@ -697,22 +697,22 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi return nil, fmt.Errorf("unexpectedly allocated less resources than required. Requested: %d, Got: %d", required, required-needed) } -func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, available sets.String) (sets.String, sets.String, sets.String) { +func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, available sets.Set[string]) (sets.Set[string], sets.Set[string], sets.Set[string]) { // If alignment information is not available, just pass the available list back. hint := m.topologyAffinityStore.GetAffinity(podUID, contName) if !m.deviceHasTopologyAlignment(resource) || hint.NUMANodeAffinity == nil { - return sets.NewString(), sets.NewString(), available + return sets.New[string](), sets.New[string](), available } // Build a map of NUMA Nodes to the devices associated with them. A // device may be associated to multiple NUMA nodes at the same time. If an // available device does not have any NUMA Nodes associated with it, add it // to a list of NUMA Nodes for the fake NUMANode -1. 
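The comment above describes the per-NUMA grouping that filterByAffinity builds just below. A rough sketch on simplified stand-in types (the real code keys off the device plugin API's Topology field): devices are filed under every NUMA node they report, and devices with no topology go under the fake node -1.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// nodeWithoutTopology mirrors the fake NUMA node used for devices that report
// no topology at all.
const nodeWithoutTopology = -1

// device is a simplified stand-in for the device plugin API type.
type device struct {
	id    string
	nodes []int
}

func groupByNUMA(devices []device) map[int]sets.Set[string] {
	perNode := make(map[int]sets.Set[string])
	add := func(node int, id string) {
		if _, ok := perNode[node]; !ok {
			perNode[node] = sets.New[string]()
		}
		perNode[node].Insert(id)
	}
	for _, d := range devices {
		if len(d.nodes) == 0 {
			add(nodeWithoutTopology, d.id)
			continue
		}
		for _, n := range d.nodes {
			add(n, d.id)
		}
	}
	return perNode
}

func main() {
	devs := []device{
		{id: "dev1", nodes: []int{0}},
		{id: "dev2", nodes: []int{0, 1}},
		{id: "dev3"}, // no topology reported
	}
	for node, ids := range groupByNUMA(devs) {
		fmt.Println(node, sets.List(ids))
	}
}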
- perNodeDevices := make(map[int]sets.String) + perNodeDevices := make(map[int]sets.Set[string]) for d := range available { if m.allDevices[resource][d].Topology == nil || len(m.allDevices[resource][d].Topology.Nodes) == 0 { if _, ok := perNodeDevices[nodeWithoutTopology]; !ok { - perNodeDevices[nodeWithoutTopology] = sets.NewString() + perNodeDevices[nodeWithoutTopology] = sets.New[string]() } perNodeDevices[nodeWithoutTopology].Insert(d) continue @@ -720,7 +720,7 @@ func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, availa for _, node := range m.allDevices[resource][d].Topology.Nodes { if _, ok := perNodeDevices[int(node.ID)]; !ok { - perNodeDevices[int(node.ID)] = sets.NewString() + perNodeDevices[int(node.ID)] = sets.New[string]() } perNodeDevices[int(node.ID)].Insert(d) } @@ -791,14 +791,14 @@ func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, availa } // Return all three lists containing the full set of devices across them. - return sets.NewString(fromAffinity...), sets.NewString(notFromAffinity...), sets.NewString(withoutTopology...) + return sets.New[string](fromAffinity...), sets.New[string](notFromAffinity...), sets.New[string](withoutTopology...) } // allocateContainerResources attempts to allocate all of required device // plugin resources for the input container, issues an Allocate rpc request // for each new device resource requirement, processes their AllocateResponses, // and updates the cached containerDevices on success. -func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.String) error { +func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.Set[string]) error { podUID := string(pod.UID) contName := container.Name allocatedDevicesUpdated := false @@ -981,7 +981,7 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s // callGetPreferredAllocationIfAvailable issues GetPreferredAllocation grpc // call for device plugin resource with GetPreferredAllocationAvailable option set. 
-func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, resource string, available, mustInclude sets.String, size int) (sets.String, error) { +func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, resource string, available, mustInclude sets.Set[string], size int) (sets.Set[string], error) { eI, ok := m.endpoints[resource] if !ok { return nil, fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource) @@ -1000,9 +1000,9 @@ func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, re return nil, fmt.Errorf("device plugin GetPreferredAllocation rpc failed with err: %v", err) } if resp != nil && len(resp.ContainerResponses) > 0 { - return sets.NewString(resp.ContainerResponses[0].DeviceIDs...), nil + return sets.New[string](resp.ContainerResponses[0].DeviceIDs...), nil } - return sets.NewString(), nil + return sets.New[string](), nil } // sanitizeNodeAllocatable scans through allocatedDevices in the device manager diff --git a/pkg/kubelet/cm/devicemanager/manager_test.go b/pkg/kubelet/cm/devicemanager/manager_test.go index 4a23e37858ca2..7eb9c53356e35 100644 --- a/pkg/kubelet/cm/devicemanager/manager_test.go +++ b/pkg/kubelet/cm/devicemanager/manager_test.go @@ -287,7 +287,7 @@ func setupDeviceManager(t *testing.T, devs []*pluginapi.Device, callback monitor // test steady state, initialization where sourcesReady, containerMap and containerRunningSet // are relevant will be tested with a different flow - err = w.Start(activePods, &sourcesReadyStub{}, containermap.NewContainerMap(), sets.NewString()) + err = w.Start(activePods, &sourcesReadyStub{}, containermap.NewContainerMap(), sets.New[string]()) require.NoError(t, err) return w, updateChan @@ -312,6 +312,7 @@ func setupPluginManager(t *testing.T, pluginSocketName string, m Manager) plugin } func runPluginManager(pluginManager pluginmanager.PluginManager) { + // FIXME: Replace sets.String with sets.Set[string] sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true }) go pluginManager.Run(sourcesReady, wait.NeverStop) } @@ -459,8 +460,8 @@ func TestUpdateCapacityAllocatable(t *testing.T) { // properly rejected instead of being incorrectly started. 
err = testManager.writeCheckpoint() as.Nil(err) - testManager.healthyDevices = make(map[string]sets.String) - testManager.unhealthyDevices = make(map[string]sets.String) + testManager.healthyDevices = make(map[string]sets.Set[string]) + testManager.unhealthyDevices = make(map[string]sets.Set[string]) err = testManager.readCheckpoint() as.Nil(err) as.Equal(1, len(testManager.endpoints)) @@ -673,9 +674,9 @@ func TestCheckpoint(t *testing.T) { as.Nil(err) testManager := &ManagerImpl{ endpoints: make(map[string]endpointInfo), - healthyDevices: make(map[string]sets.String), - unhealthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + unhealthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), podDevices: newPodDevices(), checkpointManager: ckm, } @@ -718,16 +719,16 @@ func TestCheckpoint(t *testing.T) { ), ) - testManager.healthyDevices[resourceName1] = sets.NewString() + testManager.healthyDevices[resourceName1] = sets.New[string]() testManager.healthyDevices[resourceName1].Insert("dev1") testManager.healthyDevices[resourceName1].Insert("dev2") testManager.healthyDevices[resourceName1].Insert("dev3") testManager.healthyDevices[resourceName1].Insert("dev4") testManager.healthyDevices[resourceName1].Insert("dev5") - testManager.healthyDevices[resourceName2] = sets.NewString() + testManager.healthyDevices[resourceName2] = sets.New[string]() testManager.healthyDevices[resourceName2].Insert("dev1") testManager.healthyDevices[resourceName2].Insert("dev2") - testManager.healthyDevices[resourceName3] = sets.NewString() + testManager.healthyDevices[resourceName3] = sets.New[string]() testManager.healthyDevices[resourceName3].Insert("dev5") expectedPodDevices := testManager.podDevices @@ -827,9 +828,9 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso return nil, err } m := &ManagerImpl{ - healthyDevices: make(map[string]sets.String), - unhealthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + unhealthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), endpoints: make(map[string]endpointInfo), podDevices: newPodDevices(), devicesToReuse: make(PodReusableDevices), @@ -846,7 +847,7 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso } for _, res := range testRes { - testManager.healthyDevices[res.resourceName] = sets.NewString(res.devs.Devices().UnsortedList()...) + testManager.healthyDevices[res.resourceName] = sets.New[string](res.devs.Devices().UnsortedList()...) 
if res.resourceName == "domain1.com/resource1" { testManager.endpoints[res.resourceName] = endpointInfo{ e: &MockEndpoint{allocateFunc: allocateStubFunc()}, @@ -953,22 +954,22 @@ func TestFilterByAffinity(t *testing.T) { } testCases := []struct { - available sets.String - fromAffinityExpected sets.String - notFromAffinityExpected sets.String - withoutTopologyExpected sets.String + available sets.Set[string] + fromAffinityExpected sets.Set[string] + notFromAffinityExpected sets.Set[string] + withoutTopologyExpected sets.Set[string] }{ { - available: sets.NewString("dev1", "dev2"), - fromAffinityExpected: sets.NewString("dev2"), - notFromAffinityExpected: sets.NewString("dev1"), - withoutTopologyExpected: sets.NewString(), + available: sets.New[string]("dev1", "dev2"), + fromAffinityExpected: sets.New[string]("dev2"), + notFromAffinityExpected: sets.New[string]("dev1"), + withoutTopologyExpected: sets.New[string](), }, { - available: sets.NewString("dev1", "dev2", "dev3", "dev4"), - fromAffinityExpected: sets.NewString("dev2", "dev3", "dev4"), - notFromAffinityExpected: sets.NewString("dev1"), - withoutTopologyExpected: sets.NewString(), + available: sets.New[string]("dev1", "dev2", "dev3", "dev4"), + fromAffinityExpected: sets.New[string]("dev2", "dev3", "dev4"), + notFromAffinityExpected: sets.New[string]("dev1"), + withoutTopologyExpected: sets.New[string](), }, } @@ -1087,9 +1088,9 @@ func TestPodContainerDeviceToAllocate(t *testing.T) { testManager := &ManagerImpl{ endpoints: make(map[string]endpointInfo), - healthyDevices: make(map[string]sets.String), - unhealthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + unhealthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), podDevices: newPodDevices(), activePods: func() []*v1.Pod { return []*v1.Pod{} }, sourcesReady: &sourcesReadyStub{}, @@ -1121,8 +1122,8 @@ func TestPodContainerDeviceToAllocate(t *testing.T) { // no healthy devices for resourceName1 and devices corresponding to // resource2 are intentionally omitted to simulate that the resource // hasn't been registered. 
- testManager.healthyDevices[resourceName1] = sets.NewString() - testManager.healthyDevices[resourceName3] = sets.NewString() + testManager.healthyDevices[resourceName1] = sets.New[string]() + testManager.healthyDevices[resourceName3] = sets.New[string]() // dev5 is no longer in the list of healthy devices testManager.healthyDevices[resourceName3].Insert("dev7") testManager.healthyDevices[resourceName3].Insert("dev8") @@ -1133,8 +1134,8 @@ func TestPodContainerDeviceToAllocate(t *testing.T) { contName string resource string required int - reusableDevices sets.String - expectedAllocatedDevices sets.String + reusableDevices sets.Set[string] + expectedAllocatedDevices sets.Set[string] expErr error }{ { @@ -1143,7 +1144,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) { contName: "con1", resource: resourceName1, required: 2, - reusableDevices: sets.NewString(), + reusableDevices: sets.New[string](), expectedAllocatedDevices: nil, expErr: fmt.Errorf("no healthy devices present; cannot allocate unhealthy devices %s", resourceName1), }, @@ -1153,7 +1154,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) { contName: "con2", resource: resourceName2, required: 1, - reusableDevices: sets.NewString(), + reusableDevices: sets.New[string](), expectedAllocatedDevices: nil, expErr: fmt.Errorf("cannot allocate unregistered device %s", resourceName2), }, @@ -1163,7 +1164,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) { contName: "con3", resource: resourceName3, required: 1, - reusableDevices: sets.NewString(), + reusableDevices: sets.New[string](), expectedAllocatedDevices: nil, expErr: fmt.Errorf("previously allocated devices are no longer healthy; cannot allocate unhealthy devices %s", resourceName3), }, @@ -1366,8 +1367,8 @@ func TestUpdatePluginResources(t *testing.T) { ckm, err := checkpointmanager.NewCheckpointManager(tmpDir) as.Nil(err) m := &ManagerImpl{ - allocatedDevices: make(map[string]sets.String), - healthyDevices: make(map[string]sets.String), + allocatedDevices: make(map[string]sets.Set[string]), + healthyDevices: make(map[string]sets.Set[string]), podDevices: newPodDevices(), checkpointManager: ckm, } @@ -1378,9 +1379,9 @@ func TestUpdatePluginResources(t *testing.T) { testManager.podDevices.devs[string(pod.UID)] = make(containerDevices) // require one of resource1 and one of resource2 - testManager.allocatedDevices[resourceName1] = sets.NewString() + testManager.allocatedDevices[resourceName1] = sets.New[string]() testManager.allocatedDevices[resourceName1].Insert(devID1) - testManager.allocatedDevices[resourceName2] = sets.NewString() + testManager.allocatedDevices[resourceName2] = sets.New[string]() testManager.allocatedDevices[resourceName2].Insert(devID2) cachedNode := &v1.Node{ @@ -1486,9 +1487,9 @@ func TestResetExtendedResource(t *testing.T) { as.Nil(err) testManager := &ManagerImpl{ endpoints: make(map[string]endpointInfo), - healthyDevices: make(map[string]sets.String), - unhealthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + unhealthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), podDevices: newPodDevices(), checkpointManager: ckm, } @@ -1502,7 +1503,7 @@ func TestResetExtendedResource(t *testing.T) { ), ) - testManager.healthyDevices[extendedResourceName] = sets.NewString() + testManager.healthyDevices[extendedResourceName] = sets.New[string]() testManager.healthyDevices[extendedResourceName].Insert("dev1") 
// checkpoint is present, indicating node hasn't been recreated err = testManager.writeCheckpoint() diff --git a/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go b/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go index fe2b90ff1b1d4..bf66875a49344 100644 --- a/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go +++ b/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go @@ -69,8 +69,10 @@ func (c *client) Connect() error { klog.ErrorS(err, "Unable to connect to device plugin client with socket path", "path", c.socket) return err } + c.mutex.Lock() c.grpc = conn c.client = client + c.mutex.Unlock() return c.handler.PluginConnected(c.resource, c) } diff --git a/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go b/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go index 1d226f72a8530..fbec3456e46fa 100644 --- a/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go +++ b/pkg/kubelet/cm/devicemanager/plugin/v1beta1/stub.go @@ -24,6 +24,7 @@ import ( "sync" "time" + "github.com/fsnotify/fsnotify" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -53,9 +54,13 @@ type Stub struct { // getPreferredAllocFunc is used for handling getPreferredAllocation request getPreferredAllocFunc stubGetPreferredAllocFunc + // registerControlFunc is used for controlling auto-registration of requests + registerControlFunc stubRegisterControlFunc + registrationStatus chan watcherapi.RegistrationStatus // for testing endpoint string // for testing + kubeletRestartWatcher *fsnotify.Watcher } // stubGetPreferredAllocFunc is the function called when a getPreferredAllocation request is received from Kubelet @@ -76,20 +81,36 @@ func defaultAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.De return &response, nil } +// stubRegisterControlFunc is the function called when a registration request is received from Kubelet +type stubRegisterControlFunc func() bool + +func defaultRegisterControlFunc() bool { + return true +} + // NewDevicePluginStub returns an initialized DevicePlugin Stub. func NewDevicePluginStub(devs []*pluginapi.Device, socket string, name string, preStartContainerFlag bool, getPreferredAllocationFlag bool) *Stub { + + watcher, err := fsnotify.NewWatcher() + if err != nil { + klog.ErrorS(err, "Watcher creation failed") + panic(err) + } + return &Stub{ devs: devs, socket: socket, resourceName: name, preStartContainerFlag: preStartContainerFlag, getPreferredAllocationFlag: getPreferredAllocationFlag, + registerControlFunc: defaultRegisterControlFunc, stop: make(chan interface{}), update: make(chan []*pluginapi.Device), allocFunc: defaultAllocFunc, getPreferredAllocFunc: defaultGetPreferredAllocFunc, + kubeletRestartWatcher: watcher, } } @@ -103,9 +124,15 @@ func (m *Stub) SetAllocFunc(f stubAllocFunc) { m.allocFunc = f } +// SetRegisterControlFunc sets RegisterControlFunc of the device plugin +func (m *Stub) SetRegisterControlFunc(f stubRegisterControlFunc) { + m.registerControlFunc = f +} + // Start starts the gRPC server of the device plugin. Can only // be called once. 
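The stub's new kubeletRestartWatcher (used by the Watch loop added further below) exists to detect a kubelet restart: when the kubelet's registration socket is re-created, the plugin restarts its server and re-registers. A rough sketch of that idea using fsnotify; the socket path and the reRegister callback here are illustrative placeholders, not the stub's actual wiring:

package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchForKubeletRestart watches the directory that holds the kubelet's plugin
// registration socket; when that socket file is re-created (i.e. the kubelet
// restarted), it invokes reRegister.
func watchForKubeletRestart(kubeletSocket string, reRegister func() error) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()

	if err := watcher.Add(filepath.Dir(kubeletSocket)); err != nil {
		return err
	}
	for {
		select {
		case event := <-watcher.Events:
			if event.Name == kubeletSocket && event.Op&fsnotify.Create == fsnotify.Create {
				log.Println("kubelet socket re-created, re-registering plugin")
				if err := reRegister(); err != nil {
					return err
				}
			}
		case err := <-watcher.Errors:
			log.Printf("watch error: %v", err)
		}
	}
}

func main() {
	// Blocks until an error; a real plugin would run this for its lifetime.
	log.Fatal(watchForKubeletRestart("/var/lib/kubelet/device-plugins/kubelet.sock",
		func() error { return nil }))
}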
func (m *Stub) Start() error { + klog.InfoS("Starting device plugin server") err := m.cleanup() if err != nil { return err @@ -121,6 +148,12 @@ func (m *Stub) Start() error { pluginapi.RegisterDevicePluginServer(m.server, m) watcherapi.RegisterRegistrationServer(m.server, m) + err = m.kubeletRestartWatcher.Add(filepath.Dir(m.socket)) + if err != nil { + klog.ErrorS(err, "Failed to add watch", "devicePluginPath", pluginapi.DevicePluginPath) + return err + } + go func() { defer m.wg.Done() m.server.Serve(sock) @@ -144,13 +177,29 @@ func (m *Stub) Start() error { return nil } +func (m *Stub) Restart() error { + klog.InfoS("Restarting Device Plugin server") + if m.server == nil { + return nil + } + + m.server.Stop() + m.server = nil + + return m.Start() +} + // Stop stops the gRPC server. Can be called without a prior Start // and more than once. Not safe to be called concurrently by different // goroutines! func (m *Stub) Stop() error { + klog.InfoS("Stopping device plugin server") if m.server == nil { return nil } + + m.kubeletRestartWatcher.Close() + m.server.Stop() m.wg.Wait() m.server = nil @@ -159,6 +208,46 @@ func (m *Stub) Stop() error { return m.cleanup() } +func (m *Stub) Watch(kubeletEndpoint, resourceName, pluginSockDir string) { + for { + select { + // Detect a kubelet restart by watching for a newly created + // 'pluginapi.KubeletSocket' file. When this occurs, restart + // the device plugin server + case event := <-m.kubeletRestartWatcher.Events: + if event.Name == kubeletEndpoint && event.Op&fsnotify.Create == fsnotify.Create { + klog.InfoS("inotify: file created, restarting", "kubeletEndpoint", kubeletEndpoint) + var lastErr error + + err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 2*time.Minute, false, func(context.Context) (done bool, err error) { + restartErr := m.Restart() + if restartErr == nil { + return true, nil + } + klog.ErrorS(restartErr, "Retrying after error") + lastErr = restartErr + return false, nil + }) + if err != nil { + klog.ErrorS(err, "Unable to restart server: wait timed out", "lastErr", lastErr.Error()) + panic(err) + } + + if ok := m.registerControlFunc(); ok { + if err := m.Register(kubeletEndpoint, resourceName, pluginSockDir); err != nil { + klog.ErrorS(err, "Unable to register to kubelet") + panic(err) + } + } + } + + // Watch for any other fs errors and log them. + case err := <-m.kubeletRestartWatcher.Errors: + klog.ErrorS(err, "inotify error") + } + } +} + // GetInfo is the RPC which return pluginInfo func (m *Stub) GetInfo(ctx context.Context, req *watcherapi.InfoRequest) (*watcherapi.PluginInfo, error) { klog.InfoS("GetInfo") @@ -182,6 +271,8 @@ func (m *Stub) NotifyRegistrationStatus(ctx context.Context, status *watcherapi. // Register registers the device plugin for the given resourceName with Kubelet. func (m *Stub) Register(kubeletEndpoint, resourceName string, pluginSockDir string) error { + klog.InfoS("Register", "kubeletEndpoint", kubeletEndpoint, "resourceName", resourceName, "socket", pluginSockDir) + if pluginSockDir != "" { if _, err := os.Stat(pluginSockDir + "DEPRECATION"); err == nil { klog.InfoS("Deprecation file found. 
Skip registration") @@ -214,6 +305,13 @@ func (m *Stub) Register(kubeletEndpoint, resourceName string, pluginSockDir stri } _, err = client.Register(context.Background(), reqt) + if err != nil { + // Stop server + m.server.Stop() + klog.ErrorS(err, "Client unable to register to kubelet") + return err + } + klog.InfoS("Device Plugin registered with the Kubelet") return err } diff --git a/pkg/kubelet/cm/devicemanager/pod_devices.go b/pkg/kubelet/cm/devicemanager/pod_devices.go index fe4eb65e40557..a6b88ce4f70ee 100644 --- a/pkg/kubelet/cm/devicemanager/pod_devices.go +++ b/pkg/kubelet/cm/devicemanager/pod_devices.go @@ -52,10 +52,10 @@ func newPodDevices() *podDevices { return &podDevices{devs: make(map[string]containerDevices)} } -func (pdev *podDevices) pods() sets.String { +func (pdev *podDevices) pods() sets.Set[string] { pdev.RLock() defer pdev.RUnlock() - ret := sets.NewString() + ret := sets.New[string]() for k := range pdev.devs { ret.Insert(k) } @@ -100,11 +100,11 @@ func (pdev *podDevices) delete(pods []string) { // Returns list of device Ids allocated to the given pod for the given resource. // Returns nil if we don't have cached state for the given . -func (pdev *podDevices) podDevices(podUID, resource string) sets.String { +func (pdev *podDevices) podDevices(podUID, resource string) sets.Set[string] { pdev.RLock() defer pdev.RUnlock() - ret := sets.NewString() + ret := sets.New[string]() for contName := range pdev.devs[podUID] { ret = ret.Union(pdev.containerDevices(podUID, contName, resource)) } @@ -113,7 +113,7 @@ func (pdev *podDevices) podDevices(podUID, resource string) sets.String { // Returns list of device Ids allocated to the given container for the given resource. // Returns nil if we don't have cached state for the given . -func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.String { +func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.Set[string] { pdev.RLock() defer pdev.RUnlock() if _, podExists := pdev.devs[podUID]; !podExists { @@ -130,7 +130,7 @@ func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets } // Populates allocatedResources with the device resources allocated to the specified . -func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) { +func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) { pdev.RLock() defer pdev.RUnlock() containers, exists := pdev.devs[podUID] @@ -147,7 +147,7 @@ func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, } // Removes the device resources allocated to the specified from allocatedResources. -func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) { +func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) { pdev.RLock() defer pdev.RUnlock() containers, exists := pdev.devs[podUID] @@ -164,15 +164,15 @@ func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName strin } // Returns all of devices allocated to the pods being tracked, keyed by resourceName. 
-func (pdev *podDevices) devices() map[string]sets.String { - ret := make(map[string]sets.String) +func (pdev *podDevices) devices() map[string]sets.Set[string] { + ret := make(map[string]sets.Set[string]) pdev.RLock() defer pdev.RUnlock() for _, containerDevices := range pdev.devs { for _, resources := range containerDevices { for resource, devices := range resources { if _, exists := ret[resource]; !exists { - ret[resource] = sets.NewString() + ret[resource] = sets.New[string]() } if devices.allocResp != nil { ret[resource] = ret[resource].Union(devices.deviceIds.Devices()) @@ -464,9 +464,9 @@ func (rdev ResourceDeviceInstances) Clone() ResourceDeviceInstances { return clone } -// Filter takes a condition set expressed as map[string]sets.String and returns a new +// Filter takes a condition set expressed as map[string]sets.Set[string] and returns a new // ResourceDeviceInstances with only the devices matching the condition set. -func (rdev ResourceDeviceInstances) Filter(cond map[string]sets.String) ResourceDeviceInstances { +func (rdev ResourceDeviceInstances) Filter(cond map[string]sets.Set[string]) ResourceDeviceInstances { filtered := NewResourceDeviceInstances() for resourceName, filterIDs := range cond { if _, exists := rdev[resourceName]; !exists { diff --git a/pkg/kubelet/cm/devicemanager/pod_devices_test.go b/pkg/kubelet/cm/devicemanager/pod_devices_test.go index 70d5f6e6dead9..e6baf5f32db04 100644 --- a/pkg/kubelet/cm/devicemanager/pod_devices_test.go +++ b/pkg/kubelet/cm/devicemanager/pod_devices_test.go @@ -65,7 +65,7 @@ func TestGetContainerDevices(t *testing.T) { func TestResourceDeviceInstanceFilter(t *testing.T) { var expected string - var cond map[string]sets.String + var cond map[string]sets.Set[string] var resp ResourceDeviceInstances devs := ResourceDeviceInstances{ "foo": DeviceInstances{ @@ -103,40 +103,40 @@ func TestResourceDeviceInstanceFilter(t *testing.T) { }, } - resp = devs.Filter(map[string]sets.String{}) + resp = devs.Filter(map[string]sets.Set[string]{}) expected = `{}` expectResourceDeviceInstances(t, resp, expected) - cond = map[string]sets.String{ - "foo": sets.NewString("dev-foo1", "dev-foo2"), - "bar": sets.NewString("dev-bar1"), + cond = map[string]sets.Set[string]{ + "foo": sets.New[string]("dev-foo1", "dev-foo2"), + "bar": sets.New[string]("dev-bar1"), } resp = devs.Filter(cond) expected = `{"bar":{"dev-bar1":{"ID":"bar1"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"}}}` expectResourceDeviceInstances(t, resp, expected) - cond = map[string]sets.String{ - "foo": sets.NewString("dev-foo1", "dev-foo2", "dev-foo3"), - "bar": sets.NewString("dev-bar1", "dev-bar2", "dev-bar3"), - "baz": sets.NewString("dev-baz1", "dev-baz2", "dev-baz3"), + cond = map[string]sets.Set[string]{ + "foo": sets.New[string]("dev-foo1", "dev-foo2", "dev-foo3"), + "bar": sets.New[string]("dev-bar1", "dev-bar2", "dev-bar3"), + "baz": sets.New[string]("dev-baz1", "dev-baz2", "dev-baz3"), } resp = devs.Filter(cond) expected = `{"bar":{"dev-bar1":{"ID":"bar1"},"dev-bar2":{"ID":"bar2"},"dev-bar3":{"ID":"bar3"}},"baz":{"dev-baz1":{"ID":"baz1"},"dev-baz2":{"ID":"baz2"},"dev-baz3":{"ID":"baz3"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"},"dev-foo3":{"ID":"foo3"}}}` expectResourceDeviceInstances(t, resp, expected) - cond = map[string]sets.String{ - "foo": sets.NewString("dev-foo1", "dev-foo2", "dev-foo3", "dev-foo4"), - "bar": sets.NewString("dev-bar1", "dev-bar2", "dev-bar3", "dev-bar4"), - "baz": sets.NewString("dev-baz1", "dev-baz2", "dev-baz3", "dev-bar4"), + 
cond = map[string]sets.Set[string]{ + "foo": sets.New[string]("dev-foo1", "dev-foo2", "dev-foo3", "dev-foo4"), + "bar": sets.New[string]("dev-bar1", "dev-bar2", "dev-bar3", "dev-bar4"), + "baz": sets.New[string]("dev-baz1", "dev-baz2", "dev-baz3", "dev-bar4"), } resp = devs.Filter(cond) expected = `{"bar":{"dev-bar1":{"ID":"bar1"},"dev-bar2":{"ID":"bar2"},"dev-bar3":{"ID":"bar3"}},"baz":{"dev-baz1":{"ID":"baz1"},"dev-baz2":{"ID":"baz2"},"dev-baz3":{"ID":"baz3"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"},"dev-foo3":{"ID":"foo3"}}}` expectResourceDeviceInstances(t, resp, expected) - cond = map[string]sets.String{ - "foo": sets.NewString("dev-foo1", "dev-foo4", "dev-foo7"), - "bar": sets.NewString("dev-bar1", "dev-bar4", "dev-bar7"), - "baz": sets.NewString("dev-baz1", "dev-baz4", "dev-baz7"), + cond = map[string]sets.Set[string]{ + "foo": sets.New[string]("dev-foo1", "dev-foo4", "dev-foo7"), + "bar": sets.New[string]("dev-bar1", "dev-bar4", "dev-bar7"), + "baz": sets.New[string]("dev-baz1", "dev-baz4", "dev-baz7"), } resp = devs.Filter(cond) expected = `{"bar":{"dev-bar1":{"ID":"bar1"}},"baz":{"dev-baz1":{"ID":"baz1"}},"foo":{"dev-foo1":{"ID":"foo1"}}}` diff --git a/pkg/kubelet/cm/devicemanager/topology_hints.go b/pkg/kubelet/cm/devicemanager/topology_hints.go index d68febd2f0587..12f515d365821 100644 --- a/pkg/kubelet/cm/devicemanager/topology_hints.go +++ b/pkg/kubelet/cm/devicemanager/topology_hints.go @@ -63,7 +63,7 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map continue } klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resource", resource, "pod", klog.KObj(pod), "containerName", container.Name) - deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.String{}, requested) + deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested) continue } @@ -118,7 +118,7 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana continue } klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resource", resource, "pod", klog.KObj(pod)) - deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.String{}, requested) + deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested) continue } @@ -132,7 +132,7 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana // Generate TopologyHints for this resource given the current // request size and the list of available devices. - deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.String{}, requested) + deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.Set[string]{}, requested) } return deviceHints @@ -148,12 +148,12 @@ func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool { return false } -func (m *ManagerImpl) getAvailableDevices(resource string) sets.String { +func (m *ManagerImpl) getAvailableDevices(resource string) sets.Set[string] { // Strip all devices in use from the list of healthy ones. 
return m.healthyDevices[resource].Difference(m.allocatedDevices[resource]) } -func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.String, reusable sets.String, request int) []topologymanager.TopologyHint { +func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.Set[string], reusable sets.Set[string], request int) []topologymanager.TopologyHint { // Initialize minAffinitySize to include all NUMA Nodes minAffinitySize := len(m.numaNodes) diff --git a/pkg/kubelet/cm/devicemanager/topology_hints_test.go b/pkg/kubelet/cm/devicemanager/topology_hints_test.go index a7bc5157366e5..43bfafe373c2d 100644 --- a/pkg/kubelet/cm/devicemanager/topology_hints_test.go +++ b/pkg/kubelet/cm/devicemanager/topology_hints_test.go @@ -61,8 +61,8 @@ func TestGetTopologyHints(t *testing.T) { for _, tc := range tcases { m := ManagerImpl{ allDevices: NewResourceDeviceInstances(), - healthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), podDevices: newPodDevices(), sourcesReady: &sourcesReadyStub{}, activePods: func() []*v1.Pod { return []*v1.Pod{tc.pod} }, @@ -71,7 +71,7 @@ func TestGetTopologyHints(t *testing.T) { for r := range tc.devices { m.allDevices[r] = make(DeviceInstances) - m.healthyDevices[r] = sets.NewString() + m.healthyDevices[r] = sets.New[string]() for _, d := range tc.devices[r] { m.allDevices[r][d.ID] = d @@ -84,7 +84,7 @@ func TestGetTopologyHints(t *testing.T) { for r, devices := range tc.allocatedDevices[p][c] { m.podDevices.insert(p, c, r, constructDevices(devices), nil) - m.allocatedDevices[r] = sets.NewString() + m.allocatedDevices[r] = sets.New[string]() for _, d := range devices { m.allocatedDevices[r].Insert(d) } @@ -414,8 +414,8 @@ func TestTopologyAlignedAllocation(t *testing.T) { for _, tc := range tcases { m := ManagerImpl{ allDevices: NewResourceDeviceInstances(), - healthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), endpoints: make(map[string]endpointInfo), podDevices: newPodDevices(), sourcesReady: &sourcesReadyStub{}, @@ -424,7 +424,7 @@ func TestTopologyAlignedAllocation(t *testing.T) { } m.allDevices[tc.resource] = make(DeviceInstances) - m.healthyDevices[tc.resource] = sets.NewString() + m.healthyDevices[tc.resource] = sets.New[string]() m.endpoints[tc.resource] = endpointInfo{} for _, d := range tc.devices { @@ -441,7 +441,7 @@ func TestTopologyAlignedAllocation(t *testing.T) { } } - allocated, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.NewString()) + allocated, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.New[string]()) if err != nil { t.Errorf("Unexpected error: %v", err) continue @@ -603,8 +603,8 @@ func TestGetPreferredAllocationParameters(t *testing.T) { for _, tc := range tcases { m := ManagerImpl{ allDevices: NewResourceDeviceInstances(), - healthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), endpoints: make(map[string]endpointInfo), podDevices: newPodDevices(), sourcesReady: &sourcesReadyStub{}, @@ -613,13 +613,13 @@ func TestGetPreferredAllocationParameters(t *testing.T) { } 
m.allDevices[tc.resource] = make(DeviceInstances) - m.healthyDevices[tc.resource] = sets.NewString() + m.healthyDevices[tc.resource] = sets.New[string]() for _, d := range tc.allDevices { m.allDevices[tc.resource][d.ID] = d m.healthyDevices[tc.resource].Insert(d.ID) } - m.allocatedDevices[tc.resource] = sets.NewString() + m.allocatedDevices[tc.resource] = sets.New[string]() for _, d := range tc.allocatedDevices { m.allocatedDevices[tc.resource].Insert(d) } @@ -639,17 +639,17 @@ func TestGetPreferredAllocationParameters(t *testing.T) { opts: &pluginapi.DevicePluginOptions{GetPreferredAllocationAvailable: true}, } - _, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.NewString(tc.reusableDevices...)) + _, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.New[string](tc.reusableDevices...)) if err != nil { t.Errorf("Unexpected error: %v", err) continue } - if !sets.NewString(actualAvailable...).Equal(sets.NewString(tc.expectedAvailable...)) { + if !sets.New[string](actualAvailable...).Equal(sets.New[string](tc.expectedAvailable...)) { t.Errorf("%v. expected available: %v but got: %v", tc.description, tc.expectedAvailable, actualAvailable) } - if !sets.NewString(actualAvailable...).Equal(sets.NewString(tc.expectedAvailable...)) { + if !sets.New[string](actualAvailable...).Equal(sets.New[string](tc.expectedAvailable...)) { t.Errorf("%v. expected mustInclude: %v but got: %v", tc.description, tc.expectedMustInclude, actualMustInclude) } @@ -903,11 +903,11 @@ func TestGetPodDeviceRequest(t *testing.T) { for _, tc := range tcases { m := ManagerImpl{ - healthyDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), } for _, res := range tc.registeredDevices { - m.healthyDevices[res] = sets.NewString() + m.healthyDevices[res] = sets.New[string]() } accumulatedResourceRequests := m.getPodDeviceRequest(tc.pod) @@ -925,8 +925,8 @@ func TestGetPodTopologyHints(t *testing.T) { for _, tc := range tcases { m := ManagerImpl{ allDevices: NewResourceDeviceInstances(), - healthyDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]sets.String), + healthyDevices: make(map[string]sets.Set[string]), + allocatedDevices: make(map[string]sets.Set[string]), podDevices: newPodDevices(), sourcesReady: &sourcesReadyStub{}, activePods: func() []*v1.Pod { return []*v1.Pod{tc.pod, {ObjectMeta: metav1.ObjectMeta{UID: "fakeOtherPod"}}} }, @@ -935,7 +935,7 @@ func TestGetPodTopologyHints(t *testing.T) { for r := range tc.devices { m.allDevices[r] = make(DeviceInstances) - m.healthyDevices[r] = sets.NewString() + m.healthyDevices[r] = sets.New[string]() for _, d := range tc.devices[r] { //add `pluginapi.Device` with Topology @@ -949,7 +949,7 @@ func TestGetPodTopologyHints(t *testing.T) { for r, devices := range tc.allocatedDevices[p][c] { m.podDevices.insert(p, c, r, constructDevices(devices), nil) - m.allocatedDevices[r] = sets.NewString() + m.allocatedDevices[r] = sets.New[string]() for _, d := range devices { m.allocatedDevices[r].Insert(d) } diff --git a/pkg/kubelet/cm/devicemanager/types.go b/pkg/kubelet/cm/devicemanager/types.go index fb330568adc33..7e3261c667d38 100644 --- a/pkg/kubelet/cm/devicemanager/types.go +++ b/pkg/kubelet/cm/devicemanager/types.go @@ -33,7 +33,7 @@ import ( // Manager manages all the Device Plugins running on a node. type Manager interface { // Start starts device plugin registration service. 
- Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.String) error + Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error // Allocate configures and assigns devices to a container in a pod. From // the requested device resources, Allocate will communicate with the diff --git a/pkg/registry/networking/clustercidr/doc.go b/pkg/kubelet/cm/doc.go similarity index 56% rename from pkg/registry/networking/clustercidr/doc.go rename to pkg/kubelet/cm/doc.go index ebd30f6330426..422aca0750bb9 100644 --- a/pkg/registry/networking/clustercidr/doc.go +++ b/pkg/kubelet/cm/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,4 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clustercidr // import "k8s.io/kubernetes/pkg/registry/networking/clustercidr" +// Package cm (abbreviation of "container manager") and its subpackages contain all the kubelet code +// to manage containers. For example, they contain functions to configure containers' cgroups, +// ensure containers run with the desired QoS, and allocate compute resources like cpus, memory, +// devices... +package cm // import "k8s.io/kubernetes/pkg/kubelet/cm" diff --git a/pkg/kubelet/cm/dra/claiminfo.go b/pkg/kubelet/cm/dra/claiminfo.go index 7266f9e72b28f..d369b8d3e33ea 100644 --- a/pkg/kubelet/cm/dra/claiminfo.go +++ b/pkg/kubelet/cm/dra/claiminfo.go @@ -33,9 +33,10 @@ import ( type ClaimInfo struct { sync.RWMutex state.ClaimInfoState - // annotations is a list of container annotations associated with + // annotations is a mapping of container annotations per DRA plugin associated with // a prepared resource - annotations []kubecontainer.Annotation + annotations map[string][]kubecontainer.Annotation + prepared bool } func (info *ClaimInfo) addPodReference(podUID types.UID) { @@ -69,11 +70,23 @@ func (info *ClaimInfo) addCDIDevices(pluginName string, cdiDevices []string) err } info.CDIDevices[pluginName] = cdiDevices - info.annotations = append(info.annotations, annotations...) + info.annotations[pluginName] = annotations return nil } +// annotationsAsList returns container annotations as a single list. +func (info *ClaimInfo) annotationsAsList() []kubecontainer.Annotation { + info.RLock() + defer info.RUnlock() + + var lst []kubecontainer.Annotation + for _, v := range info.annotations { + lst = append(lst, v...) + } + return lst +} + // claimInfoCache is a cache of processed resource claims keyed by namespace + claim name. type claimInfoCache struct { sync.RWMutex @@ -93,10 +106,33 @@ func newClaimInfo(driverName, className string, claimUID types.UID, claimName, n } claimInfo := ClaimInfo{ ClaimInfoState: claimInfoState, + annotations: make(map[string][]kubecontainer.Annotation), } return &claimInfo } +// newClaimInfoFromResourceClaim creates a new ClaimInfo object +func newClaimInfoFromResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim) *ClaimInfo { + // Grab the allocation.resourceHandles. If there are no + // allocation.resourceHandles, create a single resourceHandle with no + // content. 
This will trigger processing of this claim by a single + // kubelet plugin whose name matches resourceClaim.Status.DriverName. + resourceHandles := resourceClaim.Status.Allocation.ResourceHandles + if len(resourceHandles) == 0 { + resourceHandles = make([]resourcev1alpha2.ResourceHandle, 1) + } + + return newClaimInfo( + resourceClaim.Status.DriverName, + resourceClaim.Spec.ResourceClassName, + resourceClaim.UID, + resourceClaim.Name, + resourceClaim.Namespace, + make(sets.Set[string]), + resourceHandles, + ) +} + // newClaimInfoCache is a function that returns an instance of the claimInfoCache. func newClaimInfoCache(stateDir, checkpointName string) (*claimInfoCache, error) { stateImpl, err := state.NewCheckpointState(stateDir, checkpointName) diff --git a/pkg/kubelet/cm/dra/manager.go b/pkg/kubelet/cm/dra/manager.go index 703eae58b4fce..62a2bd4cd4fc2 100644 --- a/pkg/kubelet/cm/dra/manager.go +++ b/pkg/kubelet/cm/dra/manager.go @@ -21,10 +21,8 @@ import ( "fmt" v1 "k8s.io/api/core/v1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/dynamic-resource-allocation/resourceclaim" "k8s.io/klog/v2" @@ -109,42 +107,30 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { continue } - // Is the resource already prepared? Then add the pod UID to it. - if claimInfo := m.cache.get(*claimName, pod.Namespace); claimInfo != nil { - // We delay checkpointing of this change until this call - // returns successfully. It is OK to do this because we - // will only return successfully from this call if the - // checkpoint has succeeded. That means if the kubelet is - // ever restarted before this checkpoint succeeds, the pod - // whose resources are being prepared would never have - // started, so it's OK (actually correct) to not include it - // in the cache. - claimInfo.addPodReference(pod.UID) + claimInfo := m.cache.get(*claimName, pod.Namespace) + if claimInfo == nil { + // claim does not exist in cache, create new claimInfo object + // to be processed later. + claimInfo = newClaimInfoFromResourceClaim(resourceClaim) + } + + // We delay checkpointing of this change until this call + // returns successfully. It is OK to do this because we + // will only return successfully from this call if the + // checkpoint has succeeded. That means if the kubelet is + // ever restarted before this checkpoint succeeds, the pod + // whose resources are being prepared would never have + // started, so it's OK (actually correct) to not include it + // in the cache. + claimInfo.addPodReference(pod.UID) + + if claimInfo.prepared { + // Already prepared this claim, no need to prepare it again continue } - // Grab the allocation.resourceHandles. If there are no - // allocation.resourceHandles, create a single resourceHandle with no - // content. This will trigger processing of this claim by a single - // kubelet plugin whose name matches resourceClaim.Status.DriverName. - resourceHandles := resourceClaim.Status.Allocation.ResourceHandles - if len(resourceHandles) == 0 { - resourceHandles = make([]resourcev1alpha2.ResourceHandle, 1) - } - - // Create a claimInfo object to store the relevant claim info. 
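The PrepareResources rewrite in this hunk makes claim preparation idempotent: a cached claimInfo is reused when present, a missing one is rebuilt from the ResourceClaim via newClaimInfoFromResourceClaim, and the plugin is only invoked while claimInfo.prepared is still false. A stand-alone sketch of that pattern, using hypothetical names (claimEntry, prepareOnce) that are not kubelet APIs:

package main

import "fmt"

// claimEntry is a hypothetical stand-in for the cached claimInfo.
type claimEntry struct {
    name     string
    prepared bool
}

// prepareOnce reuses a cached entry when present, creates one otherwise,
// and runs the (potentially expensive) prepare step at most once per claim.
func prepareOnce(cache map[string]*claimEntry, name string, prepare func(*claimEntry) error) error {
    entry, ok := cache[name]
    if !ok {
        entry = &claimEntry{name: name}
        cache[name] = entry
    }
    if entry.prepared {
        return nil // already prepared, skip the plugin call
    }
    if err := prepare(entry); err != nil {
        return err
    }
    entry.prepared = true
    return nil
}

func main() {
    cache := map[string]*claimEntry{}
    calls := 0
    prepare := func(*claimEntry) error { calls++; return nil }

    _ = prepareOnce(cache, "test-pod-claim", prepare)
    _ = prepareOnce(cache, "test-pod-claim", prepare)
    fmt.Println(calls) // 1
}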
- claimInfo := newClaimInfo( - resourceClaim.Status.DriverName, - resourceClaim.Spec.ResourceClassName, - resourceClaim.UID, - resourceClaim.Name, - resourceClaim.Namespace, - sets.New(string(pod.UID)), - resourceHandles, - ) - // Loop through all plugins and prepare for calling NodePrepareResources. - for _, resourceHandle := range resourceHandles { + for _, resourceHandle := range claimInfo.ResourceHandles { // If no DriverName is provided in the resourceHandle, we // use the DriverName from the status pluginName := resourceHandle.DriverName @@ -193,6 +179,8 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { if err != nil { return fmt.Errorf("failed to add CDIDevices to claimInfo %+v: %+v", claimInfo, err) } + // mark claim as (successfully) prepared by manager, so next time we dont prepare it. + claimInfo.prepared = true // TODO: We (re)add the claimInfo object to the cache and // sync it to the checkpoint *after* the @@ -291,8 +279,9 @@ func (m *ManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*Conta } claimInfo.RLock() - klog.V(3).InfoS("Add resource annotations", "claim", *claimName, "annotations", claimInfo.annotations) - annotations = append(annotations, claimInfo.annotations...) + claimAnnotations := claimInfo.annotationsAsList() + klog.V(3).InfoS("Add resource annotations", "claim", *claimName, "annotations", claimAnnotations) + annotations = append(annotations, claimAnnotations...) for _, devices := range claimInfo.CDIDevices { for _, device := range devices { cdiDevices = append(cdiDevices, kubecontainer.CDIDevice{Name: device}) diff --git a/pkg/kubelet/cm/dra/manager_test.go b/pkg/kubelet/cm/dra/manager_test.go index e25fb855b89b1..1e90a29693f17 100644 --- a/pkg/kubelet/cm/dra/manager_test.go +++ b/pkg/kubelet/cm/dra/manager_test.go @@ -22,6 +22,7 @@ import ( "net" "os" "path/filepath" + "sync/atomic" "testing" "time" @@ -46,11 +47,15 @@ const ( type fakeDRADriverGRPCServer struct { drapbv1.UnimplementedNodeServer - driverName string - timeout *time.Duration + driverName string + timeout *time.Duration + prepareResourceCalls atomic.Uint32 + unprepareResourceCalls atomic.Uint32 } func (s *fakeDRADriverGRPCServer) NodePrepareResources(ctx context.Context, req *drapbv1.NodePrepareResourcesRequest) (*drapbv1.NodePrepareResourcesResponse, error) { + s.prepareResourceCalls.Add(1) + if s.timeout != nil { time.Sleep(*s.timeout) } @@ -60,6 +65,8 @@ func (s *fakeDRADriverGRPCServer) NodePrepareResources(ctx context.Context, req } func (s *fakeDRADriverGRPCServer) NodeUnprepareResources(ctx context.Context, req *drapbv1.NodeUnprepareResourcesRequest) (*drapbv1.NodeUnprepareResourcesResponse, error) { + s.unprepareResourceCalls.Add(1) + if s.timeout != nil { time.Sleep(*s.timeout) } @@ -68,10 +75,23 @@ func (s *fakeDRADriverGRPCServer) NodeUnprepareResources(ctx context.Context, re type tearDown func() -func setupFakeDRADriverGRPCServer(shouldTimeout bool) (string, tearDown, error) { +type fakeDRAServerInfo struct { + // fake DRA server + server *fakeDRADriverGRPCServer + // fake DRA plugin socket name + socketName string + // teardownFn stops fake gRPC server + teardownFn tearDown +} + +func setupFakeDRADriverGRPCServer(shouldTimeout bool) (fakeDRAServerInfo, error) { socketDir, err := os.MkdirTemp("", "dra") if err != nil { - return "", nil, err + return fakeDRAServerInfo{ + server: nil, + socketName: "", + teardownFn: nil, + }, err } socketName := filepath.Join(socketDir, "server.sock") @@ -85,7 +105,11 @@ func setupFakeDRADriverGRPCServer(shouldTimeout 
bool) (string, tearDown, error) l, err := net.Listen("unix", socketName) if err != nil { teardown() - return "", nil, err + return fakeDRAServerInfo{ + server: nil, + socketName: "", + teardownFn: nil, + }, err } s := grpc.NewServer() @@ -105,7 +129,11 @@ func setupFakeDRADriverGRPCServer(shouldTimeout bool) (string, tearDown, error) s.GracefulStop() }() - return socketName, teardown, nil + return fakeDRAServerInfo{ + server: fakeDRADriverGRPCServer, + socketName: socketName, + teardownFn: teardown, + }, nil } func TestNewManagerImpl(t *testing.T) { @@ -177,10 +205,12 @@ func TestGetResources(t *testing.T) { }, }, claimInfo: &ClaimInfo{ - annotations: []kubecontainer.Annotation{ - { - Name: "test-annotation", - Value: "123", + annotations: map[string][]kubecontainer.Annotation{ + "test-plugin": { + { + Name: "test-annotation", + Value: "123", + }, }, }, ClaimInfoState: state.ClaimInfoState{ @@ -280,14 +310,15 @@ func TestPrepareResources(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset() for _, test := range []struct { - description string - driverName string - pod *v1.Pod - claimInfo *ClaimInfo - resourceClaim *resourcev1alpha2.ResourceClaim - wantErr bool - wantTimeout bool - wantResourceSkipped bool + description string + driverName string + pod *v1.Pod + claimInfo *ClaimInfo + resourceClaim *resourcev1alpha2.ResourceClaim + wantErr bool + wantTimeout bool + wantResourceSkipped bool + ExpectedPrepareCalls uint32 }{ { description: "failed to fetch ResourceClaim", @@ -497,6 +528,7 @@ func TestPrepareResources(t *testing.T) { Namespace: "test-namespace", PodUIDs: sets.Set[string]{"test-another-pod-reserved": sets.Empty{}}, }, + prepared: true, }, resourceClaim: &resourcev1alpha2.ResourceClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -574,11 +606,12 @@ func TestPrepareResources(t *testing.T) { }, }, }, - wantErr: true, - wantTimeout: true, + wantErr: true, + wantTimeout: true, + ExpectedPrepareCalls: 1, }, { - description: "should prepare resource", + description: "should prepare resource, claim not in cache", driverName: driverName, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -630,6 +663,78 @@ func TestPrepareResources(t *testing.T) { }, }, }, + ExpectedPrepareCalls: 1, + }, + { + description: "should prepare resource. 
claim in cache, manager did not prepare resource", + driverName: driverName, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-namespace", + UID: "test-reserved", + }, + Spec: v1.PodSpec{ + ResourceClaims: []v1.PodResourceClaim{ + { + Name: "test-pod-claim", + Source: v1.ClaimSource{ResourceClaimName: func() *string { + s := "test-pod-claim" + return &s + }()}, + }, + }, + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Claims: []v1.ResourceClaim{ + { + Name: "test-pod-claim", + }, + }, + }, + }, + }, + }, + }, + claimInfo: &ClaimInfo{ + ClaimInfoState: state.ClaimInfoState{ + DriverName: driverName, + ClassName: "test-class", + ClaimName: "test-pod-claim", + ClaimUID: "test-reserved", + Namespace: "test-namespace", + PodUIDs: sets.Set[string]{"test-reserved": sets.Empty{}}, + CDIDevices: map[string][]string{ + driverName: {fmt.Sprintf("%s/%s=some-device", driverName, driverClassName)}, + }, + ResourceHandles: []resourcev1alpha2.ResourceHandle{{Data: "test-data"}}, + }, + annotations: make(map[string][]kubecontainer.Annotation), + prepared: false, + }, + resourceClaim: &resourcev1alpha2.ResourceClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-claim", + Namespace: "test-namespace", + UID: "test-reserved", + }, + Spec: resourcev1alpha2.ResourceClaimSpec{ + ResourceClassName: "test-class", + }, + Status: resourcev1alpha2.ResourceClaimStatus{ + DriverName: driverName, + Allocation: &resourcev1alpha2.AllocationResult{ + ResourceHandles: []resourcev1alpha2.ResourceHandle{ + {Data: "test-data"}, + }, + }, + ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{ + {UID: "test-reserved"}, + }, + }, + }, + ExpectedPrepareCalls: 1, }, } { t.Run(test.description, func(t *testing.T) { @@ -649,14 +754,14 @@ func TestPrepareResources(t *testing.T) { } } - socketName, teardown, err := setupFakeDRADriverGRPCServer(test.wantTimeout) + draServerInfo, err := setupFakeDRADriverGRPCServer(test.wantTimeout) if err != nil { t.Fatal(err) } - defer teardown() + defer draServerInfo.teardownFn() plg := plugin.NewRegistrationHandler() - if err := plg.RegisterPlugin(test.driverName, socketName, []string{"1.27"}); err != nil { + if err := plg.RegisterPlugin(test.driverName, draServerInfo.socketName, []string{"1.27"}); err != nil { t.Fatalf("failed to register plugin %s, err: %v", test.driverName, err) } defer plg.DeRegisterPlugin(test.driverName) // for sake of next tests @@ -666,6 +771,9 @@ func TestPrepareResources(t *testing.T) { } err = manager.PrepareResources(test.pod) + + assert.Equal(t, test.ExpectedPrepareCalls, draServerInfo.server.prepareResourceCalls.Load()) + if test.wantErr { assert.Error(t, err) return // PrepareResources returned an error so stopping the subtest here @@ -701,18 +809,18 @@ func TestPrepareResources(t *testing.T) { } } -func TestUnprepareResouces(t *testing.T) { +func TestUnprepareResources(t *testing.T) { fakeKubeClient := fake.NewSimpleClientset() for _, test := range []struct { - description string - driverName string - pod *v1.Pod - claimInfo *ClaimInfo - resourceClaim *resourcev1alpha2.ResourceClaim - wantErr bool - wantTimeout bool - wantResourceSkipped bool + description string + driverName string + pod *v1.Pod + claimInfo *ClaimInfo + wantErr bool + wantTimeout bool + wantResourceSkipped bool + expectedUnprepareCalls uint32 }{ { description: "plugin does not exist", @@ -750,27 +858,6 @@ func TestUnprepareResouces(t *testing.T) { }, }, }, - resourceClaim: &resourcev1alpha2.ResourceClaim{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "another-claim-test", - Namespace: "test-namespace", - UID: "test-reserved", - }, - Spec: resourcev1alpha2.ResourceClaimSpec{ - ResourceClassName: "test-class", - }, - Status: resourcev1alpha2.ResourceClaimStatus{ - DriverName: driverName, - Allocation: &resourcev1alpha2.AllocationResult{ - ResourceHandles: []resourcev1alpha2.ResourceHandle{ - {Data: "test-data", DriverName: driverName}, - }, - }, - ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{ - {UID: "test-reserved"}, - }, - }, - }, wantErr: true, }, { @@ -813,27 +900,6 @@ func TestUnprepareResouces(t *testing.T) { PodUIDs: sets.Set[string]{"test-reserved": sets.Empty{}, "test-reserved-2": sets.Empty{}}, }, }, - resourceClaim: &resourcev1alpha2.ResourceClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod-claim-1", - Namespace: "test-namespace", - UID: "test-reserved", - }, - Spec: resourcev1alpha2.ResourceClaimSpec{ - ResourceClassName: "test-class", - }, - Status: resourcev1alpha2.ResourceClaimStatus{ - DriverName: driverName, - Allocation: &resourcev1alpha2.AllocationResult{ - ResourceHandles: []resourcev1alpha2.ResourceHandle{ - {Data: "test-data", DriverName: driverName}, - }, - }, - ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{ - {UID: "test-reserved"}, - }, - }, - }, wantResourceSkipped: true, }, { @@ -881,32 +947,12 @@ func TestUnprepareResouces(t *testing.T) { }, }, }, - resourceClaim: &resourcev1alpha2.ResourceClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod-claim-2", - Namespace: "test-namespace", - UID: "test-reserved", - }, - Spec: resourcev1alpha2.ResourceClaimSpec{ - ResourceClassName: "test-class", - }, - Status: resourcev1alpha2.ResourceClaimStatus{ - DriverName: driverName, - Allocation: &resourcev1alpha2.AllocationResult{ - ResourceHandles: []resourcev1alpha2.ResourceHandle{ - {Data: "test-data", DriverName: driverName}, - }, - }, - ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{ - {UID: "test-reserved"}, - }, - }, - }, - wantErr: true, - wantTimeout: true, + wantErr: true, + wantTimeout: true, + expectedUnprepareCalls: 1, }, { - description: "should unprepare resource", + description: "should unprepare resource, claim previously prepared by currently running manager", driverName: driverName, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -949,28 +995,57 @@ func TestUnprepareResouces(t *testing.T) { }, }, }, + prepared: true, }, - resourceClaim: &resourcev1alpha2.ResourceClaim{ + expectedUnprepareCalls: 1, + }, + { + description: "should unprepare resource, claim previously was not prepared by currently running manager", + driverName: driverName, + pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod-claim-3", + Name: "test-pod", Namespace: "test-namespace", UID: "test-reserved", }, - Spec: resourcev1alpha2.ResourceClaimSpec{ - ResourceClassName: "test-class", + Spec: v1.PodSpec{ + ResourceClaims: []v1.PodResourceClaim{ + { + Name: "test-pod-claim", + Source: v1.ClaimSource{ResourceClaimName: func() *string { + s := "test-pod-claim" + return &s + }()}, + }, + }, + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Claims: []v1.ResourceClaim{ + { + Name: "test-pod-claim", + }, + }, + }, + }, + }, }, - Status: resourcev1alpha2.ResourceClaimStatus{ + }, + claimInfo: &ClaimInfo{ + ClaimInfoState: state.ClaimInfoState{ DriverName: driverName, - Allocation: &resourcev1alpha2.AllocationResult{ - ResourceHandles: []resourcev1alpha2.ResourceHandle{ - {Data: "test-data"}, + ClaimName: "test-pod-claim", + 
Namespace: "test-namespace", + ResourceHandles: []resourcev1alpha2.ResourceHandle{ + { + DriverName: driverName, + Data: "test data", }, }, - ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{ - {UID: "test-reserved"}, - }, }, + prepared: false, }, + expectedUnprepareCalls: 1, }, } { t.Run(test.description, func(t *testing.T) { @@ -979,14 +1054,14 @@ func TestUnprepareResouces(t *testing.T) { t.Fatalf("failed to create a new instance of the claimInfoCache, err: %v", err) } - socketName, teardown, err := setupFakeDRADriverGRPCServer(test.wantTimeout) + draServerInfo, err := setupFakeDRADriverGRPCServer(test.wantTimeout) if err != nil { t.Fatal(err) } - defer teardown() + defer draServerInfo.teardownFn() plg := plugin.NewRegistrationHandler() - if err := plg.RegisterPlugin(test.driverName, socketName, []string{"1.27"}); err != nil { + if err := plg.RegisterPlugin(test.driverName, draServerInfo.socketName, []string{"1.27"}); err != nil { t.Fatalf("failed to register plugin %s, err: %v", test.driverName, err) } defer plg.DeRegisterPlugin(test.driverName) // for sake of next tests @@ -1001,6 +1076,9 @@ func TestUnprepareResouces(t *testing.T) { } err = manager.UnprepareResources(test.pod) + + assert.Equal(t, test.expectedUnprepareCalls, draServerInfo.server.unprepareResourceCalls.Load()) + if test.wantErr { assert.Error(t, err) return // UnprepareResources returned an error so stopping the subtest here diff --git a/pkg/kubelet/cm/dra/plugin/client.go b/pkg/kubelet/cm/dra/plugin/client.go index e3a1e96756ca1..bbf0b1e92356f 100644 --- a/pkg/kubelet/cm/dra/plugin/client.go +++ b/pkg/kubelet/cm/dra/plugin/client.go @@ -24,18 +24,107 @@ import ( "google.golang.org/grpc" grpccodes "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" - "k8s.io/klog/v2" + "k8s.io/klog/v2" drapbv1alpha2 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" drapb "k8s.io/kubelet/pkg/apis/dra/v1alpha3" ) const PluginClientTimeout = 45 * time.Second -// draPluginClient encapsulates all dra plugin methods. 
-type draPluginClient struct { - pluginName string - plugin *Plugin +type ( + nodeResourceManager interface { + Prepare(context.Context, *grpc.ClientConn, *plugin, *drapb.NodePrepareResourcesRequest) (*drapb.NodePrepareResourcesResponse, error) + Unprepare(context.Context, *grpc.ClientConn, *plugin, *drapb.NodeUnprepareResourcesRequest) (*drapb.NodeUnprepareResourcesResponse, error) + } + + v1alpha2NodeResourceManager struct{} + v1alpha3NodeResourceManager struct{} +) + +var nodeResourceManagers = map[string]nodeResourceManager{ + v1alpha2Version: v1alpha2NodeResourceManager{}, + v1alpha3Version: v1alpha3NodeResourceManager{}, +} + +func (v1alpha2rm v1alpha2NodeResourceManager) Prepare(ctx context.Context, conn *grpc.ClientConn, _ *plugin, req *drapb.NodePrepareResourcesRequest) (*drapb.NodePrepareResourcesResponse, error) { + nodeClient := drapbv1alpha2.NewNodeClient(conn) + response := &drapb.NodePrepareResourcesResponse{ + Claims: make(map[string]*drapb.NodePrepareResourceResponse), + } + + for _, claim := range req.Claims { + res, err := nodeClient.NodePrepareResource(ctx, + &drapbv1alpha2.NodePrepareResourceRequest{ + Namespace: claim.Namespace, + ClaimUid: claim.Uid, + ClaimName: claim.Name, + ResourceHandle: claim.ResourceHandle, + }) + result := &drapb.NodePrepareResourceResponse{} + if err != nil { + result.Error = err.Error() + } else { + result.CDIDevices = res.CdiDevices + } + response.Claims[claim.Uid] = result + } + + return response, nil +} + +func (v1alpha2rm v1alpha2NodeResourceManager) Unprepare(ctx context.Context, conn *grpc.ClientConn, _ *plugin, req *drapb.NodeUnprepareResourcesRequest) (*drapb.NodeUnprepareResourcesResponse, error) { + nodeClient := drapbv1alpha2.NewNodeClient(conn) + response := &drapb.NodeUnprepareResourcesResponse{ + Claims: make(map[string]*drapb.NodeUnprepareResourceResponse), + } + + for _, claim := range req.Claims { + _, err := nodeClient.NodeUnprepareResource(ctx, + &drapbv1alpha2.NodeUnprepareResourceRequest{ + Namespace: claim.Namespace, + ClaimUid: claim.Uid, + ClaimName: claim.Name, + ResourceHandle: claim.ResourceHandle, + }) + result := &drapb.NodeUnprepareResourceResponse{} + if err != nil { + result.Error = err.Error() + } + response.Claims[claim.Uid] = result + } + + return response, nil +} + +func (v1alpha3rm v1alpha3NodeResourceManager) Prepare(ctx context.Context, conn *grpc.ClientConn, p *plugin, req *drapb.NodePrepareResourcesRequest) (*drapb.NodePrepareResourcesResponse, error) { + nodeClient := drapb.NewNodeClient(conn) + response, err := nodeClient.NodePrepareResources(ctx, req) + if err != nil { + status, _ := grpcstatus.FromError(err) + if status.Code() == grpccodes.Unimplemented { + p.setVersion(v1alpha2Version) + return nodeResourceManagers[v1alpha2Version].Prepare(ctx, conn, p, req) + } + return nil, err + } + + return response, nil +} + +func (v1alpha3rm v1alpha3NodeResourceManager) Unprepare(ctx context.Context, conn *grpc.ClientConn, p *plugin, req *drapb.NodeUnprepareResourcesRequest) (*drapb.NodeUnprepareResourcesResponse, error) { + nodeClient := drapb.NewNodeClient(conn) + response, err := nodeClient.NodeUnprepareResources(ctx, req) + if err != nil { + status, _ := grpcstatus.FromError(err) + if status.Code() == grpccodes.Unimplemented { + p.setVersion(v1alpha2Version) + return nodeResourceManagers[v1alpha2Version].Unprepare(ctx, conn, p, req) + } + return nil, err + } + + return response, nil } func NewDRAPluginClient(pluginName string) (drapb.NodeClient, error) { @@ -43,111 +132,68 @@ func 
NewDRAPluginClient(pluginName string) (drapb.NodeClient, error) { return nil, fmt.Errorf("plugin name is empty") } - existingPlugin := draPlugins.Get(pluginName) + existingPlugin := draPlugins.get(pluginName) if existingPlugin == nil { return nil, fmt.Errorf("plugin name %s not found in the list of registered DRA plugins", pluginName) } - return &draPluginClient{ - pluginName: pluginName, - plugin: existingPlugin, - }, nil + return existingPlugin, nil } -func (r *draPluginClient) NodePrepareResources( +func (p *plugin) NodePrepareResources( ctx context.Context, req *drapb.NodePrepareResourcesRequest, opts ...grpc.CallOption, -) (resp *drapb.NodePrepareResourcesResponse, err error) { +) (*drapb.NodePrepareResourcesResponse, error) { logger := klog.FromContext(ctx) logger.V(4).Info(log("calling NodePrepareResources rpc"), "request", req) - defer logger.V(4).Info(log("done calling NodePrepareResources rpc"), "response", resp, "err", err) - conn, err := r.plugin.getOrCreateGRPCConn() + conn, err := p.getOrCreateGRPCConn() if err != nil { return nil, err } - nodeClient := drapb.NewNodeClient(conn) - nodeClientOld := drapbv1alpha2.NewNodeClient(conn) ctx, cancel := context.WithTimeout(ctx, PluginClientTimeout) defer cancel() - resp, err = nodeClient.NodePrepareResources(ctx, req) - if err != nil { - status, _ := grpcstatus.FromError(err) - if status.Code() == grpccodes.Unimplemented { - // Fall back to the older gRPC API. - resp = &drapb.NodePrepareResourcesResponse{ - Claims: make(map[string]*drapb.NodePrepareResourceResponse), - } - err = nil - for _, claim := range req.Claims { - respOld, errOld := nodeClientOld.NodePrepareResource(ctx, - &drapbv1alpha2.NodePrepareResourceRequest{ - Namespace: claim.Namespace, - ClaimUid: claim.Uid, - ClaimName: claim.Name, - ResourceHandle: claim.ResourceHandle, - }) - result := &drapb.NodePrepareResourceResponse{} - if errOld != nil { - result.Error = errOld.Error() - } else { - result.CDIDevices = respOld.CdiDevices - } - resp.Claims[claim.Uid] = result - } - } + version := p.getVersion() + resourceManager, exists := nodeResourceManagers[version] + if !exists { + err := fmt.Errorf("unsupported plugin version: %s", version) + logger.V(4).Info(log("done calling NodePrepareResources rpc"), "response", nil, "err", err) + return nil, err } - return + response, err := resourceManager.Prepare(ctx, conn, p, req) + logger.V(4).Info(log("done calling NodePrepareResources rpc"), "response", response, "err", err) + return response, err } -func (r *draPluginClient) NodeUnprepareResources( +func (p *plugin) NodeUnprepareResources( ctx context.Context, req *drapb.NodeUnprepareResourcesRequest, opts ...grpc.CallOption, -) (resp *drapb.NodeUnprepareResourcesResponse, err error) { +) (*drapb.NodeUnprepareResourcesResponse, error) { logger := klog.FromContext(ctx) logger.V(4).Info(log("calling NodeUnprepareResource rpc"), "request", req) - defer logger.V(4).Info(log("done calling NodeUnprepareResources rpc"), "response", resp, "err", err) - conn, err := r.plugin.getOrCreateGRPCConn() + conn, err := p.getOrCreateGRPCConn() if err != nil { return nil, err } - nodeClient := drapb.NewNodeClient(conn) - nodeClientOld := drapbv1alpha2.NewNodeClient(conn) ctx, cancel := context.WithTimeout(ctx, PluginClientTimeout) defer cancel() - resp, err = nodeClient.NodeUnprepareResources(ctx, req) - if err != nil { - status, _ := grpcstatus.FromError(err) - if status.Code() == grpccodes.Unimplemented { - // Fall back to the older gRPC API. 
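Both RPC wrappers above use the same downgrade strategy: call the v1alpha3 service first and, when the server answers with gRPC code Unimplemented, retry through the v1alpha2 client and remember the lower version for later calls (the removed inline fallback continues below). A generic, self-contained sketch of that pattern; callWithFallback is a hypothetical helper, not part of the kubelet:

package main

import (
    "context"
    "fmt"

    grpccodes "google.golang.org/grpc/codes"
    grpcstatus "google.golang.org/grpc/status"
)

// callWithFallback tries the newest API first and falls back to the older one
// only when the server reports that the new RPC is not implemented.
func callWithFallback(ctx context.Context, newCall, oldCall func(context.Context) error) error {
    err := newCall(ctx)
    if err == nil {
        return nil
    }
    if s, ok := grpcstatus.FromError(err); ok && s.Code() == grpccodes.Unimplemented {
        return oldCall(ctx)
    }
    return err
}

func main() {
    err := callWithFallback(context.Background(),
        func(context.Context) error { return grpcstatus.Error(grpccodes.Unimplemented, "v1alpha3 not served") },
        func(context.Context) error { fmt.Println("fell back to v1alpha2"); return nil },
    )
    fmt.Println("err:", err) // err: <nil>
}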
- resp = &drapb.NodeUnprepareResourcesResponse{ - Claims: make(map[string]*drapb.NodeUnprepareResourceResponse), - } - err = nil - for _, claim := range req.Claims { - _, errOld := nodeClientOld.NodeUnprepareResource(ctx, - &drapbv1alpha2.NodeUnprepareResourceRequest{ - Namespace: claim.Namespace, - ClaimUid: claim.Uid, - ClaimName: claim.Name, - ResourceHandle: claim.ResourceHandle, - }) - result := &drapb.NodeUnprepareResourceResponse{} - if errOld != nil { - result.Error = errOld.Error() - } - resp.Claims[claim.Uid] = result - } - } + version := p.getVersion() + resourceManager, exists := nodeResourceManagers[version] + if !exists { + err := fmt.Errorf("unsupported plugin version: %s", version) + logger.V(4).Info(log("done calling NodeUnprepareResources rpc"), "response", nil, "err", err) + return nil, err } - return + response, err := resourceManager.Unprepare(ctx, conn, p, req) + logger.V(4).Info(log("done calling NodeUnprepareResources rpc"), "response", response, "err", err) + return response, err } diff --git a/pkg/kubelet/cm/dra/plugin/client_test.go b/pkg/kubelet/cm/dra/plugin/client_test.go index 18c37a1d1ee26..3e1889e2c9787 100644 --- a/pkg/kubelet/cm/dra/plugin/client_test.go +++ b/pkg/kubelet/cm/dra/plugin/client_test.go @@ -18,31 +18,46 @@ package plugin import ( "context" + "fmt" "net" "os" "path/filepath" "sync" "testing" + "github.com/stretchr/testify/assert" "google.golang.org/grpc" - drapbv1 "k8s.io/kubelet/pkg/apis/dra/v1alpha3" + drapbv1alpha2 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" + drapbv1alpha3 "k8s.io/kubelet/pkg/apis/dra/v1alpha3" ) -type fakeGRPCServer struct { - drapbv1.UnimplementedNodeServer +type fakeV1alpha3GRPCServer struct { + drapbv1alpha3.UnimplementedNodeServer } -func (f *fakeGRPCServer) NodePrepareResource(ctx context.Context, in *drapbv1.NodePrepareResourcesRequest) (*drapbv1.NodePrepareResourcesResponse, error) { - return &drapbv1.NodePrepareResourcesResponse{Claims: map[string]*drapbv1.NodePrepareResourceResponse{"dummy": {CDIDevices: []string{"dummy"}}}}, nil +func (f *fakeV1alpha3GRPCServer) NodePrepareResource(ctx context.Context, in *drapbv1alpha3.NodePrepareResourcesRequest) (*drapbv1alpha3.NodePrepareResourcesResponse, error) { + return &drapbv1alpha3.NodePrepareResourcesResponse{Claims: map[string]*drapbv1alpha3.NodePrepareResourceResponse{"dummy": {CDIDevices: []string{"dummy"}}}}, nil } -func (f *fakeGRPCServer) NodeUnprepareResource(ctx context.Context, in *drapbv1.NodeUnprepareResourcesRequest) (*drapbv1.NodeUnprepareResourcesResponse, error) { - return &drapbv1.NodeUnprepareResourcesResponse{}, nil +func (f *fakeV1alpha3GRPCServer) NodeUnprepareResource(ctx context.Context, in *drapbv1alpha3.NodeUnprepareResourcesRequest) (*drapbv1alpha3.NodeUnprepareResourcesResponse, error) { + return &drapbv1alpha3.NodeUnprepareResourcesResponse{}, nil +} + +type fakeV1alpha2GRPCServer struct { + drapbv1alpha2.UnimplementedNodeServer +} + +func (f *fakeV1alpha2GRPCServer) NodePrepareResource(ctx context.Context, in *drapbv1alpha2.NodePrepareResourceRequest) (*drapbv1alpha2.NodePrepareResourceResponse, error) { + return &drapbv1alpha2.NodePrepareResourceResponse{CdiDevices: []string{"dummy"}}, nil +} + +func (f *fakeV1alpha2GRPCServer) NodeUnprepareResource(ctx context.Context, in *drapbv1alpha2.NodeUnprepareResourceRequest) (*drapbv1alpha2.NodeUnprepareResourceResponse, error) { + return &drapbv1alpha2.NodeUnprepareResourceResponse{}, nil } type tearDown func() -func setupFakeGRPCServer() (string, tearDown, error) { +func setupFakeGRPCServer(version 
string) (string, tearDown, error) { p, err := os.MkdirTemp("", "dra_plugin") if err != nil { return "", nil, err @@ -62,8 +77,16 @@ func setupFakeGRPCServer() (string, tearDown, error) { } s := grpc.NewServer() - fakeGRPCServer := &fakeGRPCServer{} - drapbv1.RegisterNodeServer(s, fakeGRPCServer) + switch version { + case v1alpha2Version: + fakeGRPCServer := &fakeV1alpha2GRPCServer{} + drapbv1alpha2.RegisterNodeServer(s, fakeGRPCServer) + case v1alpha3Version: + fakeGRPCServer := &fakeV1alpha3GRPCServer{} + drapbv1alpha3.RegisterNodeServer(s, fakeGRPCServer) + default: + return "", nil, fmt.Errorf("unsupported version: %s", version) + } go func() { go s.Serve(listener) @@ -75,7 +98,7 @@ func setupFakeGRPCServer() (string, tearDown, error) { } func TestGRPCConnIsReused(t *testing.T) { - addr, teardown, err := setupFakeGRPCServer() + addr, teardown, err := setupFakeGRPCServer(v1alpha3Version) if err != nil { t.Fatal(err) } @@ -85,11 +108,12 @@ func TestGRPCConnIsReused(t *testing.T) { wg := sync.WaitGroup{} m := sync.Mutex{} - plugin := &Plugin{ + p := &plugin{ endpoint: addr, + version: v1alpha3Version, } - conn, err := plugin.getOrCreateGRPCConn() + conn, err := p.getOrCreateGRPCConn() defer func() { err := conn.Close() if err != nil { @@ -101,7 +125,8 @@ func TestGRPCConnIsReused(t *testing.T) { } // ensure the plugin we are using is registered - draPlugins.Set("dummy-plugin", plugin) + draPlugins.add("dummy-plugin", p) + defer draPlugins.delete("dummy-plugin") // we call `NodePrepareResource` 2 times and check whether a new connection is created or the same is reused for i := 0; i < 2; i++ { @@ -114,8 +139,8 @@ func TestGRPCConnIsReused(t *testing.T) { return } - req := &drapbv1.NodePrepareResourcesRequest{ - Claims: []*drapbv1.Claim{ + req := &drapbv1alpha3.NodePrepareResourcesRequest{ + Claims: []*drapbv1alpha3.Claim{ { Namespace: "dummy-namespace", Uid: "dummy-uid", @@ -126,9 +151,9 @@ func TestGRPCConnIsReused(t *testing.T) { } client.NodePrepareResources(context.TODO(), req) - client.(*draPluginClient).plugin.Lock() - conn := client.(*draPluginClient).plugin.conn - client.(*draPluginClient).plugin.Unlock() + client.(*plugin).Lock() + conn := client.(*plugin).conn + client.(*plugin).Unlock() m.Lock() defer m.Unlock() @@ -144,6 +169,122 @@ func TestGRPCConnIsReused(t *testing.T) { if counter, ok := reusedConns[conn]; ok && counter != 2 { t.Errorf("expected counter to be 2 but got %d", counter) } +} + +func TestNewDRAPluginClient(t *testing.T) { + for _, test := range []struct { + description string + setup func(string) tearDown + pluginName string + shouldError bool + }{ + { + description: "plugin name is empty", + setup: func(_ string) tearDown { + return func() {} + }, + pluginName: "", + shouldError: true, + }, + { + description: "plugin name not found in the list", + setup: func(_ string) tearDown { + return func() {} + }, + pluginName: "plugin-name-not-found-in-the-list", + shouldError: true, + }, + { + description: "plugin exists", + setup: func(name string) tearDown { + draPlugins.add(name, &plugin{}) + return func() { + draPlugins.delete(name) + } + }, + pluginName: "dummy-plugin", + }, + } { + t.Run(test.description, func(t *testing.T) { + teardown := test.setup(test.pluginName) + defer teardown() + + client, err := NewDRAPluginClient(test.pluginName) + if test.shouldError { + assert.Nil(t, client) + assert.Error(t, err) + } else { + assert.NotNil(t, client) + assert.Nil(t, err) + } + }) + } +} + +func TestNodeUnprepareResource(t *testing.T) { + for _, test := range []struct { 
+ description string + serverSetup func(string) (string, tearDown, error) + serverVersion string + request *drapbv1alpha3.NodeUnprepareResourcesRequest + }{ + { + description: "server supports v1alpha3", + serverSetup: setupFakeGRPCServer, + serverVersion: v1alpha3Version, + request: &drapbv1alpha3.NodeUnprepareResourcesRequest{}, + }, + { + description: "server supports v1alpha2, plugin client should fallback", + serverSetup: setupFakeGRPCServer, + serverVersion: v1alpha2Version, + request: &drapbv1alpha3.NodeUnprepareResourcesRequest{ + Claims: []*drapbv1alpha3.Claim{ + { + Namespace: "dummy-namespace", + Uid: "dummy-uid", + Name: "dummy-claim", + ResourceHandle: "dummy-resource", + }, + }, + }, + }, + } { + t.Run(test.description, func(t *testing.T) { + addr, teardown, err := setupFakeGRPCServer(test.serverVersion) + if err != nil { + t.Fatal(err) + } + defer teardown() + + p := &plugin{ + endpoint: addr, + version: v1alpha3Version, + } + + conn, err := p.getOrCreateGRPCConn() + defer func() { + err := conn.Close() + if err != nil { + t.Error(err) + } + }() + if err != nil { + t.Fatal(err) + } + + draPlugins.add("dummy-plugin", p) + defer draPlugins.delete("dummy-plugin") - draPlugins.Delete("dummy-plugin") + client, err := NewDRAPluginClient("dummy-plugin") + if err != nil { + t.Fatal(err) + } + + _, err = client.NodeUnprepareResources(context.TODO(), test.request) + if err != nil { + t.Fatal(err) + } + }) + } } diff --git a/pkg/kubelet/cm/dra/plugin/plugin.go b/pkg/kubelet/cm/dra/plugin/plugin.go index b5eabe4e6bb81..94a9c7354de55 100644 --- a/pkg/kubelet/cm/dra/plugin/plugin.go +++ b/pkg/kubelet/cm/dra/plugin/plugin.go @@ -17,22 +17,81 @@ limitations under the License. package plugin import ( + "context" "errors" "fmt" + "net" "strings" + "sync" + "time" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/klog/v2" ) const ( // DRAPluginName is the name of the in-tree DRA Plugin. - DRAPluginName = "kubernetes.io/dra" + DRAPluginName = "kubernetes.io/dra" + v1alpha3Version = "v1alpha3" + v1alpha2Version = "v1alpha2" ) -// draPlugins map keeps track of all registered DRA plugins on the node -// and their corresponding sockets. -var draPlugins = &PluginsStore{} +// Plugin is a description of a DRA Plugin, defined by an endpoint +// and the highest DRA version supported. 
+type plugin struct { + sync.Mutex + conn *grpc.ClientConn + endpoint string + version string + highestSupportedVersion *utilversion.Version +} + +func (p *plugin) getOrCreateGRPCConn() (*grpc.ClientConn, error) { + p.Lock() + defer p.Unlock() + + if p.conn != nil { + return p.conn, nil + } + + network := "unix" + klog.V(4).InfoS(log("creating new gRPC connection"), "protocol", network, "endpoint", p.endpoint) + conn, err := grpc.Dial( + p.endpoint, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(func(ctx context.Context, target string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, target) + }), + ) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + if ok := conn.WaitForStateChange(ctx, connectivity.Connecting); !ok { + return nil, errors.New("timed out waiting for gRPC connection to be ready") + } + + p.conn = conn + return p.conn, nil +} + +func (p *plugin) getVersion() string { + p.Lock() + defer p.Unlock() + return p.version +} + +func (p *plugin) setVersion(version string) { + p.Lock() + p.version = version + p.Unlock() +} // RegistrationHandler is the handler which is fed to the pluginwatcher API. type RegistrationHandler struct{} @@ -53,56 +112,17 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, // Storing endpoint of newly registered DRA Plugin into the map, where plugin name will be the key // all other DRA components will be able to get the actual socket of DRA plugins by its name. - draPlugins.Set(pluginName, &Plugin{ + // By default we assume the supported plugin version is v1alpha3 + draPlugins.add(pluginName, &plugin{ conn: nil, endpoint: endpoint, + version: v1alpha3Version, highestSupportedVersion: highestSupportedVersion, }) return nil } -// Return the highest supported version. 
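The private highestSupportedVersion helper below is deleted in favour of utilversion.HighestSupportedVersion from k8s.io/apimachinery/pkg/util/version, which validateVersions now calls. Assuming it keeps the semantics of the deleted helper (only 1.x versions are eligible), usage looks roughly like this; the version list is made up:

package main

import (
    "fmt"

    utilversion "k8s.io/apimachinery/pkg/util/version"
)

func main() {
    // Versions reported by a plugin; 2.x entries are expected to be skipped
    // because DRA only defines 1.x APIs (see the deleted helper below).
    versions := []string{"1.0.0", "v1.1.0", "2.0.0"}

    highest, err := utilversion.HighestSupportedVersion(versions)
    if err != nil {
        fmt.Println("no usable version:", err)
        return
    }
    fmt.Println(highest) // expected: 1.1.0, mirroring the deleted helper's rules
}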
-func highestSupportedVersion(versions []string) (*utilversion.Version, error) { - if len(versions) == 0 { - return nil, errors.New(log("DRA plugin reporting empty array for supported versions")) - } - - var highestSupportedVersion *utilversion.Version - - var theErr error - - for i := len(versions) - 1; i >= 0; i-- { - currentHighestVer, err := utilversion.ParseGeneric(versions[i]) - if err != nil { - theErr = err - - continue - } - - if currentHighestVer.Major() > 1 { - // DRA currently only has version 1.x - continue - } - - if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { - highestSupportedVersion = currentHighestVer - } - } - - if highestSupportedVersion == nil { - return nil, fmt.Errorf( - "could not find a highest supported version from versions (%v) reported by this plugin: %+v", - versions, theErr) - } - - if highestSupportedVersion.Major() != 1 { - return nil, fmt.Errorf("highest supported version reported by plugin is %v, must be v1.x", highestSupportedVersion) - } - - return highestSupportedVersion, nil -} - func (h *RegistrationHandler) validateVersions( callerName string, pluginName string, @@ -119,7 +139,7 @@ func (h *RegistrationHandler) validateVersions( } // Validate version - newPluginHighestVersion, err := highestSupportedVersion(versions) + newPluginHighestVersion, err := utilversion.HighestSupportedVersion(versions) if err != nil { return nil, errors.New( log( @@ -132,32 +152,32 @@ func (h *RegistrationHandler) validateVersions( ) } - existingPlugin := draPlugins.Get(pluginName) - if existingPlugin != nil { - if !existingPlugin.highestSupportedVersion.LessThan(newPluginHighestVersion) { - return nil, errors.New( - log( - "%s for DRA plugin %q failed. Another plugin with the same name is already registered with a higher supported version: %q", - callerName, - pluginName, - existingPlugin.highestSupportedVersion, - ), - ) - } + existingPlugin := draPlugins.get(pluginName) + if existingPlugin == nil { + return newPluginHighestVersion, nil } - - return newPluginHighestVersion, nil + if existingPlugin.highestSupportedVersion.LessThan(newPluginHighestVersion) { + return newPluginHighestVersion, nil + } + return nil, errors.New( + log( + "%s for DRA plugin %q failed. Another plugin with the same name is already registered with a higher supported version: %q", + callerName, + pluginName, + existingPlugin.highestSupportedVersion, + ), + ) } -func unregisterPlugin(pluginName string) { - draPlugins.Delete(pluginName) +func deregisterPlugin(pluginName string) { + draPlugins.delete(pluginName) } // DeRegisterPlugin is called when a plugin has removed its socket, // signaling it is no longer available. func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) { klog.InfoS("DeRegister DRA plugin", "name", pluginName) - unregisterPlugin(pluginName) + deregisterPlugin(pluginName) } // ValidatePlugin is called by kubelet's plugin watcher upon detection diff --git a/pkg/kubelet/cm/dra/plugin/plugin_test.go b/pkg/kubelet/cm/dra/plugin/plugin_test.go new file mode 100644 index 0000000000000..70499b260c848 --- /dev/null +++ b/pkg/kubelet/cm/dra/plugin/plugin_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRegistrationHandler_ValidatePlugin(t *testing.T) { + for _, test := range []struct { + description string + handler func() *RegistrationHandler + pluginName string + endpoint string + versions []string + shouldError bool + }{ + { + description: "no versions provided", + handler: NewRegistrationHandler, + shouldError: true, + }, + { + description: "unsupported version", + handler: NewRegistrationHandler, + versions: []string{"v2.0.0"}, + shouldError: true, + }, + { + description: "plugin already registered with a higher supported version", + handler: func() *RegistrationHandler { + handler := NewRegistrationHandler() + if err := handler.RegisterPlugin("this-plugin-already-exists-and-has-a-long-name-so-it-doesnt-collide", "", []string{"v1.1.0"}); err != nil { + t.Fatal(err) + } + return handler + }, + pluginName: "this-plugin-already-exists-and-has-a-long-name-so-it-doesnt-collide", + versions: []string{"v1.0.0"}, + shouldError: true, + }, + { + description: "should validate the plugin", + handler: NewRegistrationHandler, + pluginName: "this-is-a-dummy-plugin-with-a-long-name-so-it-doesnt-collide", + versions: []string{"v1.3.0"}, + }, + } { + t.Run(test.description, func(t *testing.T) { + handler := test.handler() + err := handler.ValidatePlugin(test.pluginName, test.endpoint, test.versions) + if test.shouldError { + assert.Error(t, err) + } else { + assert.Nil(t, err) + } + }) + } + + t.Cleanup(func() { + handler := NewRegistrationHandler() + handler.DeRegisterPlugin("this-plugin-already-exists-and-has-a-long-name-so-it-doesnt-collide") + handler.DeRegisterPlugin("this-is-a-dummy-plugin-with-a-long-name-so-it-doesnt-collide") + }) +} diff --git a/pkg/kubelet/cm/dra/plugin/plugins_store.go b/pkg/kubelet/cm/dra/plugin/plugins_store.go index 32f750af80d06..aa1449e5913de 100644 --- a/pkg/kubelet/cm/dra/plugin/plugins_store.go +++ b/pkg/kubelet/cm/dra/plugin/plugins_store.go @@ -17,69 +17,24 @@ limitations under the License. package plugin import ( - "context" - "errors" - "net" "sync" - "time" - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/klog/v2" ) -// Plugin is a description of a DRA Plugin, defined by an endpoint -// and the highest DRA version supported. 
-type Plugin struct { - sync.RWMutex - conn *grpc.ClientConn - endpoint string - highestSupportedVersion *utilversion.Version -} - -func (p *Plugin) getOrCreateGRPCConn() (*grpc.ClientConn, error) { - p.Lock() - defer p.Unlock() - - if p.conn != nil { - return p.conn, nil - } - - network := "unix" - klog.V(4).InfoS(log("creating new gRPC connection"), "protocol", network, "endpoint", p.endpoint) - conn, err := grpc.Dial( - p.endpoint, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithContextDialer(func(ctx context.Context, target string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, target) - }), - ) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - if ok := conn.WaitForStateChange(ctx, connectivity.Connecting); !ok { - return nil, errors.New("timed out waiting for gRPC connection to be ready") - } - - p.conn = conn - return p.conn, nil -} - // PluginsStore holds a list of DRA Plugins. -type PluginsStore struct { +type pluginsStore struct { sync.RWMutex - store map[string]*Plugin + store map[string]*plugin } +// draPlugins map keeps track of all registered DRA plugins on the node +// and their corresponding sockets. +var draPlugins = &pluginsStore{} + // Get lets you retrieve a DRA Plugin by name. // This method is protected by a mutex. -func (s *PluginsStore) Get(pluginName string) *Plugin { +func (s *pluginsStore) get(pluginName string) *plugin { s.RLock() defer s.RUnlock() @@ -88,31 +43,26 @@ func (s *PluginsStore) Get(pluginName string) *Plugin { // Set lets you save a DRA Plugin to the list and give it a specific name. // This method is protected by a mutex. -func (s *PluginsStore) Set(pluginName string, plugin *Plugin) { +func (s *pluginsStore) add(pluginName string, p *plugin) { s.Lock() defer s.Unlock() if s.store == nil { - s.store = make(map[string]*Plugin) + s.store = make(map[string]*plugin) } - s.store[pluginName] = plugin + _, exists := s.store[pluginName] + if exists { + klog.V(1).InfoS(log("plugin: %s already registered, previous plugin will be overridden", pluginName)) + } + s.store[pluginName] = p } // Delete lets you delete a DRA Plugin by name. // This method is protected by a mutex. -func (s *PluginsStore) Delete(pluginName string) { +func (s *pluginsStore) delete(pluginName string) { s.Lock() defer s.Unlock() delete(s.store, pluginName) } - -// Clear deletes all entries in the store. -// This methiod is protected by a mutex. 
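The pluginsStore above is the usual RWMutex-guarded registry map, with add now logging when an existing plugin name is being overridden. A self-contained sketch of the same pattern with made-up names (registry, endpoint strings) rather than the real plugin type:

package main

import (
    "fmt"
    "sync"
)

// registry mirrors the pluginsStore pattern: a mutex-guarded map where
// re-adding an existing name overrides the previous entry.
type registry struct {
    sync.RWMutex
    store map[string]string
}

func (r *registry) add(name, endpoint string) {
    r.Lock()
    defer r.Unlock()
    if r.store == nil {
        r.store = make(map[string]string)
    }
    if _, exists := r.store[name]; exists {
        fmt.Printf("plugin %s already registered, previous entry will be overridden\n", name)
    }
    r.store[name] = endpoint
}

func (r *registry) get(name string) (string, bool) {
    r.RLock()
    defer r.RUnlock()
    endpoint, ok := r.store[name]
    return endpoint, ok
}

func main() {
    r := &registry{}
    r.add("dummy-plugin", "/var/lib/kubelet/plugins/dummy/plugin.sock")
    fmt.Println(r.get("dummy-plugin"))
}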
-func (s *PluginsStore) Clear() { - s.Lock() - defer s.Unlock() - - s.store = make(map[string]*Plugin) -} diff --git a/pkg/kubelet/cm/helpers.go b/pkg/kubelet/cm/helpers.go index 6be3e27230724..d1c0beabf74ed 100644 --- a/pkg/kubelet/cm/helpers.go +++ b/pkg/kubelet/cm/helpers.go @@ -52,14 +52,14 @@ func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.Res return ret } -func buildContainerMapAndRunningSetFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) (containermap.ContainerMap, sets.String) { +func buildContainerMapAndRunningSetFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) (containermap.ContainerMap, sets.Set[string]) { podSandboxMap := make(map[string]string) podSandboxList, _ := runtimeService.ListPodSandbox(ctx, nil) for _, p := range podSandboxList { podSandboxMap[p.Id] = p.Metadata.Uid } - runningSet := sets.NewString() + runningSet := sets.New[string]() containerMap := containermap.NewContainerMap() containerList, _ := runtimeService.ListContainers(ctx, nil) for _, c := range containerList { diff --git a/pkg/kubelet/cm/internal_container_lifecycle_linux.go b/pkg/kubelet/cm/internal_container_lifecycle_linux.go index cb7c0cfa543f9..0c3bb2e49992f 100644 --- a/pkg/kubelet/cm/internal_container_lifecycle_linux.go +++ b/pkg/kubelet/cm/internal_container_lifecycle_linux.go @@ -23,7 +23,8 @@ import ( "strconv" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" ) @@ -39,7 +40,7 @@ func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, contain numaNodes := i.memoryManager.GetMemoryNUMANodes(pod, container) if numaNodes.Len() > 0 { var affinity []string - for _, numaNode := range numaNodes.List() { + for _, numaNode := range sets.List(numaNodes) { affinity = append(affinity, strconv.Itoa(numaNode)) } containerConfig.Linux.Resources.CpusetMems = strings.Join(affinity, ",") diff --git a/pkg/kubelet/cm/memorymanager/fake_memory_manager.go b/pkg/kubelet/cm/memorymanager/fake_memory_manager.go index 364614502d457..46874e500500c 100644 --- a/pkg/kubelet/cm/memorymanager/fake_memory_manager.go +++ b/pkg/kubelet/cm/memorymanager/fake_memory_manager.go @@ -50,7 +50,7 @@ func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, contain klog.InfoS("Add container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID) } -func (m *fakeManager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int { +func (m *fakeManager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] { klog.InfoS("Get MemoryNUMANodes", "pod", klog.KObj(pod), "containerName", container.Name) return nil } diff --git a/pkg/kubelet/cm/memorymanager/memory_manager.go b/pkg/kubelet/cm/memorymanager/memory_manager.go index b8c55b74597d8..04cf2d6f533df 100644 --- a/pkg/kubelet/cm/memorymanager/memory_manager.go +++ b/pkg/kubelet/cm/memorymanager/memory_manager.go @@ -83,7 +83,7 @@ type Manager interface { GetPodTopologyHints(*v1.Pod) map[string][]topologymanager.TopologyHint // GetMemoryNUMANodes provides NUMA nodes that are used to allocate the container memory - GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int + GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] // GetAllocatableMemory returns the amount of allocatable memory for each NUMA node GetAllocatableMemory() []state.Block @@ -213,9 +213,9 @@ func (m *manager) AddContainer(pod 
*v1.Pod, container *v1.Container, containerID } // GetMemoryNUMANodes provides NUMA nodes that used to allocate the container memory -func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int { +func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] { // Get NUMA node affinity of blocks assigned to the container during Allocate() - numaNodes := sets.NewInt() + numaNodes := sets.New[int]() for _, block := range m.state.GetMemoryBlocks(string(pod.UID), container.Name) { for _, nodeID := range block.NUMAAffinity { // avoid nodes duplication when hugepages and memory blocks pinned to the same NUMA node diff --git a/pkg/kubelet/cm/topologymanager/policy_options.go b/pkg/kubelet/cm/topologymanager/policy_options.go index 15f94c696d2f9..1ef211cd0bdee 100644 --- a/pkg/kubelet/cm/topologymanager/policy_options.go +++ b/pkg/kubelet/cm/topologymanager/policy_options.go @@ -30,11 +30,11 @@ const ( ) var ( - alphaOptions = sets.NewString() - betaOptions = sets.NewString( + alphaOptions = sets.New[string]() + betaOptions = sets.New[string]( PreferClosestNUMANodes, ) - stableOptions = sets.NewString() + stableOptions = sets.New[string]() ) func CheckPolicyOptionAvailable(option string) error { diff --git a/pkg/kubelet/cm/topologymanager/policy_options_test.go b/pkg/kubelet/cm/topologymanager/policy_options_test.go index d01b63db7993e..c9872ec76b9b8 100644 --- a/pkg/kubelet/cm/topologymanager/policy_options_test.go +++ b/pkg/kubelet/cm/topologymanager/policy_options_test.go @@ -112,7 +112,7 @@ func TestNewTopologyManagerOptions(t *testing.T) { } betaOptions.Insert(fancyBetaOption) - alphaOptions = sets.NewString(fancyAlphaOption) + alphaOptions = sets.New[string](fancyAlphaOption) for _, tcase := range testCases { t.Run(tcase.description, func(t *testing.T) { diff --git a/pkg/kubelet/config/common.go b/pkg/kubelet/config/common.go index 7dade01b9c3a8..173c7bdd39fb5 100644 --- a/pkg/kubelet/config/common.go +++ b/pkg/kubelet/config/common.go @@ -120,6 +120,10 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v return false, pod, fmt.Errorf("invalid pod: %#v", obj) } + if newPod.Name == "" { + return true, pod, fmt.Errorf("invalid pod: name is needed for the pod") + } + // Apply default values and validate the pod. if err = defaultFn(newPod); err != nil { return true, pod, err @@ -151,6 +155,9 @@ func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods v1. // Apply default values and validate pods. for i := range newPods.Items { newPod := &newPods.Items[i] + if newPod.Name == "" { + return true, pods, fmt.Errorf("invalid pod: name is needed for the pod") + } if err = defaultFn(newPod); err != nil { return true, pods, err } diff --git a/pkg/kubelet/container/cache.go b/pkg/kubelet/container/cache.go index a43f07b31ab50..9b4138634d3f8 100644 --- a/pkg/kubelet/container/cache.go +++ b/pkg/kubelet/container/cache.go @@ -157,29 +157,6 @@ func (c *cache) get(id types.UID) *data { // Otherwise, it returns nil. The caller should acquire the lock. func (c *cache) getIfNewerThan(id types.UID, minTime time.Time) *data { d, ok := c.pods[id] - if utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) { - // Evented PLEG has CREATED, STARTED, STOPPED and DELETED events - // However if the container creation fails for some reason there is no - // CRI event received by the kubelet and that pod will get stuck a - // GetNewerThan call in the pod workers. 
This is reproducible with - // the node e2e test, - // https://github.com/kubernetes/kubernetes/blob/83415e5c9e6e59a3d60a148160490560af2178a1/test/e2e_node/pod_hostnamefqdn_test.go#L161 - // which forces failure during pod creation. This issue also exists in - // Generic PLEG but since it updates global timestamp periodically - // the GetNewerThan call gets unstuck. - - // During node e2e tests, it was observed this change does not have any - // adverse impact on the behaviour of the Generic PLEG as well. - switch { - case !ok: - return makeDefaultData(id) - case ok && (d.modified.After(minTime) || (c.timestamp != nil && c.timestamp.After(minTime))): - return d - default: - return nil - } - } - globalTimestampIsNewer := (c.timestamp != nil && c.timestamp.After(minTime)) if !ok && globalTimestampIsNewer { // Status is not cached, but the global timestamp is newer than diff --git a/pkg/kubelet/cri/remote/fake/fake_image_service.go b/pkg/kubelet/cri/remote/fake/fake_image_service.go index 0e10e5ae8aa6a..68aba284f017e 100644 --- a/pkg/kubelet/cri/remote/fake/fake_image_service.go +++ b/pkg/kubelet/cri/remote/fake/fake_image_service.go @@ -72,10 +72,10 @@ func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *kubeapi.RemoveImag // ImageFsInfo returns information of the filesystem that is used to store images. func (f *RemoteRuntime) ImageFsInfo(ctx context.Context, req *kubeapi.ImageFsInfoRequest) (*kubeapi.ImageFsInfoResponse, error) { - fsUsage, err := f.ImageService.ImageFsInfo(ctx) + resp, err := f.ImageService.ImageFsInfo(ctx) if err != nil { return nil, err } - return &kubeapi.ImageFsInfoResponse{ImageFilesystems: fsUsage}, nil + return resp, nil } diff --git a/pkg/kubelet/cri/remote/remote_image.go b/pkg/kubelet/cri/remote/remote_image.go index a1afc80b8a28e..d58da81828d25 100644 --- a/pkg/kubelet/cri/remote/remote_image.go +++ b/pkg/kubelet/cri/remote/remote_image.go @@ -217,7 +217,7 @@ func (r *remoteImageService) RemoveImage(ctx context.Context, image *runtimeapi. } // ImageFsInfo returns information of the filesystem that is used to store images. -func (r *remoteImageService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) { +func (r *remoteImageService) ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error) { // Do not set timeout, because `ImageFsInfo` takes time. // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? ctx, cancel := context.WithCancel(ctx) @@ -226,11 +226,11 @@ func (r *remoteImageService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.Fil return r.imageFsInfoV1(ctx) } -func (r *remoteImageService) imageFsInfoV1(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) { +func (r *remoteImageService) imageFsInfoV1(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error) { resp, err := r.imageClient.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{}) if err != nil { klog.ErrorS(err, "ImageFsInfo from image service failed") return nil, err } - return resp.GetImageFilesystems(), nil + return resp, nil } diff --git a/pkg/kubelet/cri/remote/remote_runtime.go b/pkg/kubelet/cri/remote/remote_runtime.go index 64571069376bc..36b3839eb7402 100644 --- a/pkg/kubelet/cri/remote/remote_runtime.go +++ b/pkg/kubelet/cri/remote/remote_runtime.go @@ -674,9 +674,7 @@ func (r *remoteRuntimeService) containerStatsV1(ctx context.Context, containerID // ListContainerStats returns the list of ContainerStats given the filter. 
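The ListContainerStats hunk just below replaces the unbounded context.WithCancel with a per-call deadline taken from the runtime service's configured timeout (r.timeout), so a slow CRI runtime can no longer block the caller indefinitely. A minimal sketch of that pattern; the two-minute value is illustrative, not the kubelet default:

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    // Derive a child context that is cancelled automatically once the deadline passes.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    defer cancel() // always release the timer, even on the success path

    deadline, _ := ctx.Deadline()
    fmt.Println("call must finish before:", deadline)
}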
func (r *remoteRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListContainerStats", "filter", filter) - // Do not set timeout, because writable layer stats collection takes time. - // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithTimeout(ctx, r.timeout) defer cancel() return r.listContainerStatsV1(ctx, filter) diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go index 81562c671d31c..75fe9fb9263d2 100644 --- a/pkg/kubelet/eviction/eviction_manager_test.go +++ b/pkg/kubelet/eviction/eviction_manager_test.go @@ -117,6 +117,32 @@ func makePodWithDiskStats(name string, priority int32, requests v1.ResourceList, return pod, podStats } +func makePodWithLocalStorageCapacityIsolationOpen(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, memoryWorkingSet string) (*v1.Pod, statsapi.PodStats) { + vol := newVolume("local-volume", v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{ + SizeLimit: resource.NewQuantity(requests.Memory().Value(), resource.BinarySI), + }, + }) + var vols []v1.Volume + vols = append(vols, vol) + pod := newPod(name, priority, []v1.Container{ + newContainer(name, requests, limits), + }, vols) + + var podStats statsapi.PodStats + switch name { + case "empty-dir": + podStats = newPodMemoryStats(pod, *resource.NewQuantity(requests.Memory().Value()*2, resource.BinarySI)) + case "container-ephemeral-storage-limit": + podStats = newPodMemoryStats(pod, *resource.NewQuantity(limits.StorageEphemeral().Value(), resource.BinarySI)) + case "pod-ephemeral-storage-limit": + podStats = newPodMemoryStats(pod, *resource.NewQuantity(limits.StorageEphemeral().Value()*2, resource.BinarySI)) + default: + podStats = newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet)) + } + return pod, podStats +} + func makeMemoryStats(nodeAvailableBytes string, podStats map[*v1.Pod]statsapi.PodStats) *statsapi.Summary { val := resource.MustParse(nodeAvailableBytes) availableBytes := uint64(val.Value()) @@ -1784,3 +1810,70 @@ func TestUpdateMemcgThreshold(t *testing.T) { fakeClock.Step(2 * notifierRefreshInterval) manager.synchronize(diskInfoProvider, activePodsFunc) } + +func TestManagerWithLocalStorageCapacityIsolationOpen(t *testing.T) { + podMaker := makePodWithLocalStorageCapacityIsolationOpen + summaryStatsMaker := makeDiskStats + podsToMake := []podToMake{ + {name: "empty-dir", requests: newResourceList("", "900Mi", ""), limits: newResourceList("", "1Gi", "")}, + {name: "container-ephemeral-storage-limit", requests: newResourceList("", "", "900Mi"), limits: newResourceList("", "", "800Mi")}, + {name: "pod-ephemeral-storage-limit", requests: newResourceList("", "", "1Gi"), limits: newResourceList("", "", "800Mi")}, + } + + pods := []*v1.Pod{} + podStats := map[*v1.Pod]statsapi.PodStats{} + for _, podToMake := range podsToMake { + pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet) + pods = append(pods, pod) + podStats[pod] = podStat + } + + summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1Gi", "200Mi", podStats)} + + config := Config{ + MaxPodGracePeriodSeconds: 5, + PressureTransitionPeriod: time.Minute * 5, + Thresholds: []evictionapi.Threshold{ + { + Signal: 
evictionapi.SignalAllocatableMemoryAvailable, + Operator: evictionapi.OpLessThan, + Value: evictionapi.ThresholdValue{ + Quantity: quantityMustParse("1Gi"), + }, + }, + }, + } + + podKiller := &mockPodKiller{} + diskGC := &mockDiskGC{err: nil} + nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} + fakeClock := testingclock.NewFakeClock(time.Now()) + diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} + + mgr := &managerImpl{ + clock: fakeClock, + killPodFunc: podKiller.killPodNow, + imageGC: diskGC, + containerGC: diskGC, + config: config, + recorder: &record.FakeRecorder{}, + summaryProvider: summaryProvider, + nodeRef: nodeRef, + localStorageCapacityIsolation: true, + dedicatedImageFs: &diskInfoProvider.dedicatedImageFs, + } + + activePodsFunc := func() []*v1.Pod { + return pods + } + + evictedPods := mgr.synchronize(diskInfoProvider, activePodsFunc) + + if podKiller.pod == nil { + t.Fatalf("Manager should have selected a pod for eviction") + } + + if diff := cmp.Diff(pods, evictedPods); diff != "" { + t.Fatalf("Unexpected evicted pod (-want,+got):\n%s", diff) + } +} diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index 4bb8ac4cf3f88..0a7b3c51f8a47 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -2029,6 +2029,24 @@ func newPodMemoryStats(pod *v1.Pod, workingSet resource.Quantity) statsapi.PodSt Memory: &statsapi.MemoryStats{ WorkingSetBytes: &workingSetBytes, }, + VolumeStats: []statsapi.VolumeStats{ + { + FsStats: statsapi.FsStats{ + UsedBytes: &workingSetBytes, + }, + Name: "local-volume", + }, + }, + Containers: []statsapi.ContainerStats{ + { + Name: pod.Name, + Logs: &statsapi.FsStats{ + UsedBytes: &workingSetBytes, + }, + Rootfs: &statsapi.FsStats{UsedBytes: &workingSetBytes}, + }, + }, + EphemeralStorage: &statsapi.FsStats{UsedBytes: &workingSetBytes}, } } diff --git a/pkg/kubelet/images/image_gc_manager.go b/pkg/kubelet/images/image_gc_manager.go index 451dfb0839021..8df2abaf9f69b 100644 --- a/pkg/kubelet/images/image_gc_manager.go +++ b/pkg/kubelet/images/image_gc_manager.go @@ -36,6 +36,7 @@ import ( statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" + "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" ) @@ -78,6 +79,11 @@ type ImageGCPolicy struct { // Minimum age at which an image can be garbage collected. MinAge time.Duration + + // Maximum age after which an image can be garbage collected, regardless of disk usage. + // Currently gated by MaximumImageGCAge feature gate and Kubelet configuration. + // If 0, the feature is disabled. + MaxAge time.Duration } type realImageGCManager struct { @@ -106,9 +112,6 @@ type realImageGCManager struct { // imageCache is the cache of latest image list. imageCache imageCache - // sandbox image exempted from GC - sandboxImage string - // tracer for recording spans tracer trace.Tracer } @@ -160,7 +163,7 @@ type imageRecord struct { } // NewImageGCManager instantiates a new ImageGCManager object. 
-func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, sandboxImage string, tracerProvider trace.TracerProvider) (ImageGCManager, error) { +func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, tracerProvider trace.TracerProvider) (ImageGCManager, error) { // Validate policy. if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 { return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent) @@ -180,7 +183,6 @@ func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, r recorder: recorder, nodeRef: nodeRef, initialized: false, - sandboxImage: sandboxImage, tracer: tracer, } @@ -223,12 +225,6 @@ func (im *realImageGCManager) GetImageList() ([]container.Image, error) { func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.Time) (sets.String, error) { imagesInUse := sets.NewString() - // Always consider the container runtime pod sandbox image in use - imageRef, err := im.runtime.GetImageRef(ctx, container.ImageSpec{Image: im.sandboxImage}) - if err == nil && imageRef != "" { - imagesInUse.Insert(imageRef) - } - images, err := im.runtime.ListImages(ctx) if err != nil { return imagesInUse, err @@ -290,6 +286,18 @@ func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time. func (im *realImageGCManager) GarbageCollect(ctx context.Context) error { ctx, otelSpan := im.tracer.Start(ctx, "Images/GarbageCollect") defer otelSpan.End() + + freeTime := time.Now() + images, err := im.imagesInEvictionOrder(ctx, freeTime) + if err != nil { + return err + } + + images, err = im.freeOldImages(ctx, images, freeTime) + if err != nil { + return err + } + // Get disk usage on disk holding images. fsStats, err := im.statsProvider.ImageFsStats(ctx) if err != nil { @@ -321,7 +329,7 @@ func (im *realImageGCManager) GarbageCollect(ctx context.Context) error { if usagePercent >= im.policy.HighThresholdPercent { amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available klog.InfoS("Disk usage on image filesystem is over the high threshold, trying to free bytes down to the low threshold", "usage", usagePercent, "highThreshold", im.policy.HighThresholdPercent, "amountToFree", amountToFree, "lowThreshold", im.policy.LowThresholdPercent) - freed, err := im.freeSpace(ctx, amountToFree, time.Now()) + freed, err := im.freeSpace(ctx, amountToFree, freeTime, images) if err != nil { return err } @@ -336,9 +344,39 @@ func (im *realImageGCManager) GarbageCollect(ctx context.Context) error { return nil } +func (im *realImageGCManager) freeOldImages(ctx context.Context, images []evictionInfo, freeTime time.Time) ([]evictionInfo, error) { + if im.policy.MaxAge == 0 { + return images, nil + } + var deletionErrors []error + remainingImages := make([]evictionInfo, 0) + for _, image := range images { + klog.V(5).InfoS("Evaluating image ID for possible garbage collection based on image age", "imageID", image.id) + // Evaluate whether image is older than MaxAge. 
+ if freeTime.Sub(image.lastUsed) > im.policy.MaxAge { + if err := im.freeImage(ctx, image); err != nil { + deletionErrors = append(deletionErrors, err) + remainingImages = append(remainingImages, image) + continue + } + continue + } + remainingImages = append(remainingImages, image) + } + if len(deletionErrors) > 0 { + return remainingImages, fmt.Errorf("wanted to free images older than %v, encountered errors in image deletion: %v", im.policy.MaxAge, errors.NewAggregate(deletionErrors)) + } + return remainingImages, nil +} + func (im *realImageGCManager) DeleteUnusedImages(ctx context.Context) error { klog.InfoS("Attempting to delete unused images") - _, err := im.freeSpace(ctx, math.MaxInt64, time.Now()) + freeTime := time.Now() + images, err := im.imagesInEvictionOrder(ctx, freeTime) + if err != nil { + return err + } + _, err = im.freeSpace(ctx, math.MaxInt64, freeTime, images) return err } @@ -348,40 +386,12 @@ func (im *realImageGCManager) DeleteUnusedImages(ctx context.Context) error { // bytes freed is always returned. // Note that error may be nil and the number of bytes free may be less // than bytesToFree. -func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, freeTime time.Time) (int64, error) { - imagesInUse, err := im.detectImages(ctx, freeTime) - if err != nil { - return 0, err - } - - im.imageRecordsLock.Lock() - defer im.imageRecordsLock.Unlock() - - // Get all images in eviction order. - images := make([]evictionInfo, 0, len(im.imageRecords)) - for image, record := range im.imageRecords { - if isImageUsed(image, imagesInUse) { - klog.V(5).InfoS("Image ID is being used", "imageID", image) - continue - } - // Check if image is pinned, prevent garbage collection - if record.pinned { - klog.V(5).InfoS("Image is pinned, skipping garbage collection", "imageID", image) - continue - - } - images = append(images, evictionInfo{ - id: image, - imageRecord: *record, - }) - } - sort.Sort(byLastUsedAndDetected(images)) - +func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, freeTime time.Time, images []evictionInfo) (int64, error) { // Delete unused images until we've freed up enough space. var deletionErrors []error spaceFreed := int64(0) for _, image := range images { - klog.V(5).InfoS("Evaluating image ID for possible garbage collection", "imageID", image.id) + klog.V(5).InfoS("Evaluating image ID for possible garbage collection based on disk usage", "imageID", image.id) // Images that are currently in used were given a newer lastUsed. if image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) { klog.V(5).InfoS("Image ID was used too recently, not eligible for garbage collection", "imageID", image.id, "lastUsed", image.lastUsed, "freeTime", freeTime) @@ -390,20 +400,15 @@ func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, // Avoid garbage collect the image if the image is not old enough. // In such a case, the image may have just been pulled down, and will be used by a container right away. - if freeTime.Sub(image.firstDetected) < im.policy.MinAge { klog.V(5).InfoS("Image ID's age is less than the policy's minAge, not eligible for garbage collection", "imageID", image.id, "age", freeTime.Sub(image.firstDetected), "minAge", im.policy.MinAge) continue } - // Remove image. Continue despite errors. 
- klog.InfoS("Removing image to free bytes", "imageID", image.id, "size", image.size) - err := im.runtime.RemoveImage(ctx, container.ImageSpec{Image: image.id}) - if err != nil { + if err := im.freeImage(ctx, image); err != nil { deletionErrors = append(deletionErrors, err) continue } - delete(im.imageRecords, image.id) spaceFreed += image.size if spaceFreed >= bytesToFree { @@ -417,6 +422,50 @@ func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, return spaceFreed, nil } +func (im *realImageGCManager) freeImage(ctx context.Context, image evictionInfo) error { + // Remove image. Continue despite errors. + klog.InfoS("Removing image to free bytes", "imageID", image.id, "size", image.size) + err := im.runtime.RemoveImage(ctx, container.ImageSpec{Image: image.id}) + if err != nil { + return err + } + delete(im.imageRecords, image.id) + metrics.ImageGarbageCollectedTotal.Inc() + return err +} + +// Queries all of the image records and arranges them in a slice of evictionInfo, sorted based on last time used, ignoring images pinned by the runtime. +func (im *realImageGCManager) imagesInEvictionOrder(ctx context.Context, freeTime time.Time) ([]evictionInfo, error) { + imagesInUse, err := im.detectImages(ctx, freeTime) + if err != nil { + return nil, err + } + + im.imageRecordsLock.Lock() + defer im.imageRecordsLock.Unlock() + + // Get all images in eviction order. + images := make([]evictionInfo, 0, len(im.imageRecords)) + for image, record := range im.imageRecords { + if isImageUsed(image, imagesInUse) { + klog.V(5).InfoS("Image ID is being used", "imageID", image) + continue + } + // Check if image is pinned, prevent garbage collection + if record.pinned { + klog.V(5).InfoS("Image is pinned, skipping garbage collection", "imageID", image) + continue + + } + images = append(images, evictionInfo{ + id: image, + imageRecord: *record, + }) + } + sort.Sort(byLastUsedAndDetected(images)) + return images, nil +} + type evictionInfo struct { id string imageRecord diff --git a/pkg/kubelet/images/image_gc_manager_test.go b/pkg/kubelet/images/image_gc_manager_test.go index 2bb6061a12537..4507f3c0677b3 100644 --- a/pkg/kubelet/images/image_gc_manager_test.go +++ b/pkg/kubelet/images/image_gc_manager_test.go @@ -48,7 +48,6 @@ func newRealImageGCManager(policy ImageGCPolicy, mockStatsProvider stats.Provide imageRecords: make(map[string]*imageRecord), statsProvider: mockStatsProvider, recorder: &record.FakeRecorder{}, - sandboxImage: sandboxImage, tracer: oteltrace.NewNoopTracerProvider().Tracer(""), }, fakeRuntime } @@ -202,8 +201,9 @@ func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) { manager, fakeRuntime := newRealImageGCManager(ImageGCPolicy{}, mockStatsProvider) fakeRuntime.ImageList = []container.Image{ { - ID: sandboxImage, - Size: 1024, + ID: sandboxImage, + Size: 1024, + Pinned: true, }, } @@ -233,7 +233,7 @@ func TestDeletePinnedImage(t *testing.T) { err := manager.DeleteUnusedImages(ctx) assert := assert.New(t) - assert.Len(fakeRuntime.ImageList, 2) + assert.Len(fakeRuntime.ImageList, 1) require.NoError(t, err) } @@ -255,11 +255,8 @@ func TestDoNotDeletePinnedImage(t *testing.T) { }, } - spaceFreed, err := manager.freeSpace(ctx, 4096, time.Now()) assert := assert.New(t) - require.NoError(t, err) - assert.EqualValues(1024, spaceFreed) - assert.Len(fakeRuntime.ImageList, 1) + getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 4096, 1024, 1, time.Now()) } func TestDeleteUnPinnedImage(t *testing.T) { @@ -280,11 +277,8 @@ func TestDeleteUnPinnedImage(t 
*testing.T) { }, } - spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now()) assert := assert.New(t) - require.NoError(t, err) - assert.EqualValues(2048, spaceFreed) - assert.Len(fakeRuntime.ImageList, 0) + getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 2048, 2048, 0, time.Now()) } func TestAllPinnedImages(t *testing.T) { @@ -306,11 +300,8 @@ func TestAllPinnedImages(t *testing.T) { }, } - spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now()) assert := assert.New(t) - require.NoError(t, err) - assert.EqualValues(0, spaceFreed) - assert.Len(fakeRuntime.ImageList, 2) + getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 2048, 0, 2, time.Now()) } func TestDetectImagesContainerStopped(t *testing.T) { @@ -404,11 +395,8 @@ func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) { }}, } - spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now()) assert := assert.New(t) - require.NoError(t, err) - assert.EqualValues(1024, spaceFreed) - assert.Len(fakeRuntime.ImageList, 1) + getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 2048, 1024, 1, time.Now()) } func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) { @@ -487,11 +475,8 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { // We're setting the delete time one minute in the future, so the time the image // was first detected and the delete time are different. - spaceFreed, err := manager.freeSpace(ctx, 1024, time.Now().Add(time.Minute)) assert := assert.New(t) - require.NoError(t, err) - assert.EqualValues(1024, spaceFreed) - assert.Len(fakeRuntime.ImageList, 1) + getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 1024, 1024, 1, time.Now().Add(time.Minute)) } func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { @@ -526,11 +511,8 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) - spaceFreed, err := manager.freeSpace(ctx, 1024, time.Now()) assert := assert.New(t) - require.NoError(t, err) - assert.EqualValues(2048, spaceFreed) - assert.Len(fakeRuntime.ImageList, 1) + getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 1024, 2048, 1, time.Now()) } func TestGarbageCollectBelowLowThreshold(t *testing.T) { @@ -653,20 +635,136 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) { require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) // no space freed since one image is in used, and another one is not old enough - spaceFreed, err := manager.freeSpace(ctx, 1024, fakeClock.Now()) assert := assert.New(t) - require.NoError(t, err) - assert.EqualValues(0, spaceFreed) - assert.Len(fakeRuntime.ImageList, 2) + getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 1024, 0, 2, fakeClock.Now()) // move clock by minAge duration, then 1 image will be garbage collected fakeClock.Step(policy.MinAge) - spaceFreed, err = manager.freeSpace(ctx, 1024, fakeClock.Now()) + getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 1024, 1024, 1, fakeClock.Now()) +} + +func getImagesAndFreeSpace(ctx context.Context, t *testing.T, assert *assert.Assertions, im *realImageGCManager, fakeRuntime *containertest.FakeRuntime, spaceToFree, expectedSpaceFreed int64, imagesLen int, freeTime time.Time) { + images, err := im.imagesInEvictionOrder(ctx, freeTime) + require.NoError(t, err) + spaceFreed, err := im.freeSpace(ctx, spaceToFree, freeTime, images) + require.NoError(t, err) + assert.EqualValues(expectedSpaceFreed, spaceFreed) + assert.Len(fakeRuntime.ImageList, imagesLen) +} + 
+func TestGarbageCollectImageTooOld(t *testing.T) { + ctx := context.Background() + policy := ImageGCPolicy{ + HighThresholdPercent: 90, + LowThresholdPercent: 80, + MinAge: 0, + MaxAge: time.Minute * 1, + } + fakeRuntime := &containertest.FakeRuntime{} + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockStatsProvider := statstest.NewMockProvider(mockCtrl) + manager := &realImageGCManager{ + runtime: fakeRuntime, + policy: policy, + imageRecords: make(map[string]*imageRecord), + statsProvider: mockStatsProvider, + recorder: &record.FakeRecorder{}, + } + + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + // 1 image is in use, and another one is not old enough + fakeRuntime.AllPodList = []*containertest.FakePod{ + {Pod: &container.Pod{ + Containers: []*container.Container{ + makeContainer(1), + }, + }}, + } + + fakeClock := testingclock.NewFakeClock(time.Now()) + t.Log(fakeClock.Now()) + images, err := manager.imagesInEvictionOrder(ctx, fakeClock.Now()) + require.NoError(t, err) + require.Equal(t, len(images), 1) + // Simulate pod having just used this image, but having been GC'd + images[0].lastUsed = fakeClock.Now() + + // First GC round should not GC remaining image, as it was used too recently. + assert := assert.New(t) + images, err = manager.freeOldImages(ctx, images, fakeClock.Now()) require.NoError(t, err) - assert.EqualValues(1024, spaceFreed) + assert.Len(images, 1) + assert.Len(fakeRuntime.ImageList, 2) + + // move clock by a nanosecond past maxAge duration, then 1 image will be garbage collected + fakeClock.Step(policy.MaxAge + 1) + images, err = manager.freeOldImages(ctx, images, fakeClock.Now()) + require.NoError(t, err) + assert.Len(images, 0) assert.Len(fakeRuntime.ImageList, 1) } +func TestGarbageCollectImageMaxAgeDisabled(t *testing.T) { + ctx := context.Background() + policy := ImageGCPolicy{ + HighThresholdPercent: 90, + LowThresholdPercent: 80, + MinAge: 0, + MaxAge: 0, + } + fakeRuntime := &containertest.FakeRuntime{} + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockStatsProvider := statstest.NewMockProvider(mockCtrl) + manager := &realImageGCManager{ + runtime: fakeRuntime, + policy: policy, + imageRecords: make(map[string]*imageRecord), + statsProvider: mockStatsProvider, + recorder: &record.FakeRecorder{}, + } + + assert := assert.New(t) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + assert.Len(fakeRuntime.ImageList, 2) + // 1 image is in use, and another one is not old enough + fakeRuntime.AllPodList = []*containertest.FakePod{ + {Pod: &container.Pod{ + Containers: []*container.Container{ + makeContainer(1), + }, + }}, + } + + fakeClock := testingclock.NewFakeClock(time.Now()) + t.Log(fakeClock.Now()) + images, err := manager.imagesInEvictionOrder(ctx, fakeClock.Now()) + require.NoError(t, err) + require.Equal(t, len(images), 1) + assert.Len(fakeRuntime.ImageList, 2) + + // First GC round should not GC remaining image, as it was used too recently.
+ images, err = manager.freeOldImages(ctx, images, fakeClock.Now()) + require.NoError(t, err) + assert.Len(images, 1) + assert.Len(fakeRuntime.ImageList, 2) + + // Move clock by a lot, and the images should continue to not be garbage collected + // See https://stackoverflow.com/questions/25065055/what-is-the-maximum-time-time-in-go + fakeClock.SetTime(time.Unix(1<<63-62135596801, 999999999)) + images, err = manager.freeOldImages(ctx, images, fakeClock.Now()) + require.NoError(t, err) + assert.Len(images, 1) + assert.Len(fakeRuntime.ImageList, 2) +} + func TestValidateImageGCPolicy(t *testing.T) { testCases := []struct { name string @@ -719,7 +817,7 @@ func TestValidateImageGCPolicy(t *testing.T) { } for _, tc := range testCases { - if _, err := NewImageGCManager(nil, nil, nil, nil, tc.imageGCPolicy, "", oteltrace.NewNoopTracerProvider()); err != nil { + if _, err := NewImageGCManager(nil, nil, nil, nil, tc.imageGCPolicy, oteltrace.NewNoopTracerProvider()); err != nil { if err.Error() != tc.expectErr { t.Errorf("[%s:]Expected err:%v, but got:%v", tc.name, tc.expectErr, err.Error()) } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index c7a4ad9484280..f5735b83502aa 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -36,6 +36,7 @@ import ( libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/selinux/go-selinux" "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" "go.opentelemetry.io/otel/trace" "k8s.io/client-go/informers" @@ -248,29 +249,30 @@ type Dependencies struct { Options []Option // Injected Dependencies - Auth server.AuthInterface - CAdvisorInterface cadvisor.Interface - Cloud cloudprovider.Interface - ContainerManager cm.ContainerManager - EventClient v1core.EventsGetter - HeartbeatClient clientset.Interface - OnHeartbeatFailure func() - KubeClient clientset.Interface - Mounter mount.Interface - HostUtil hostutil.HostUtils - OOMAdjuster *oom.OOMAdjuster - OSInterface kubecontainer.OSInterface - PodConfig *config.PodConfig - ProbeManager prober.Manager - Recorder record.EventRecorder - Subpather subpath.Interface - TracerProvider trace.TracerProvider - VolumePlugins []volume.VolumePlugin - DynamicPluginProber volume.DynamicPluginProber - TLSOptions *server.TLSOptions - RemoteRuntimeService internalapi.RuntimeService - RemoteImageService internalapi.ImageManagerService - PodStartupLatencyTracker util.PodStartupLatencyTracker + Auth server.AuthInterface + CAdvisorInterface cadvisor.Interface + Cloud cloudprovider.Interface + ContainerManager cm.ContainerManager + EventClient v1core.EventsGetter + HeartbeatClient clientset.Interface + OnHeartbeatFailure func() + KubeClient clientset.Interface + Mounter mount.Interface + HostUtil hostutil.HostUtils + OOMAdjuster *oom.OOMAdjuster + OSInterface kubecontainer.OSInterface + PodConfig *config.PodConfig + ProbeManager prober.Manager + Recorder record.EventRecorder + Subpather subpath.Interface + TracerProvider trace.TracerProvider + VolumePlugins []volume.VolumePlugin + DynamicPluginProber volume.DynamicPluginProber + TLSOptions *server.TLSOptions + RemoteRuntimeService internalapi.RuntimeService + RemoteImageService internalapi.ImageManagerService + PodStartupLatencyTracker util.PodStartupLatencyTracker + NodeStartupLatencyTracker util.NodeStartupLatencyTracker // remove it after cadvisor.UsingLegacyCadvisorStats dropped.
useLegacyCadvisorStats bool } @@ -423,6 +425,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, LowThresholdPercent: int(kubeCfg.ImageGCLowThresholdPercent), } + if utilfeature.DefaultFeatureGate.Enabled(features.ImageMaximumGCAge) { + imageGCPolicy.MaxAge = kubeCfg.ImageMaximumGCAge.Duration + } else if kubeCfg.ImageMaximumGCAge.Duration != 0 { + klog.InfoS("ImageMaximumGCAge flag enabled, but corresponding feature gate is not enabled. Ignoring flag.") + } + enforceNodeAllocatable := kubeCfg.EnforceNodeAllocatable if experimentalNodeAllocatableIgnoreEvictionThreshold { // Do not provide kubeCfg.EnforceNodeAllocatable to eviction threshold parsing if we are not enforcing Evictions @@ -552,6 +560,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, keepTerminatedPodVolumes: keepTerminatedPodVolumes, nodeStatusMaxImages: nodeStatusMaxImages, tracer: tracer, + nodeStartupLatencyTracker: kubeDeps.NodeStartupLatencyTracker, } if klet.cloud != nil { @@ -719,8 +728,11 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, RelistPeriod: genericPlegRelistPeriod, RelistThreshold: genericPlegRelistThreshold, } - klet.eventedPleg = pleg.NewEventedPLEG(klet.containerRuntime, klet.runtimeService, eventChannel, + klet.eventedPleg, err = pleg.NewEventedPLEG(klet.containerRuntime, klet.runtimeService, eventChannel, klet.podCache, klet.pleg, eventedPlegMaxStreamRetries, eventedRelistDuration, clock.RealClock{}) + if err != nil { + return nil, err + } } else { genericRelistDuration := &pleg.RelistDuration{ RelistPeriod: genericPlegRelistPeriod, @@ -747,7 +759,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod)) // setup imageManager - imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy, crOptions.PodSandboxImage, kubeDeps.TracerProvider) + imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy, kubeDeps.TracerProvider) if err != nil { return nil, fmt.Errorf("failed to initialize image manager: %v", err) } @@ -1293,6 +1305,9 @@ type Kubelet struct { // OpenTelemetry Tracer tracer trace.Tracer + + // Track node startup latencies + nodeStartupLatencyTracker util.NodeStartupLatencyTracker } // ListPodStats is delegated to StatsProvider, which implements stats.Provider interface @@ -1686,11 +1701,11 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { // Callers should not write an event if this operation returns an error. 
func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (isTerminal bool, err error) { ctx, otelSpan := kl.tracer.Start(ctx, "syncPod", trace.WithAttributes( - attribute.String("k8s.pod.uid", string(pod.UID)), + semconv.K8SPodUIDKey.String(string(pod.UID)), attribute.String("k8s.pod", klog.KObj(pod).String()), - attribute.String("k8s.pod.name", pod.Name), + semconv.K8SPodNameKey.String(pod.Name), attribute.String("k8s.pod.update_type", updateType.String()), - attribute.String("k8s.namespace.name", pod.Namespace), + semconv.K8SNamespaceNameKey.String(pod.Namespace), )) klog.V(4).InfoS("SyncPod enter", "pod", klog.KObj(pod), "podUID", pod.UID) defer func() { @@ -1971,10 +1986,10 @@ func (kl *Kubelet) SyncTerminatingPod(_ context.Context, pod *v1.Pod, podStatus // TODO(#113606): connect this with the incoming context parameter, which comes from the pod worker. // Currently, using that context causes test failures. ctx, otelSpan := kl.tracer.Start(context.Background(), "syncTerminatingPod", trace.WithAttributes( - attribute.String("k8s.pod.uid", string(pod.UID)), + semconv.K8SPodUIDKey.String(string(pod.UID)), attribute.String("k8s.pod", klog.KObj(pod).String()), - attribute.String("k8s.pod.name", pod.Name), - attribute.String("k8s.namespace.name", pod.Namespace), + semconv.K8SPodNameKey.String(pod.Name), + semconv.K8SNamespaceNameKey.String(pod.Namespace), )) defer otelSpan.End() klog.V(4).InfoS("SyncTerminatingPod enter", "pod", klog.KObj(pod), "podUID", pod.UID) @@ -2106,10 +2121,10 @@ func (kl *Kubelet) SyncTerminatingRuntimePod(_ context.Context, runningPod *kube // kubelet restarts in the middle of the action. func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) error { ctx, otelSpan := kl.tracer.Start(ctx, "syncTerminatedPod", trace.WithAttributes( - attribute.String("k8s.pod.uid", string(pod.UID)), + semconv.K8SPodUIDKey.String(string(pod.UID)), attribute.String("k8s.pod", klog.KObj(pod).String()), - attribute.String("k8s.pod.name", pod.Name), - attribute.String("k8s.namespace.name", pod.Namespace), + semconv.K8SPodNameKey.String(pod.Name), + semconv.K8SNamespaceNameKey.String(pod.Namespace), )) defer otelSpan.End() klog.V(4).InfoS("SyncTerminatedPod enter", "pod", klog.KObj(pod), "podUID", pod.UID) @@ -2260,13 +2275,8 @@ func (kl *Kubelet) canAdmitPod(pods []*v1.Pod, pod *v1.Pod) (bool, string, strin otherPods := make([]*v1.Pod, 0, len(pods)) for _, p := range pods { op := p.DeepCopy() - for _, c := range op.Spec.Containers { - allocatedResources, found := kl.statusManager.GetContainerResourceAllocation(string(p.UID), c.Name) - if c.Resources.Requests != nil && found { - c.Resources.Requests[v1.ResourceCPU] = allocatedResources[v1.ResourceCPU] - c.Resources.Requests[v1.ResourceMemory] = allocatedResources[v1.ResourceMemory] - } - } + kl.updateContainerResourceAllocation(op) + otherPods = append(otherPods, op) } attrs.OtherPods = otherPods @@ -2541,13 +2551,8 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { // To handle kubelet restarts, test pod admissibility using AllocatedResources values // (for cpu & memory) from checkpoint store. If found, that is the source of truth. 
podCopy := pod.DeepCopy() - for _, c := range podCopy.Spec.Containers { - allocatedResources, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), c.Name) - if c.Resources.Requests != nil && found { - c.Resources.Requests[v1.ResourceCPU] = allocatedResources[v1.ResourceCPU] - c.Resources.Requests[v1.ResourceMemory] = allocatedResources[v1.ResourceMemory] - } - } + kl.updateContainerResourceAllocation(podCopy) + // Check if we can admit the pod; if not, reject it. if ok, reason, message := kl.canAdmitPod(activePods, podCopy); !ok { kl.rejectPod(pod, reason, message) @@ -2575,6 +2580,22 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { } } +// updateContainerResourceAllocation updates AllocatedResources values +// (for cpu & memory) from checkpoint store +func (kl *Kubelet) updateContainerResourceAllocation(pod *v1.Pod) { + for _, c := range pod.Spec.Containers { + allocatedResources, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), c.Name) + if c.Resources.Requests != nil && found { + if _, ok := allocatedResources[v1.ResourceCPU]; ok { + c.Resources.Requests[v1.ResourceCPU] = allocatedResources[v1.ResourceCPU] + } + if _, ok := allocatedResources[v1.ResourceMemory]; ok { + c.Resources.Requests[v1.ResourceMemory] = allocatedResources[v1.ResourceMemory] + } + } + } +} + // HandlePodUpdates is the callback in the SyncHandler interface for pods // being updated from a config source. func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) { diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index d598c55b79a93..7723abc64b291 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -52,6 +52,9 @@ func (kl *Kubelet) registerWithAPIServer() { if kl.registrationCompleted { return } + + kl.nodeStartupLatencyTracker.RecordAttemptRegisterNode() + step := 100 * time.Millisecond for { @@ -85,6 +88,7 @@ func (kl *Kubelet) registerWithAPIServer() { func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { _, err := kl.kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) if err == nil { + kl.nodeStartupLatencyTracker.RecordRegisteredNewNode() return true } @@ -633,6 +637,12 @@ func (kl *Kubelet) patchNodeStatus(originalNode, node *v1.Node) (*v1.Node, error } kl.lastStatusReportTime = kl.clock.Now() kl.setLastObservedNodeAddresses(updatedNode.Status.Addresses) + + readyIdx, readyCondition := nodeutil.GetNodeCondition(&updatedNode.Status, v1.NodeReady) + if readyIdx >= 0 && readyCondition.Status == v1.ConditionTrue { + kl.nodeStartupLatencyTracker.RecordNodeReady() + } + return updatedNode, nil } diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 919946475bf73..4a55934f68d40 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -3367,7 +3367,7 @@ func Test_generateAPIPodStatus(t *testing.T) { LastTransitionTime: normalized_now, }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionTrue, }, }, @@ -3408,7 +3408,7 @@ func Test_generateAPIPodStatus(t *testing.T) { }, }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionTrue, }, }, @@ -3448,7 +3448,7 @@ func Test_generateAPIPodStatus(t *testing.T) { }, }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - 
Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionTrue, }, }, @@ -3489,7 +3489,7 @@ func Test_generateAPIPodStatus(t *testing.T) { }, }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionFalse, }, }, @@ -3536,7 +3536,7 @@ func Test_generateAPIPodStatus(t *testing.T) { Message: "test", }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionFalse, }, }, @@ -3590,7 +3590,7 @@ func Test_generateAPIPodStatus(t *testing.T) { Message: "test", }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionFalse, }, }, @@ -3631,7 +3631,7 @@ func Test_generateAPIPodStatus(t *testing.T) { }, }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionTrue, }, }, @@ -3687,7 +3687,7 @@ func Test_generateAPIPodStatus(t *testing.T) { }, }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionTrue, }, }, @@ -3747,7 +3747,7 @@ func Test_generateAPIPodStatus(t *testing.T) { }, }, expectedPodReadyToStartContainersCondition: v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionTrue, }, }, diff --git a/pkg/kubelet/kubelet_server_journal.go b/pkg/kubelet/kubelet_server_journal.go index bf3114519c65d..cd3a02649b273 100644 --- a/pkg/kubelet/kubelet_server_journal.go +++ b/pkg/kubelet/kubelet_server_journal.go @@ -53,7 +53,7 @@ var ( // The set of known safe characters to pass to journalctl / GetWinEvent flags - only add to this list if the // character cannot be used to create invalid sequences. This is intended as a broad defense against malformed // input that could cause an escape. - reServiceNameUnsafeCharacters = regexp.MustCompile(`[^a-zA-Z\-_0-9@]+`) + reServiceNameUnsafeCharacters = regexp.MustCompile(`[^a-zA-Z\-_.:0-9@]+`) ) // journalServer returns text output from the OS specific service logger to view @@ -267,7 +267,7 @@ func (n *nodeLogQuery) Copy(w io.Writer) { // set the deadline to the maximum across both runs ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second)) defer cancel() - boot := int(0) + boot := 0 if n.Boot != nil { boot = *n.Boot } @@ -354,7 +354,7 @@ func copyFileLogs(ctx context.Context, w io.Writer, services []string) { // in that order stopping on first success. 
func heuristicsCopyFileLogs(ctx context.Context, w io.Writer, service string) { logFileNames := [3]string{ - fmt.Sprintf("%s", service), + service, fmt.Sprintf("%s.log", service), fmt.Sprintf("%s/%s.log", service, service), } diff --git a/pkg/kubelet/kubelet_server_journal_test.go b/pkg/kubelet/kubelet_server_journal_test.go index bdcf14a41bb0d..430a73fc27cc1 100644 --- a/pkg/kubelet/kubelet_server_journal_test.go +++ b/pkg/kubelet/kubelet_server_journal_test.go @@ -130,6 +130,10 @@ func Test_validateServices(t *testing.T) { var ( service1 = "svc1" service2 = "svc2" + service3 = "svc.foo" + service4 = "svc_foo" + service5 = "svc@foo" + service6 = "svc:foo" invalid1 = "svc\n" invalid2 = "svc.foo\n" ) @@ -140,10 +144,14 @@ func Test_validateServices(t *testing.T) { }{ {name: "one service", services: []string{service1}}, {name: "two services", services: []string{service1, service2}}, + {name: "dot service", services: []string{service3}}, + {name: "underscore service", services: []string{service4}}, + {name: "at service", services: []string{service5}}, + {name: "colon service", services: []string{service6}}, {name: "invalid service new line", services: []string{invalid1}, wantErr: true}, {name: "invalid service with dot", services: []string{invalid2}, wantErr: true}, {name: "long service", services: []string{strings.Repeat(service1, 100)}, wantErr: true}, - {name: "max number of services", services: []string{service1, service2, service1, service2, service1}, wantErr: true}, + {name: "max number of services", services: []string{service1, service2, service3, service4, service5}, wantErr: true}, } for _, tt := range tests { errs := validateServices(tt.services) diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 1b83b320c0023..3da53e5ea8584 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -83,6 +83,7 @@ import ( serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats" "k8s.io/kubernetes/pkg/kubelet/stats" "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/kubernetes/pkg/kubelet/status/state" statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" "k8s.io/kubernetes/pkg/kubelet/token" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -268,6 +269,7 @@ func newTestKubeletWithImageList( kubelet.podManager = kubepod.NewBasicPodManager() podStartupLatencyTracker := kubeletutil.NewPodStartupLatencyTracker() kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{}, podStartupLatencyTracker, kubelet.getRootDir()) + kubelet.nodeStartupLatencyTracker = kubeletutil.NewNodeStartupLatencyTracker() kubelet.containerRuntime = fakeRuntime kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime) @@ -311,7 +313,7 @@ func newTestKubeletWithImageList( HighThresholdPercent: 90, LowThresholdPercent: 80, } - imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, "", oteltrace.NewNoopTracerProvider()) + imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, oteltrace.NewNoopTracerProvider()) assert.NoError(t, err) kubelet.imageManager = &fakeImageGCManager{ fakeImageService: fakeRuntime, @@ -2449,6 +2451,190 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) { checkPodStatus(t, kl, podToAdmit, v1.PodPending) } +func TestPodResourceAllocationReset(t *testing.T) { + defer 
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)() + testKubelet := newTestKubelet(t, false) + defer testKubelet.Cleanup() + kubelet := testKubelet.kubelet + kubelet.statusManager = status.NewFakeManager() + + nodes := []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("8"), + v1.ResourceMemory: resource.MustParse("8Gi"), + }, + Allocatable: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + v1.ResourceMemory: resource.MustParse("4Gi"), + v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI), + }, + }, + }, + } + kubelet.nodeLister = testNodeLister{nodes: nodes} + + cpu500m := resource.MustParse("500m") + cpu800m := resource.MustParse("800m") + mem500M := resource.MustParse("500Mi") + mem800M := resource.MustParse("800Mi") + cpu500mMem500MPodSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + }, + }, + }, + } + cpu800mMem800MPodSpec := cpu500mMem500MPodSpec.DeepCopy() + cpu800mMem800MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu800m, v1.ResourceMemory: mem800M} + cpu800mPodSpec := cpu500mMem500MPodSpec.DeepCopy() + cpu800mPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu800m} + mem800MPodSpec := cpu500mMem500MPodSpec.DeepCopy() + mem800MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceMemory: mem800M} + + cpu500mPodSpec := cpu500mMem500MPodSpec.DeepCopy() + cpu500mPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu500m} + mem500MPodSpec := cpu500mMem500MPodSpec.DeepCopy() + mem500MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceMemory: mem500M} + emptyPodSpec := cpu500mMem500MPodSpec.DeepCopy() + emptyPodSpec.Containers[0].Resources.Requests = v1.ResourceList{} + + tests := []struct { + name string + pod *v1.Pod + existingPodAllocation *v1.Pod + expectedPodResourceAllocation state.PodResourceAllocation + }{ + { + name: "Having both memory and cpu, resource allocation not exists", + pod: podWithUIDNameNsSpec("1", "pod1", "foo", *cpu500mMem500MPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "1": map[string]v1.ResourceList{ + cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "Having both memory and cpu, resource allocation exists", + pod: podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec), + existingPodAllocation: podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "2": map[string]v1.ResourceList{ + cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "Having both memory and cpu, resource allocation exists (with different value)", + pod: podWithUIDNameNsSpec("3", "pod3", "foo", *cpu500mMem500MPodSpec), + existingPodAllocation: podWithUIDNameNsSpec("3", "pod3", "foo", *cpu800mMem800MPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "3": map[string]v1.ResourceList{ + cpu800mMem800MPodSpec.Containers[0].Name: cpu800mMem800MPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "Only has cpu, resource allocation not exists", + 
pod: podWithUIDNameNsSpec("4", "pod5", "foo", *cpu500mPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "4": map[string]v1.ResourceList{ + cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "Only has cpu, resource allocation exists", + pod: podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec), + existingPodAllocation: podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "5": map[string]v1.ResourceList{ + cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "Only has cpu, resource allocation exists (with different value)", + pod: podWithUIDNameNsSpec("6", "pod6", "foo", *cpu500mPodSpec), + existingPodAllocation: podWithUIDNameNsSpec("6", "pod6", "foo", *cpu800mPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "6": map[string]v1.ResourceList{ + cpu800mPodSpec.Containers[0].Name: cpu800mPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "Only has memory, resource allocation not exists", + pod: podWithUIDNameNsSpec("7", "pod7", "foo", *mem500MPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "7": map[string]v1.ResourceList{ + mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "Only has memory, resource allocation exists", + pod: podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec), + existingPodAllocation: podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "8": map[string]v1.ResourceList{ + mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "Only has memory, resource allocation exists (with different value)", + pod: podWithUIDNameNsSpec("9", "pod9", "foo", *mem500MPodSpec), + existingPodAllocation: podWithUIDNameNsSpec("9", "pod9", "foo", *mem800MPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "9": map[string]v1.ResourceList{ + mem800MPodSpec.Containers[0].Name: mem800MPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "No CPU and memory, resource allocation not exists", + pod: podWithUIDNameNsSpec("10", "pod10", "foo", *emptyPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "10": map[string]v1.ResourceList{ + emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + { + name: "No CPU and memory, resource allocation exists", + pod: podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec), + existingPodAllocation: podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec), + expectedPodResourceAllocation: state.PodResourceAllocation{ + "11": map[string]v1.ResourceList{ + emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources.Requests, + }, + }, + }, + } + for _, tc := range tests { + if tc.existingPodAllocation != nil { + // when kubelet restarts, AllocatedResources has already existed before adding pod + err := kubelet.statusManager.SetPodAllocation(tc.existingPodAllocation) + if err != nil { + t.Fatalf("failed to set pod allocation: %v", err) + } + } + kubelet.HandlePodAdditions([]*v1.Pod{tc.pod}) + + allocatedResources, found := kubelet.statusManager.GetContainerResourceAllocation(string(tc.pod.UID), tc.pod.Spec.Containers[0].Name) + if !found { + t.Fatalf("resource allocation should exist: (pod: %#v, 
container: %s)", tc.pod, tc.pod.Spec.Containers[0].Name) + } + assert.Equal(t, tc.expectedPodResourceAllocation[string(tc.pod.UID)][tc.pod.Spec.Containers[0].Name], allocatedResources, tc.name) + } +} + func TestHandlePodResourcesResize(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)() testKubelet := newTestKubelet(t, false) diff --git a/pkg/kubelet/kuberuntime/instrumented_services.go b/pkg/kubelet/kuberuntime/instrumented_services.go index cfa633a99837b..3630d3d2b36a3 100644 --- a/pkg/kubelet/kuberuntime/instrumented_services.go +++ b/pkg/kubelet/kuberuntime/instrumented_services.go @@ -317,7 +317,7 @@ func (in instrumentedImageManagerService) RemoveImage(ctx context.Context, image return err } -func (in instrumentedImageManagerService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) { +func (in instrumentedImageManagerService) ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error) { const operation = "image_fs_info" defer recordOperation(operation, time.Now()) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go index c76389c0d9ff9..f14b6d1a6fc1e 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go @@ -107,18 +107,8 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod, lcr.HugepageLimits = GetHugepageLimitsFromResources(container.Resources) - if swapConfigurationHelper := newSwapConfigurationHelper(*m.machineInfo); utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeSwap) { - // NOTE(ehashman): Behaviour is defined in the opencontainers runtime spec: - // https://github.com/opencontainers/runtime-spec/blob/1c3f411f041711bbeecf35ff7e93461ea6789220/config-linux.md#memory - switch m.memorySwapBehavior { - case kubelettypes.LimitedSwap: - swapConfigurationHelper.ConfigureLimitedSwap(lcr, pod, container) - default: - swapConfigurationHelper.ConfigureUnlimitedSwap(lcr) - } - } else { - swapConfigurationHelper.ConfigureNoSwap(lcr) - } + // Configure swap for the container + m.configureContainerSwapResources(lcr, pod, container) // Set memory.min and memory.high to enforce MemoryQoS if enforceMemoryQoS { @@ -170,6 +160,30 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod, return lcr } +// configureContainerSwapResources configures the swap resources for a specified (linux) container. +// Swap is only configured if a swap cgroup controller is available and the NodeSwap feature gate is enabled. 
+func (m *kubeGenericRuntimeManager) configureContainerSwapResources(lcr *runtimeapi.LinuxContainerResources, pod *v1.Pod, container *v1.Container) { + if !swapControllerAvailable() { + klog.InfoS("No swap cgroup controller present", "swapBehavior", m.memorySwapBehavior, "pod", klog.KObj(pod), "containerName", container.Name) + return + } + swapConfigurationHelper := newSwapConfigurationHelper(*m.machineInfo) + + if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeSwap) { + swapConfigurationHelper.ConfigureNoSwap(lcr) + return + } + + // NOTE(ehashman): Behavior is defined in the opencontainers runtime spec: + // https://github.com/opencontainers/runtime-spec/blob/1c3f411f041711bbeecf35ff7e93461ea6789220/config-linux.md#memory + switch m.memorySwapBehavior { + case kubelettypes.LimitedSwap: + swapConfigurationHelper.ConfigureLimitedSwap(lcr, pod, container) + default: + swapConfigurationHelper.ConfigureUnlimitedSwap(lcr) + } +} + // generateContainerResources generates platform specific (linux) container resources config for runtime func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources { enforceMemoryQoS := false @@ -315,12 +329,15 @@ var ( swapControllerAvailabilityOnce sync.Once ) -func swapControllerAvailable() bool { +// Note: this function variable is being added here so it would be possible to mock +// the swap controller availability for unit tests by assigning a new function to it. Without it, +// the swap controller availability would solely depend on the environment running the test. +var swapControllerAvailable = func() bool { // See https://github.com/containerd/containerd/pull/7838/ swapControllerAvailabilityOnce.Do(func() { const warn = "Failed to detect the availability of the swap controller, assuming not available" p := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" - if libcontainercgroups.IsCgroup2UnifiedMode() { + if isCgroup2UnifiedMode() { // memory.swap.max does not exist in the cgroup root, so we check /sys/fs/cgroup//memory.swap.max _, unified, err := cgroups.ParseCgroupFileUnified("/proc/self/cgroup") if err != nil { diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go index b50eee1d4ee60..59be8cead50bb 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go @@ -22,14 +22,15 @@ package kuberuntime import ( "context" "fmt" - "k8s.io/kubernetes/pkg/kubelet/cm" - "k8s.io/kubernetes/pkg/kubelet/types" "math" "os" "reflect" "strconv" "testing" + "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/pkg/kubelet/types" + "github.com/google/go-cmp/cmp" libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/stretchr/testify/assert" @@ -845,12 +846,12 @@ func TestGenerateLinuxContainerResources(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { + defer setSwapControllerAvailableDuringTest(false)() if tc.scalingFg { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)() } setCgroupVersionDuringTest(cgroupV1) - tc.expected.MemorySwapLimitInBytes = tc.expected.MemoryLimitInBytes pod.Spec.Containers[0].Resources = v1.ResourceRequirements{Limits: tc.limits, Requests: tc.requests} if len(tc.cStatus) > 0 { @@ -891,6 +892,19 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { Status: v1.PodStatus{}, } 
+ expectSwapDisabled := func(cgroupVersion CgroupVersion, resources ...*runtimeapi.LinuxContainerResources) { + const msg = "container is expected to not have swap configured" + + for _, r := range resources { + switch cgroupVersion { + case cgroupV1: + assert.Equal(t, int64(0), r.MemorySwapLimitInBytes, msg) + case cgroupV2: + assert.NotContains(t, r.Unified, cm.Cgroup2MaxSwapFilename, msg) + } + } + } + expectNoSwap := func(cgroupVersion CgroupVersion, resources ...*runtimeapi.LinuxContainerResources) { const msg = "container is expected to not have swap access" @@ -939,6 +953,7 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { name string cgroupVersion CgroupVersion qosClass v1.PodQOSClass + swapDisabledOnNode bool nodeSwapFeatureGateEnabled bool swapBehavior string addContainerWithoutRequests bool @@ -994,28 +1009,173 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { // With Guaranteed and Best-effort QoS { - name: "Best-effort Qos, cgroups v2, LimitedSwap", + name: "Best-effort QoS, cgroups v2, LimitedSwap", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.LimitedSwap, + }, + { + name: "Best-effort QoS, cgroups v2, UnlimitedSwap", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.UnlimitedSwap, + }, + { + name: "Guaranteed QoS, cgroups v2, LimitedSwap", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSGuaranteed, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.LimitedSwap, + }, + { + name: "Guaranteed QoS, cgroups v2, UnlimitedSwap", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSGuaranteed, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.UnlimitedSwap, + }, + + // With a "guaranteed" container (when memory requests equal to limits) + { + name: "Burstable QoS, cgroups v2, LimitedSwap, with a guaranteed container", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.LimitedSwap, + addContainerWithoutRequests: false, + addGuaranteedContainer: true, + }, + { + name: "Burstable QoS, cgroups v2, UnlimitedSwap, with a guaranteed container", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.UnlimitedSwap, + addContainerWithoutRequests: false, + addGuaranteedContainer: true, + }, + + // Swap is expected to be allocated + { + name: "Burstable QoS, cgroups v2, LimitedSwap", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.LimitedSwap, + addContainerWithoutRequests: false, + addGuaranteedContainer: false, + }, + { + name: "Burstable QoS, cgroups v2, UnlimitedSwap", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.UnlimitedSwap, + addContainerWithoutRequests: false, + addGuaranteedContainer: false, + }, + { + name: "Burstable QoS, cgroups v2, LimitedSwap, with a container with no requests", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.LimitedSwap, + addContainerWithoutRequests: true, + addGuaranteedContainer: false, + }, + { + name: "Burstable QoS, cgroups v2, UnlimitedSwap, with a container with no requests", + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.UnlimitedSwap, + addContainerWithoutRequests: true, 
+ addGuaranteedContainer: false, + }, + // All the above examples with Swap disabled on node + { + name: "Swap disabled on node, cgroups v1, LimitedSwap, Burstable QoS", + swapDisabledOnNode: true, + cgroupVersion: cgroupV1, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.LimitedSwap, + }, + { + name: "Swap disabled on node, cgroups v1, UnlimitedSwap, Burstable QoS", + swapDisabledOnNode: true, + cgroupVersion: cgroupV1, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.UnlimitedSwap, + }, + { + name: "Swap disabled on node, cgroups v1, LimitedSwap, Best-effort QoS", + swapDisabledOnNode: true, + cgroupVersion: cgroupV1, + qosClass: v1.PodQOSBestEffort, + nodeSwapFeatureGateEnabled: true, + swapBehavior: types.LimitedSwap, + }, + + // With feature gate turned off + { + name: "Swap disabled on node, NodeSwap feature gate turned off, cgroups v2, LimitedSwap", + swapDisabledOnNode: true, + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: false, + swapBehavior: types.LimitedSwap, + }, + { + name: "Swap disabled on node, NodeSwap feature gate turned off, cgroups v2, UnlimitedSwap", + swapDisabledOnNode: true, + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBurstable, + nodeSwapFeatureGateEnabled: false, + swapBehavior: types.UnlimitedSwap, + }, + + // With no swapBehavior, UnlimitedSwap should be the default + { + name: "Swap disabled on node, With no swapBehavior - UnlimitedSwap should be the default", + swapDisabledOnNode: true, + cgroupVersion: cgroupV2, + qosClass: v1.PodQOSBestEffort, + nodeSwapFeatureGateEnabled: true, + swapBehavior: "", + }, + + // With Guaranteed and Best-effort QoS + { + name: "Swap disabled on node, Best-effort QoS, cgroups v2, LimitedSwap", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSBurstable, nodeSwapFeatureGateEnabled: true, swapBehavior: types.LimitedSwap, }, { - name: "Best-effort Qos, cgroups v2, UnlimitedSwap", + name: "Swap disabled on node, Best-effort QoS, cgroups v2, UnlimitedSwap", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSBurstable, nodeSwapFeatureGateEnabled: true, swapBehavior: types.UnlimitedSwap, }, { - name: "Guaranteed Qos, cgroups v2, LimitedSwap", + name: "Swap disabled on node, Guaranteed QoS, cgroups v2, LimitedSwap", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSGuaranteed, nodeSwapFeatureGateEnabled: true, swapBehavior: types.LimitedSwap, }, { - name: "Guaranteed Qos, cgroups v2, UnlimitedSwap", + name: "Swap disabled on node, Guaranteed QoS, cgroups v2, UnlimitedSwap", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSGuaranteed, nodeSwapFeatureGateEnabled: true, @@ -1024,7 +1184,8 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { // With a "guaranteed" container (when memory requests equal to limits) { - name: "Burstable Qos, cgroups v2, LimitedSwap, with a guaranteed container", + name: "Swap disabled on node, Burstable QoS, cgroups v2, LimitedSwap, with a guaranteed container", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSBurstable, nodeSwapFeatureGateEnabled: true, @@ -1033,7 +1194,8 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { addGuaranteedContainer: true, }, { - name: "Burstable Qos, cgroups v2, UnlimitedSwap, with a guaranteed container", + name: "Swap disabled on node, Burstable QoS, cgroups v2, UnlimitedSwap, with a guaranteed container", + 
swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSBurstable, nodeSwapFeatureGateEnabled: true, @@ -1044,7 +1206,8 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { // Swap is expected to be allocated { - name: "Burstable Qos, cgroups v2, LimitedSwap", + name: "Swap disabled on node, Burstable QoS, cgroups v2, LimitedSwap", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSBurstable, nodeSwapFeatureGateEnabled: true, @@ -1053,7 +1216,8 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { addGuaranteedContainer: false, }, { - name: "Burstable Qos, cgroups v2, UnlimitedSwap", + name: "Swap disabled on node, Burstable QoS, cgroups v2, UnlimitedSwap", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSBurstable, nodeSwapFeatureGateEnabled: true, @@ -1062,7 +1226,8 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { addGuaranteedContainer: false, }, { - name: "Burstable Qos, cgroups v2, LimitedSwap, with a container with no requests", + name: "Swap disabled on node, Burstable QoS, cgroups v2, LimitedSwap, with a container with no requests", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSBurstable, nodeSwapFeatureGateEnabled: true, @@ -1071,7 +1236,8 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { addGuaranteedContainer: false, }, { - name: "Burstable Qos, cgroups v2, UnlimitedSwap, with a container with no requests", + name: "Swap disabled on node, Burstable QoS, cgroups v2, UnlimitedSwap, with a container with no requests", + swapDisabledOnNode: true, cgroupVersion: cgroupV2, qosClass: v1.PodQOSBurstable, nodeSwapFeatureGateEnabled: true, @@ -1082,6 +1248,7 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { setCgroupVersionDuringTest(tc.cgroupVersion) + defer setSwapControllerAvailableDuringTest(!tc.swapDisabledOnNode)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeSwap, tc.nodeSwapFeatureGateEnabled)() m.memorySwapBehavior = tc.swapBehavior @@ -1117,6 +1284,11 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) { resourcesC1 := m.generateLinuxContainerResources(pod, &pod.Spec.Containers[0], false) resourcesC2 := m.generateLinuxContainerResources(pod, &pod.Spec.Containers[1], false) + if tc.swapDisabledOnNode { + expectSwapDisabled(tc.cgroupVersion, resourcesC1, resourcesC2) + return + } + if !tc.nodeSwapFeatureGateEnabled || tc.cgroupVersion == cgroupV1 || (tc.swapBehavior == types.LimitedSwap && tc.qosClass != v1.PodQOSBurstable) { expectNoSwap(tc.cgroupVersion, resourcesC1, resourcesC2) return @@ -1151,3 +1323,14 @@ func setCgroupVersionDuringTest(version CgroupVersion) { return version == cgroupV2 } } + +func setSwapControllerAvailableDuringTest(available bool) func() { + original := swapControllerAvailable + swapControllerAvailable = func() bool { + return available + } + + return func() { + swapControllerAvailable = original + } +} diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 9854b207fd0c2..5306dac2e0387 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -676,7 +676,7 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku switch rName { case v1.ResourceCPU: podCpuResources := &cm.ResourceConfig{CPUPeriod: podResources.CPUPeriod} - if setLimitValue == 
true { + if setLimitValue { podCpuResources.CPUQuota = podResources.CPUQuota } else { podCpuResources.CPUShares = podResources.CPUShares diff --git a/pkg/kubelet/kuberuntime/logs/logs.go b/pkg/kubelet/kuberuntime/logs/logs.go index a2dedb08f80ff..b22734d792c72 100644 --- a/pkg/kubelet/kuberuntime/logs/logs.go +++ b/pkg/kubelet/kuberuntime/logs/logs.go @@ -318,6 +318,8 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r found := true writer := newLogWriter(stdout, stderr, opts) msg := &logMessage{} + baseName := filepath.Base(path) + dir := filepath.Dir(path) for { if stop || (limitedMode && limitedNum == 0) { klog.V(2).InfoS("Finished parsing log file", "path", path) @@ -344,8 +346,8 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r return fmt.Errorf("failed to create fsnotify watcher: %v", err) } defer watcher.Close() - if err := watcher.Add(f.Name()); err != nil { - return fmt.Errorf("failed to watch file %q: %v", f.Name(), err) + if err := watcher.Add(dir); err != nil { + return fmt.Errorf("failed to watch directory %q: %w", dir, err) } // If we just created the watcher, try again to read as we might have missed // the event. @@ -353,7 +355,7 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r } var recreated bool // Wait until the next log change. - found, recreated, err = waitLogs(ctx, containerID, watcher, runtimeService) + found, recreated, err = waitLogs(ctx, containerID, baseName, watcher, runtimeService) if err != nil { return err } @@ -367,13 +369,7 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r } defer newF.Close() f.Close() - if err := watcher.Remove(f.Name()); err != nil && !os.IsNotExist(err) { - klog.ErrorS(err, "Failed to remove file watch", "path", f.Name()) - } f = newF - if err := watcher.Add(f.Name()); err != nil { - return fmt.Errorf("failed to watch file %q: %v", f.Name(), err) - } r = bufio.NewReader(f) } // If the container exited consume data until the next EOF @@ -441,7 +437,7 @@ func isContainerRunning(ctx context.Context, id string, r internalapi.RuntimeSer // waitLogs wait for the next log write. It returns two booleans and an error. The first boolean // indicates whether a new log is found; the second boolean if the log file was recreated; // the error is error happens during waiting new logs. 
-func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, bool, error) { +func waitLogs(ctx context.Context, id string, logName string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, bool, error) { // no need to wait if the pod is not running if running, err := isContainerRunning(ctx, id, runtimeService); !running { return false, false, err @@ -453,16 +449,10 @@ func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeServic return false, false, fmt.Errorf("context cancelled") case e := <-w.Events: switch e.Op { - case fsnotify.Write: + case fsnotify.Write, fsnotify.Rename, fsnotify.Remove, fsnotify.Chmod: return true, false, nil case fsnotify.Create: - fallthrough - case fsnotify.Rename: - fallthrough - case fsnotify.Remove: - fallthrough - case fsnotify.Chmod: - return true, true, nil + return true, filepath.Base(e.Name) == logName, nil default: klog.ErrorS(nil, "Received unexpected fsnotify event, retrying", "event", e) } diff --git a/pkg/kubelet/kuberuntime/logs/logs_test.go b/pkg/kubelet/kuberuntime/logs/logs_test.go index f2c17cb8161ab..7b5e2b3a585f3 100644 --- a/pkg/kubelet/kuberuntime/logs/logs_test.go +++ b/pkg/kubelet/kuberuntime/logs/logs_test.go @@ -23,6 +23,7 @@ import ( "fmt" "io" "os" + "path/filepath" "testing" "time" @@ -30,6 +31,7 @@ import ( v1 "k8s.io/api/core/v1" apitesting "k8s.io/cri-api/pkg/apis/testing" + "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/utils/pointer" "github.com/stretchr/testify/assert" @@ -211,6 +213,88 @@ func TestReadLogs(t *testing.T) { } } +func TestReadRotatedLog(t *testing.T) { + tmpDir := t.TempDir() + file, err := os.CreateTemp(tmpDir, "logfile") + if err != nil { + assert.NoErrorf(t, err, "unable to create temp file") + } + + stdoutBuf := &bytes.Buffer{} + stderrBuf := &bytes.Buffer{} + containerID := "fake-container-id" + fakeRuntimeService := &apitesting.FakeRuntimeService{ + Containers: map[string]*apitesting.FakeContainer{ + containerID: { + ContainerStatus: runtimeapi.ContainerStatus{ + State: runtimeapi.ContainerState_CONTAINER_RUNNING, + }, + }, + }, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Start to follow the container's log. + go func(ctx context.Context) { + podLogOptions := v1.PodLogOptions{ + Follow: true, + } + opts := NewLogOptions(&podLogOptions, time.Now()) + ReadLogs(ctx, file.Name(), containerID, opts, fakeRuntimeService, stdoutBuf, stderrBuf) + }(ctx) + + // log in stdout + expectedStdout := "line0\nline2\nline4\nline6\nline8\n" + // log in stderr + expectedStderr := "line1\nline3\nline5\nline7\nline9\n" + + dir := filepath.Dir(file.Name()) + baseName := filepath.Base(file.Name()) + + // Write 10 lines to log file. + // Let ReadLogs start. + time.Sleep(50 * time.Millisecond) + for line := 0; line < 10; line++ { + // Write the first three lines to log file + now := time.Now().Format(types.RFC3339NanoLenient) + if line%2 == 0 { + file.WriteString(fmt.Sprintf( + `{"log":"line%d\n","stream":"stdout","time":"%s"}`+"\n", line, now)) + } else { + file.WriteString(fmt.Sprintf( + `{"log":"line%d\n","stream":"stderr","time":"%s"}`+"\n", line, now)) + } + time.Sleep(1 * time.Millisecond) + + if line == 5 { + file.Close() + // Pretend to rotate the log. 
+ rotatedName := fmt.Sprintf("%s.%s", baseName, time.Now().Format("220060102-150405")) + rotatedName = filepath.Join(dir, rotatedName) + if err := os.Rename(filepath.Join(dir, baseName), rotatedName); err != nil { + assert.NoErrorf(t, err, "failed to rotate log %q to %q", file.Name(), rotatedName) + return + } + + newF := filepath.Join(dir, baseName) + if file, err = os.Create(newF); err != nil { + assert.NoError(t, err, "unable to create new log file") + return + } + time.Sleep(20 * time.Millisecond) + } + } + + time.Sleep(20 * time.Millisecond) + // Make the function ReadLogs end. + fakeRuntimeService.Lock() + fakeRuntimeService.Containers[containerID].State = runtimeapi.ContainerState_CONTAINER_EXITED + fakeRuntimeService.Unlock() + + assert.Equal(t, expectedStdout, stdoutBuf.String()) + assert.Equal(t, expectedStderr, stderrBuf.String()) +} + func TestParseLog(t *testing.T) { timestamp, err := time.Parse(timeFormatIn, "2016-10-20T18:39:20.57606443Z") assert.NoError(t, err) diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go index 910c7a42edcc8..fcd7656bf9cf8 100644 --- a/pkg/kubelet/lifecycle/handlers.go +++ b/pkg/kubelet/lifecycle/handlers.go @@ -26,6 +26,7 @@ import ( "net/url" "strconv" "strings" + "time" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -86,6 +87,14 @@ func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.Cont klog.V(1).ErrorS(err, "HTTP lifecycle hook for Container in Pod failed", "path", handler.HTTPGet.Path, "containerName", container.Name, "pod", klog.KObj(pod)) } return msg, err + case handler.Sleep != nil: + err := hr.runSleepHandler(ctx, handler.Sleep.Seconds) + var msg string + if err != nil { + msg = fmt.Sprintf("Sleep lifecycle hook (%d) for Container %q in Pod %q failed - error: %v", handler.Sleep.Seconds, container.Name, format.Pod(pod), err) + klog.V(1).ErrorS(err, "Sleep lifecycle hook for Container in Pod failed", "sleepSeconds", handler.Sleep.Seconds, "containerName", container.Name, "pod", klog.KObj(pod)) + } + return msg, err default: err := fmt.Errorf("invalid handler: %v", handler) msg := fmt.Sprintf("Cannot run handler: %v", err) @@ -117,6 +126,20 @@ func resolvePort(portReference intstr.IntOrString, container *v1.Container) (int return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container) } +func (hr *handlerRunner) runSleepHandler(ctx context.Context, seconds int64) error { + if !utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepAction) { + return nil + } + c := time.After(time.Duration(seconds) * time.Second) + select { + case <-ctx.Done(): + // unexpected termination + return fmt.Errorf("container terminated before sleep hook finished") + case <-c: + return nil + } +} + func (hr *handlerRunner) runHTTPHandler(ctx context.Context, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error { host := handler.HTTPGet.Host podIP := host diff --git a/pkg/kubelet/lifecycle/handlers_test.go b/pkg/kubelet/lifecycle/handlers_test.go index a6d095add3873..2b8be29891983 100644 --- a/pkg/kubelet/lifecycle/handlers_test.go +++ b/pkg/kubelet/lifecycle/handlers_test.go @@ -43,48 +43,44 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" ) -func TestResolvePortInt(t *testing.T) { - expected := 80 - port, err := resolvePort(intstr.FromInt32(int32(expected)), &v1.Container{}) - if port != expected { - t.Errorf("expected: %d, saw: %d", expected, port) - } - if err != nil { - t.Errorf("unexpected error: %v", err) 
- } -} - -func TestResolvePortString(t *testing.T) { - expected := 80 - name := "foo" - container := &v1.Container{ - Ports: []v1.ContainerPort{ - {Name: name, ContainerPort: int32(expected)}, +func TestResolvePort(t *testing.T) { + for _, testCase := range []struct { + container *v1.Container + stringPort string + expected int + }{ + { + stringPort: "foo", + container: &v1.Container{ + Ports: []v1.ContainerPort{{Name: "foo", ContainerPort: int32(80)}}, + }, + expected: 80, }, - } - port, err := resolvePort(intstr.FromString(name), container) - if port != expected { - t.Errorf("expected: %d, saw: %d", expected, port) - } - if err != nil { - t.Errorf("unexpected error: %v", err) - } -} - -func TestResolvePortStringUnknown(t *testing.T) { - expected := int32(80) - name := "foo" - container := &v1.Container{ - Ports: []v1.ContainerPort{ - {Name: "bar", ContainerPort: expected}, + { + container: &v1.Container{}, + stringPort: "80", + expected: 80, }, - } - port, err := resolvePort(intstr.FromString(name), container) - if port != -1 { - t.Errorf("expected: -1, saw: %d", port) - } - if err == nil { - t.Error("unexpected non-error") + { + container: &v1.Container{ + Ports: []v1.ContainerPort{ + {Name: "bar", ContainerPort: int32(80)}, + }, + }, + stringPort: "foo", + expected: -1, + }, + } { + port, err := resolvePort(intstr.FromString(testCase.stringPort), testCase.container) + if testCase.expected != -1 && err != nil { + t.Fatalf("unexpected error while resolving port: %s", err) + } + if testCase.expected == -1 && err == nil { + t.Errorf("expected error when a port fails to resolve") + } + if testCase.expected != port { + t.Errorf("failed to resolve port, expected %d, got %d", testCase.expected, port) + } } } @@ -859,3 +855,59 @@ func TestIsHTTPResponseError(t *testing.T) { t.Errorf("unexpected http response error: %v", err) } } + +func TestRunSleepHandler(t *testing.T) { + handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil, nil) + containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} + containerName := "containerFoo" + container := v1.Container{ + Name: containerName, + Lifecycle: &v1.Lifecycle{ + PreStop: &v1.LifecycleHandler{}, + }, + } + pod := v1.Pod{} + pod.ObjectMeta.Name = "podFoo" + pod.ObjectMeta.Namespace = "nsFoo" + pod.Spec.Containers = []v1.Container{container} + + tests := []struct { + name string + sleepSeconds int64 + terminationGracePeriodSeconds int64 + expectErr bool + expectedErr string + }{ + { + name: "valid seconds", + sleepSeconds: 5, + terminationGracePeriodSeconds: 30, + }, + { + name: "longer than TerminationGracePeriodSeconds", + sleepSeconds: 3, + terminationGracePeriodSeconds: 2, + expectErr: true, + expectedErr: "container terminated before sleep hook finished", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLifecycleSleepAction, true)() + + pod.Spec.Containers[0].Lifecycle.PreStop.Sleep = &v1.SleepAction{Seconds: tt.sleepSeconds} + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(tt.terminationGracePeriodSeconds)*time.Second) + defer cancel() + + _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PreStop) + + if !tt.expectErr && err != nil { + t.Errorf("unexpected success") + } + if tt.expectErr && err.Error() != tt.expectedErr { + t.Errorf("%s: expected error want %s, got %s", tt.name, tt.expectedErr, err.Error()) + } + }) + } +} 
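
The lifecycle changes above add a Sleep PreStop handler whose core is a select between the hook's timer and the caller's context; if the context is cancelled first (for example because the grace period expires), the hook fails. Below is a minimal standalone sketch of that pattern — `runSleep` and its error wording are illustrative stand-ins, not the kubelet's actual symbols:

```go
// Sketch of the sleep-hook pattern: wait for the requested duration, but
// abort early if the surrounding context is cancelled.
package main

import (
	"context"
	"fmt"
	"time"
)

func runSleep(ctx context.Context, seconds int64) error {
	select {
	case <-ctx.Done():
		// the container was terminated (or the grace period ran out) first
		return fmt.Errorf("container terminated before sleep hook finished")
	case <-time.After(time.Duration(seconds) * time.Second):
		return nil
	}
}

func main() {
	// A 2-second deadline with a 3-second sleep reproduces the
	// "longer than TerminationGracePeriodSeconds" case from the test table.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	fmt.Println(runSleep(ctx, 3))
}
```
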
diff --git a/pkg/kubelet/lifecycle/predicate.go b/pkg/kubelet/lifecycle/predicate.go index d20671749758f..8b93ba39ae3a7 100644 --- a/pkg/kubelet/lifecycle/predicate.go +++ b/pkg/kubelet/lifecycle/predicate.go @@ -71,6 +71,23 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult } } admitPod := attrs.Pod + + // perform the checks that preemption will not help first to avoid meaningless pod eviction + if rejectPodAdmissionBasedOnOSSelector(admitPod, node) { + return PodAdmitResult{ + Admit: false, + Reason: "PodOSSelectorNodeLabelDoesNotMatch", + Message: "Failed to admit pod as the `kubernetes.io/os` label doesn't match node label", + } + } + if rejectPodAdmissionBasedOnOSField(admitPod) { + return PodAdmitResult{ + Admit: false, + Reason: "PodOSNotSupported", + Message: "Failed to admit pod as the OS field doesn't match node OS", + } + } + pods := attrs.OtherPods nodeInfo := schedulerframework.NewNodeInfo(pods...) nodeInfo.SetNode(node) @@ -160,21 +177,6 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult Message: message, } } - if rejectPodAdmissionBasedOnOSSelector(admitPod, node) { - return PodAdmitResult{ - Admit: false, - Reason: "PodOSSelectorNodeLabelDoesNotMatch", - Message: "Failed to admit pod as the `kubernetes.io/os` label doesn't match node label", - } - } - // By this time, node labels should have been synced, this helps in identifying the pod with the usage. - if rejectPodAdmissionBasedOnOSField(admitPod) { - return PodAdmitResult{ - Admit: false, - Reason: "PodOSNotSupported", - Message: "Failed to admit pod as the OS field doesn't match node OS", - } - } return PodAdmitResult{ Admit: true, } diff --git a/pkg/kubelet/metrics/collectors/resource_metrics.go b/pkg/kubelet/metrics/collectors/resource_metrics.go index 36d29be4f6fff..c1b1cdffc2ab0 100644 --- a/pkg/kubelet/metrics/collectors/resource_metrics.go +++ b/pkg/kubelet/metrics/collectors/resource_metrics.go @@ -211,8 +211,7 @@ func (rc *resourceMetricsCollector) collectContainerStartTime(ch chan<- metrics. 
return } - ch <- metrics.NewLazyMetricWithTimestamp(s.StartTime.Time, - metrics.NewLazyConstMetric(containerStartTimeDesc, metrics.GaugeValue, float64(s.StartTime.UnixNano())/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace)) + ch <- metrics.NewLazyConstMetric(containerStartTimeDesc, metrics.GaugeValue, float64(s.StartTime.UnixNano())/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace) } func (rc *resourceMetricsCollector) collectContainerCPUMetrics(ch chan<- metrics.Metric, pod summary.PodStats, s summary.ContainerStats) { diff --git a/pkg/kubelet/metrics/collectors/resource_metrics_test.go b/pkg/kubelet/metrics/collectors/resource_metrics_test.go index f2bf39433c534..cc1cc573b55c1 100644 --- a/pkg/kubelet/metrics/collectors/resource_metrics_test.go +++ b/pkg/kubelet/metrics/collectors/resource_metrics_test.go @@ -213,9 +213,9 @@ func TestCollectResourceMetrics(t *testing.T) { container_memory_working_set_bytes{container="container_b",namespace="namespace_a",pod="pod_a"} 1000 1624396278302 # HELP container_start_time_seconds [STABLE] Start time of the container since unix epoch in seconds # TYPE container_start_time_seconds gauge - container_start_time_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 1.6243962483020916e+09 1624396248302 - container_start_time_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 1.6243956783020916e+09 1624395678302 - container_start_time_seconds{container="container_b",namespace="namespace_a",pod="pod_a"} 1.6243961583020916e+09 1624396158302 + container_start_time_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 1.6243962483020916e+09 + container_start_time_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 1.6243956783020916e+09 + container_start_time_seconds{container="container_b",namespace="namespace_a",pod="pod_a"} 1.6243961583020916e+09 # HELP container_swap_usage_bytes [ALPHA] Current amount of the container swap usage in bytes. 
Reported only on non-windows systems # TYPE container_swap_usage_bytes gauge container_swap_usage_bytes{container="container_a",namespace="namespace_a",pod="pod_a"} 1000 1624396278302 @@ -319,8 +319,8 @@ func TestCollectResourceMetrics(t *testing.T) { container_memory_working_set_bytes{container="container_a",namespace="namespace_b",pod="pod_b"} 1000 1624396278302 # HELP container_start_time_seconds [STABLE] Start time of the container since unix epoch in seconds # TYPE container_start_time_seconds gauge - container_start_time_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 1.6243962483020916e+09 1624396248302 - container_start_time_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 1.6243956783020916e+09 1624395678302 + container_start_time_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 1.6243962483020916e+09 + container_start_time_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 1.6243956783020916e+09 # HELP scrape_error [ALPHA] 1 if there was an error while getting container metrics, 0 otherwise # TYPE scrape_error gauge scrape_error 0 diff --git a/pkg/kubelet/metrics/metrics.go b/pkg/kubelet/metrics/metrics.go index 0897b59eb2195..c79e6c9bf9ab5 100644 --- a/pkg/kubelet/metrics/metrics.go +++ b/pkg/kubelet/metrics/metrics.go @@ -33,9 +33,15 @@ const ( KubeletSubsystem = "kubelet" NodeNameKey = "node_name" NodeLabelKey = "node" + NodeStartupPreKubeletKey = "node_startup_pre_kubelet_duration_seconds" + NodeStartupPreRegistrationKey = "node_startup_pre_registration_duration_seconds" + NodeStartupRegistrationKey = "node_startup_registration_duration_seconds" + NodeStartupPostRegistrationKey = "node_startup_post_registration_duration_seconds" + NodeStartupKey = "node_startup_duration_seconds" PodWorkerDurationKey = "pod_worker_duration_seconds" PodStartDurationKey = "pod_start_duration_seconds" PodStartSLIDurationKey = "pod_start_sli_duration_seconds" + PodStartTotalDurationKey = "pod_start_total_duration_seconds" CgroupManagerOperationsKey = "cgroup_manager_duration_seconds" PodWorkerStartDurationKey = "pod_worker_start_duration_seconds" PodStatusSyncDurationKey = "pod_status_sync_duration_seconds" @@ -111,12 +117,19 @@ const ( orphanPodCleanedVolumesKey = "orphan_pod_cleaned_volumes" orphanPodCleanedVolumesErrorsKey = "orphan_pod_cleaned_volumes_errors" + // Metric for tracking garbage collected images + ImageGarbageCollectedTotalKey = "image_garbage_collected_total" + // Values used in metric labels Container = "container" InitContainer = "init_container" EphemeralContainer = "ephemeral_container" ) +var ( + podStartupDurationBuckets = []float64{0.5, 1, 2, 3, 4, 5, 6, 8, 10, 20, 30, 45, 60, 120, 180, 240, 300, 360, 480, 600, 900, 1200, 1800, 2700, 3600} +) + var ( // NodeName is a Gauge that tracks the ode's name. The count is always 1. 
NodeName = metrics.NewGaugeVec( @@ -157,7 +170,7 @@ var ( Subsystem: KubeletSubsystem, Name: PodStartDurationKey, Help: "Duration in seconds from kubelet seeing a pod for the first time to the pod starting to run", - Buckets: metrics.DefBuckets, + Buckets: podStartupDurationBuckets, StabilityLevel: metrics.ALPHA, }, ) @@ -174,11 +187,30 @@ var ( Subsystem: KubeletSubsystem, Name: PodStartSLIDurationKey, Help: "Duration in seconds to start a pod, excluding time to pull images and run init containers, measured from pod creation timestamp to when all its containers are reported as started and observed via watch", - Buckets: []float64{0.5, 1, 2, 3, 4, 5, 6, 8, 10, 20, 30, 45, 60, 120, 180, 240, 300, 360, 480, 600, 900, 1200, 1800, 2700, 3600}, + Buckets: podStartupDurationBuckets, StabilityLevel: metrics.ALPHA, }, []string{}, ) + + // PodStartTotalDuration is a Histogram that tracks the duration (in seconds) it takes for a single pod to run + // since creation, including the time for image pulling. + // + // The histogram bucket boundaries for pod startup latency metrics, measured in seconds. These are hand-picked + // so as to be roughly exponential but still round numbers in everyday units. This is to minimise the number + // of buckets while allowing accurate measurement of thresholds which might be used in SLOs + // e.g. x% of pods start up within 30 seconds, or 15 minutes, etc. + PodStartTotalDuration = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Subsystem: KubeletSubsystem, + Name: PodStartTotalDurationKey, + Help: "Duration in seconds to start a pod since creation, including time to pull images and run init containers, measured from pod creation timestamp to when all its containers are reported as started and observed via watch", + Buckets: podStartupDurationBuckets, + StabilityLevel: metrics.ALPHA, + }, + []string{}, + ) + // CgroupManagerDuration is a Histogram that tracks the duration (in seconds) it takes for cgroup manager operations to complete. // Broken down by method. 
CgroupManagerDuration = metrics.NewHistogramVec( @@ -736,6 +768,60 @@ var ( StabilityLevel: metrics.ALPHA, }, ) + + NodeStartupPreKubeletDuration = metrics.NewGauge( + &metrics.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: NodeStartupPreKubeletKey, + Help: "Duration in seconds of node startup before kubelet starts.", + StabilityLevel: metrics.ALPHA, + }, + ) + + NodeStartupPreRegistrationDuration = metrics.NewGauge( + &metrics.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: NodeStartupPreRegistrationKey, + Help: "Duration in seconds of node startup before registration.", + StabilityLevel: metrics.ALPHA, + }, + ) + + NodeStartupRegistrationDuration = metrics.NewGauge( + &metrics.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: NodeStartupRegistrationKey, + Help: "Duration in seconds of node startup during registration.", + StabilityLevel: metrics.ALPHA, + }, + ) + + NodeStartupPostRegistrationDuration = metrics.NewGauge( + &metrics.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: NodeStartupPostRegistrationKey, + Help: "Duration in seconds of node startup after registration.", + StabilityLevel: metrics.ALPHA, + }, + ) + + NodeStartupDuration = metrics.NewGauge( + &metrics.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: NodeStartupKey, + Help: "Duration in seconds of node startup in total.", + StabilityLevel: metrics.ALPHA, + }, + ) + + ImageGarbageCollectedTotal = metrics.NewCounter( + &metrics.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: ImageGarbageCollectedTotalKey, + Help: "Total number of images garbage collected by the kubelet, whether through disk usage or image age.", + StabilityLevel: metrics.ALPHA, + }, + ) ) var registerMetrics sync.Once @@ -748,6 +834,12 @@ func Register(collectors ...metrics.StableCollector) { legacyregistry.MustRegister(PodWorkerDuration) legacyregistry.MustRegister(PodStartDuration) legacyregistry.MustRegister(PodStartSLIDuration) + legacyregistry.MustRegister(PodStartTotalDuration) + legacyregistry.MustRegister(NodeStartupPreKubeletDuration) + legacyregistry.MustRegister(NodeStartupPreRegistrationDuration) + legacyregistry.MustRegister(NodeStartupRegistrationDuration) + legacyregistry.MustRegister(NodeStartupPostRegistrationDuration) + legacyregistry.MustRegister(NodeStartupDuration) legacyregistry.MustRegister(CgroupManagerDuration) legacyregistry.MustRegister(PodWorkerStartDuration) legacyregistry.MustRegister(PodStatusSyncDuration) diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go index bf3f6e05a29c0..8515e0a7befb0 100644 --- a/pkg/kubelet/nodestatus/setters.go +++ b/pkg/kubelet/nodestatus/setters.go @@ -123,11 +123,19 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs if externalCloudProvider { // If --cloud-provider=external and node address is already set, // then we return early because provider set addresses should take precedence. - // Otherwise, we try to look up the node IP and let the cloud provider override it later + // Otherwise, we try to use the node IP defined via flags and let the cloud provider override it later // This should alleviate a lot of the bootstrapping issues with out-of-tree providers if len(node.Status.Addresses) > 0 { return nil } + // If nodeIPs are not specified wait for the external cloud-provider to set the node addresses. 
+ // Otherwise uses them on the assumption that the installer/administrator has the previous knowledge + // required to ensure the external cloud provider will use the same addresses to avoid the issues explained + // in https://github.com/kubernetes/kubernetes/issues/120720. + // We are already hinting the external cloud provider via the annotation AnnotationAlphaProvidedIPAddr. + if !nodeIPSpecified { + return nil + } } if cloud != nil { cloudNodeAddresses, err := nodeAddressesFunc() diff --git a/pkg/kubelet/nodestatus/setters_test.go b/pkg/kubelet/nodestatus/setters_test.go index 3e2aef43e0a87..28033f1c7878a 100644 --- a/pkg/kubelet/nodestatus/setters_test.go +++ b/pkg/kubelet/nodestatus/setters_test.go @@ -223,7 +223,7 @@ func TestNodeAddress(t *testing.T) { shouldError: false, }, { - name: "cloud provider is external", + name: "cloud provider is external and nodeIP specified", nodeIP: netutils.ParseIPSloppy("10.0.0.1"), nodeAddresses: []v1.NodeAddress{}, cloudProviderType: cloudProviderExternal, @@ -233,6 +233,21 @@ func TestNodeAddress(t *testing.T) { }, shouldError: false, }, + { + name: "cloud provider is external and nodeIP unspecified", + nodeIP: netutils.ParseIPSloppy("::"), + nodeAddresses: []v1.NodeAddress{}, + cloudProviderType: cloudProviderExternal, + expectedAddresses: []v1.NodeAddress{}, + shouldError: false, + }, + { + name: "cloud provider is external and no nodeIP", + nodeAddresses: []v1.NodeAddress{}, + cloudProviderType: cloudProviderExternal, + expectedAddresses: []v1.NodeAddress{}, + shouldError: false, + }, { name: "cloud doesn't report hostname, no override, detected hostname mismatch", nodeAddresses: []v1.NodeAddress{ diff --git a/pkg/kubelet/pleg/evented.go b/pkg/kubelet/pleg/evented.go index cbca33f394ca5..ef44ff5c9944d 100644 --- a/pkg/kubelet/pleg/evented.go +++ b/pkg/kubelet/pleg/evented.go @@ -71,7 +71,7 @@ type EventedPLEG struct { // For testability. clock clock.Clock // GenericPLEG is used to force relist when required. - genericPleg PodLifecycleEventGenerator + genericPleg podLifecycleEventGeneratorHandler // The maximum number of retries when getting container events from the runtime. eventedPlegMaxStreamRetries int // Indicates relisting related parameters @@ -87,17 +87,21 @@ type EventedPLEG struct { // NewEventedPLEG instantiates a new EventedPLEG object and return it. func NewEventedPLEG(runtime kubecontainer.Runtime, runtimeService internalapi.RuntimeService, eventChannel chan *PodLifecycleEvent, cache kubecontainer.Cache, genericPleg PodLifecycleEventGenerator, eventedPlegMaxStreamRetries int, - relistDuration *RelistDuration, clock clock.Clock) PodLifecycleEventGenerator { + relistDuration *RelistDuration, clock clock.Clock) (PodLifecycleEventGenerator, error) { + handler, ok := genericPleg.(podLifecycleEventGeneratorHandler) + if !ok { + return nil, fmt.Errorf("%v doesn't implement podLifecycleEventGeneratorHandler interface", genericPleg) + } return &EventedPLEG{ runtime: runtime, runtimeService: runtimeService, eventChannel: eventChannel, cache: cache, - genericPleg: genericPleg, + genericPleg: handler, eventedPlegMaxStreamRetries: eventedPlegMaxStreamRetries, relistDuration: relistDuration, clock: clock, - } + }, nil } // Watch returns a channel from which the subscriber can receive PodLifecycleEvent events. 
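
NewEventedPLEG above still accepts the generic PLEG through its public interface, but it type-asserts the value to an internal handler interface and returns a constructor error when the assertion fails. A minimal sketch of that interface-narrowing pattern, with illustrative names rather than the real pleg package types:

```go
// Sketch: a constructor that only accepts implementations which also satisfy
// a narrower, package-internal interface, failing fast otherwise.
package main

import "fmt"

type EventGenerator interface {
	Start()
	Healthy() (bool, error)
}

// handler adds the methods that stay private to the package.
type handler interface {
	EventGenerator
	Stop()
	Relist()
}

type genericPLEG struct{}

func (g *genericPLEG) Start()                 {}
func (g *genericPLEG) Healthy() (bool, error) { return true, nil }
func (g *genericPLEG) Stop()                  {}
func (g *genericPLEG) Relist()                {}

func newEventedPLEG(g EventGenerator) (EventGenerator, error) {
	h, ok := g.(handler)
	if !ok {
		return nil, fmt.Errorf("%T doesn't implement the internal handler interface", g)
	}
	_ = h // kept so the evented generator can force Stop/Relist when its stream degrades
	return g, nil
}

func main() {
	if _, err := newEventedPLEG(&genericPLEG{}); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("generic PLEG satisfies the internal handler interface")
	}
}
```

Keeping Stop, Update and Relist off the exported interface (see the pleg.go hunk below) restricts forced relists to the pleg package itself.
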
diff --git a/pkg/kubelet/pleg/pleg.go b/pkg/kubelet/pleg/pleg.go index 2654f32d6fc26..0a44745925b54 100644 --- a/pkg/kubelet/pleg/pleg.go +++ b/pkg/kubelet/pleg/pleg.go @@ -64,10 +64,16 @@ type PodLifecycleEvent struct { // PodLifecycleEventGenerator contains functions for generating pod life cycle events. type PodLifecycleEventGenerator interface { Start() - Stop() - Update(relistDuration *RelistDuration) Watch() chan *PodLifecycleEvent Healthy() (bool, error) - Relist() UpdateCache(*kubecontainer.Pod, types.UID) (error, bool) } + +// podLifecycleEventGeneratorHandler contains functions that are useful for different PLEGs +// and need not be exposed to rest of the kubelet +type podLifecycleEventGeneratorHandler interface { + PodLifecycleEventGenerator + Stop() + Update(relistDuration *RelistDuration) + Relist() +} diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go index 20e8b493a8f44..8be367f8f0e05 100644 --- a/pkg/kubelet/pod_workers.go +++ b/pkg/kubelet/pod_workers.go @@ -716,19 +716,17 @@ func (p *podWorkers) IsPodForMirrorPodTerminatingByFullName(podFullName string) } func isPodStatusCacheTerminal(status *kubecontainer.PodStatus) bool { - runningContainers := 0 - runningSandboxes := 0 for _, container := range status.ContainerStatuses { if container.State == kubecontainer.ContainerStateRunning { - runningContainers++ + return false } } for _, sb := range status.SandboxStatuses { if sb.State == runtimeapi.PodSandboxState_SANDBOX_READY { - runningSandboxes++ + return false } } - return runningContainers == 0 && runningSandboxes == 0 + return true } // UpdatePod carries a configuration change or termination state to a pod. A pod is either runnable, @@ -1658,7 +1656,7 @@ func killPodNow(podWorkers PodWorkers, recorder record.EventRecorder) eviction.K // we timeout and return an error if we don't get a callback within a reasonable time. 
// the default timeout is relative to the grace period (we settle on 10s to wait for kubelet->runtime traffic to complete in sigkill) - timeout := int64(gracePeriod + (gracePeriod / 2)) + timeout := gracePeriod + (gracePeriod / 2) minTimeout := int64(10) if timeout < minTimeout { timeout = minTimeout diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 7ee617cc5fe59..87a017f9ccc31 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -78,8 +78,8 @@ import ( "k8s.io/kubernetes/pkg/apis/core/v1/validation" "k8s.io/kubernetes/pkg/features" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config" + apisgrpc "k8s.io/kubernetes/pkg/kubelet/apis/grpc" "k8s.io/kubernetes/pkg/kubelet/apis/podresources" - podresourcesgrpc "k8s.io/kubernetes/pkg/kubelet/apis/podresources/grpc" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober" servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics" @@ -219,7 +219,7 @@ type PodResourcesProviders struct { // ListenAndServePodResources initializes a gRPC server to serve the PodResources service func ListenAndServePodResources(endpoint string, providers podresources.PodResourcesProviders) { - server := grpc.NewServer(podresourcesgrpc.WithRateLimiter(podresourcesgrpc.DefaultQPS, podresourcesgrpc.DefaultBurstTokens)) + server := grpc.NewServer(apisgrpc.WithRateLimiter("podresources", podresources.DefaultQPS, podresources.DefaultBurstTokens)) podresourcesapiv1alpha1.RegisterPodResourcesListerServer(server, podresources.NewV1alpha1PodResourcesServer(providers)) podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewV1PodResourcesServer(providers)) @@ -377,9 +377,8 @@ func (s *Server) InstallDefaultHandlers() { healthz.NamedCheck("syncloop", s.syncLoopHealthCheck), ) - if utilfeature.DefaultFeatureGate.Enabled(metricsfeatures.ComponentSLIs) { - slis.SLIMetricsWithReset{}.Install(s.restfulCont) - } + slis.SLIMetricsWithReset{}.Install(s.restfulCont) + s.addMetricsBucketMatcher("pods") ws := new(restful.WebService) ws. 
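
The pod_workers.go hunk above rewrites isPodStatusCacheTerminal to return false as soon as a running container or ready sandbox is found, instead of counting everything and comparing against zero. A small standalone sketch of the same early-return shape, using simplified stand-in types rather than the kubelet's:

```go
// Sketch of the early-return refactor: terminal means nothing is running.
package main

import "fmt"

type state string

const running state = "running"

type podStatus struct {
	containerStates []state
	sandboxStates   []state
}

func isTerminal(s *podStatus) bool {
	for _, c := range s.containerStates {
		if c == running {
			return false // bail out on the first running container
		}
	}
	for _, sb := range s.sandboxStates {
		if sb == running {
			return false // or on the first ready sandbox
		}
	}
	return true
}

func main() {
	fmt.Println(isTerminal(&podStatus{containerStates: []state{"exited"}})) // true
	fmt.Println(isTerminal(&podStatus{sandboxStates: []state{running}}))    // false
}
```
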
diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index 3ee71958dd057..8faeb2314080b 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -18,6 +18,7 @@ package server import ( "context" + "crypto/tls" "errors" "fmt" "io" @@ -959,7 +960,10 @@ func TestServeExecInContainerIdleTimeout(t *testing.T) { url := fw.testHTTPServer.URL + "/exec/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?c=ls&c=-a&" + api.ExecStdinParam + "=1" - upgradeRoundTripper := spdy.NewRoundTripper(nil) + upgradeRoundTripper, err := spdy.NewRoundTripper(&tls.Config{}) + if err != nil { + t.Fatalf("Error creating SpdyRoundTripper: %v", err) + } c := &http.Client{Transport: upgradeRoundTripper} resp, err := c.Do(makeReq(t, "POST", url, "v4.channel.k8s.io")) @@ -1115,7 +1119,10 @@ func testExecAttach(t *testing.T, verb string) { upgradeRoundTripper httpstream.UpgradeRoundTripper c *http.Client ) - upgradeRoundTripper = spdy.NewRoundTripper(nil) + upgradeRoundTripper, err = spdy.NewRoundTripper(&tls.Config{}) + if err != nil { + t.Fatalf("Error creating SpdyRoundTripper: %v", err) + } c = &http.Client{Transport: upgradeRoundTripper} resp, err = c.Do(makeReq(t, "POST", url, "v4.channel.k8s.io")) @@ -1211,7 +1218,10 @@ func TestServePortForwardIdleTimeout(t *testing.T) { url := fw.testHTTPServer.URL + "/portForward/" + podNamespace + "/" + podName - upgradeRoundTripper := spdy.NewRoundTripper(nil) + upgradeRoundTripper, err := spdy.NewRoundTripper(&tls.Config{}) + if err != nil { + t.Fatalf("Error creating SpdyRoundTripper: %v", err) + } c := &http.Client{Transport: upgradeRoundTripper} req := makeReq(t, "POST", url, "portforward.k8s.io") @@ -1310,7 +1320,10 @@ func TestServePortForward(t *testing.T) { c *http.Client ) - upgradeRoundTripper = spdy.NewRoundTripper(nil) + upgradeRoundTripper, err = spdy.NewRoundTripper(&tls.Config{}) + if err != nil { + t.Fatalf("Error creating SpdyRoundTripper: %v", err) + } c = &http.Client{Transport: upgradeRoundTripper} req := makeReq(t, "POST", url, "portforward.k8s.io") diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index d02cee4f56020..3d442fdb02b50 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -26,6 +26,7 @@ import ( "sync" "time" + cadvisormemory "github.com/google/cadvisor/cache/memory" cadvisorfs "github.com/google/cadvisor/fs" cadvisorapiv2 "github.com/google/cadvisor/info/v2" "google.golang.org/grpc/codes" @@ -203,7 +204,10 @@ func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, upd } // Fill available stats for full set of required pod stats - cs := p.makeContainerStats(stats, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(), updateCPUNanoCoreUsage) + cs, err := p.makeContainerStats(stats, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(), updateCPUNanoCoreUsage) + if err != nil { + return nil, fmt.Errorf("make container stats: %w", err) + } p.addPodNetworkStats(ps, podSandboxID, caInfos, cs, containerNetworkStats[podSandboxID]) p.addPodCPUMemoryStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs) p.addSwapStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs) @@ -248,7 +252,9 @@ func (p *criStatsProvider) listPodStatsStrictlyFromCRI(ctx context.Context, upda continue } ps := buildPodStats(podSandbox) - p.addCRIPodContainerStats(criSandboxStat, ps, fsIDtoInfo, containerMap, podSandbox, rootFsInfo, updateCPUNanoCoreUsage) + if err := 
p.addCRIPodContainerStats(criSandboxStat, ps, fsIDtoInfo, containerMap, podSandbox, rootFsInfo, updateCPUNanoCoreUsage); err != nil { + return nil, fmt.Errorf("add CRI pod container stats: %w", err) + } addCRIPodNetworkStats(ps, criSandboxStat) addCRIPodCPUStats(ps, criSandboxStat) addCRIPodMemoryStats(ps, criSandboxStat) @@ -389,10 +395,10 @@ func (p *criStatsProvider) ImageFsStats(ctx context.Context) (*statsapi.FsStats, // return the first one. // // TODO(yguo0905): Support returning stats of multiple image filesystems. - if len(resp) == 0 { + if len(resp.GetImageFilesystems()) == 0 { return nil, fmt.Errorf("imageFs information is unavailable") } - fs := resp[0] + fs := resp.GetImageFilesystems()[0] s := &statsapi.FsStats{ Time: metav1.NewTime(time.Unix(0, fs.Timestamp)), UsedBytes: &fs.UsedBytes.Value, @@ -400,7 +406,10 @@ func (p *criStatsProvider) ImageFsStats(ctx context.Context) (*statsapi.FsStats, if fs.InodesUsed != nil { s.InodesUsed = &fs.InodesUsed.Value } - imageFsInfo := p.getFsInfo(fs.GetFsId()) + imageFsInfo, err := p.getFsInfo(fs.GetFsId()) + if err != nil { + return nil, fmt.Errorf("get filesystem info: %w", err) + } if imageFsInfo != nil { // The image filesystem id is unknown to the local node or there's // an error on retrieving the stats. In these cases, we omit those @@ -421,8 +430,11 @@ func (p *criStatsProvider) ImageFsDevice(ctx context.Context) (string, error) { if err != nil { return "", err } - for _, fs := range resp { - fsInfo := p.getFsInfo(fs.GetFsId()) + for _, fs := range resp.GetImageFilesystems() { + fsInfo, err := p.getFsInfo(fs.GetFsId()) + if err != nil { + return "", fmt.Errorf("get filesystem info: %w", err) + } if fsInfo != nil { return fsInfo.Device, nil } @@ -433,23 +445,26 @@ func (p *criStatsProvider) ImageFsDevice(ctx context.Context) (string, error) { // getFsInfo returns the information of the filesystem with the specified // fsID. If any error occurs, this function logs the error and returns // nil. -func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) *cadvisorapiv2.FsInfo { +func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) (*cadvisorapiv2.FsInfo, error) { if fsID == nil { klog.V(2).InfoS("Failed to get filesystem info: fsID is nil") - return nil + return nil, nil } mountpoint := fsID.GetMountpoint() fsInfo, err := p.cadvisor.GetDirFsInfo(mountpoint) if err != nil { msg := "Failed to get the info of the filesystem with mountpoint" - if err == cadvisorfs.ErrNoSuchDevice { + if errors.Is(err, cadvisorfs.ErrNoSuchDevice) || + errors.Is(err, cadvisorfs.ErrDeviceNotInPartitionsMap) || + errors.Is(err, cadvisormemory.ErrDataNotFound) { klog.V(2).InfoS(msg, "mountpoint", mountpoint, "err", err) } else { klog.ErrorS(err, msg, "mountpoint", mountpoint) + return nil, fmt.Errorf("%s: %w", msg, err) } - return nil + return nil, nil } - return &fsInfo + return &fsInfo, nil } // buildPodStats returns a PodStats that identifies the Pod managing cinfo @@ -587,7 +602,7 @@ func (p *criStatsProvider) makeContainerStats( fsIDtoInfo map[runtimeapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo, meta *runtimeapi.PodSandboxMetadata, updateCPUNanoCoreUsage bool, -) *statsapi.ContainerStats { +) (*statsapi.ContainerStats, error) { result := &statsapi.ContainerStats{ Name: stats.Attributes.Metadata.Name, // The StartTime in the summary API is the container creation time. 
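
The stats-provider changes convert makeContainerStats and getFsInfo from log-and-continue helpers into functions that return wrapped errors, while still treating cadvisor's "device/data not found" class of failures as benign. A hedged sketch of that split, assuming an illustrative sentinel error rather than cadvisor's real ones:

```go
// Sketch: distinguish benign lookup failures (log-worthy, return nil, nil)
// from unexpected ones (wrap with %w and propagate to the caller).
package main

import (
	"errors"
	"fmt"
)

var errNoSuchDevice = errors.New("no such device") // illustrative sentinel

type fsInfo struct{ Device string }

func getDirFsInfo(mountpoint string) (fsInfo, error) {
	return fsInfo{}, errNoSuchDevice // pretend the lookup failed
}

func getFsInfo(mountpoint string) (*fsInfo, error) {
	info, err := getDirFsInfo(mountpoint)
	if err != nil {
		if errors.Is(err, errNoSuchDevice) {
			// benign: the filesystem is simply unknown to this node,
			// so the caller omits those stats instead of failing
			return nil, nil
		}
		return nil, fmt.Errorf("get filesystem info for %q: %w", mountpoint, err)
	}
	return &info, nil
}

func main() {
	info, err := getFsInfo("/var/lib/foo")
	fmt.Println(info, err) // <nil> <nil>
}
```
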
@@ -649,10 +664,14 @@ func (p *criStatsProvider) makeContainerStats( } } fsID := stats.GetWritableLayer().GetFsId() + var err error if fsID != nil { imageFsInfo, found := fsIDtoInfo[*fsID] if !found { - imageFsInfo = p.getFsInfo(fsID) + imageFsInfo, err = p.getFsInfo(fsID) + if err != nil { + return nil, fmt.Errorf("get filesystem info: %w", err) + } fsIDtoInfo[*fsID] = imageFsInfo } if imageFsInfo != nil { @@ -669,12 +688,11 @@ func (p *criStatsProvider) makeContainerStats( // NOTE: This doesn't support the old pod log path, `/var/log/pods/UID`. For containers // using old log path, empty log stats are returned. This is fine, because we don't // officially support in-place upgrade anyway. - var err error result.Logs, err = p.hostStatsProvider.getPodContainerLogStats(meta.GetNamespace(), meta.GetName(), types.UID(meta.GetUid()), container.GetMetadata().GetName(), rootFsInfo) if err != nil { klog.ErrorS(err, "Unable to fetch container log stats", "containerName", container.GetMetadata().GetName()) } - return result + return result, nil } func (p *criStatsProvider) makeContainerCPUAndMemoryStats( diff --git a/pkg/kubelet/stats/cri_stats_provider_linux.go b/pkg/kubelet/stats/cri_stats_provider_linux.go index 2bfa0fd48f38a..9d8a0e479faff 100644 --- a/pkg/kubelet/stats/cri_stats_provider_linux.go +++ b/pkg/kubelet/stats/cri_stats_provider_linux.go @@ -20,6 +20,7 @@ limitations under the License. package stats import ( + "fmt" "time" cadvisorapiv2 "github.com/google/cadvisor/info/v2" @@ -32,17 +33,21 @@ func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.Po ps *statsapi.PodStats, fsIDtoInfo map[runtimeapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo, containerMap map[string]*runtimeapi.Container, podSandbox *runtimeapi.PodSandbox, - rootFsInfo *cadvisorapiv2.FsInfo, updateCPUNanoCoreUsage bool) { + rootFsInfo *cadvisorapiv2.FsInfo, updateCPUNanoCoreUsage bool) error { for _, criContainerStat := range criSandboxStat.Linux.Containers { container, found := containerMap[criContainerStat.Attributes.Id] if !found { continue } // Fill available stats for full set of required pod stats - cs := p.makeContainerStats(criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(), + cs, err := p.makeContainerStats(criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata(), updateCPUNanoCoreUsage) + if err != nil { + return fmt.Errorf("make container stats: %w", err) + } ps.Containers = append(ps.Containers, *cs) } + return nil } func addCRIPodNetworkStats(ps *statsapi.PodStats, criPodStat *runtimeapi.PodSandboxStats) { diff --git a/pkg/kubelet/stats/cri_stats_provider_others.go b/pkg/kubelet/stats/cri_stats_provider_others.go index fb18e89f16cb4..55ed8e7a20be6 100644 --- a/pkg/kubelet/stats/cri_stats_provider_others.go +++ b/pkg/kubelet/stats/cri_stats_provider_others.go @@ -35,7 +35,8 @@ func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.Po ps *statsapi.PodStats, fsIDtoInfo map[runtimeapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo, containerMap map[string]*runtimeapi.Container, podSandbox *runtimeapi.PodSandbox, - rootFsInfo *cadvisorapiv2.FsInfo, updateCPUNanoCoreUsage bool) { + rootFsInfo *cadvisorapiv2.FsInfo, updateCPUNanoCoreUsage bool) error { + return nil } func addCRIPodNetworkStats(ps *statsapi.PodStats, criPodStat *runtimeapi.PodSandboxStats) { diff --git a/pkg/kubelet/stats/cri_stats_provider_test.go b/pkg/kubelet/stats/cri_stats_provider_test.go index 07fca7d2db2af..f24b1a025ae9d 100644 --- 
a/pkg/kubelet/stats/cri_stats_provider_test.go +++ b/pkg/kubelet/stats/cri_stats_provider_test.go @@ -898,9 +898,7 @@ func makeFakePodSandboxStatsStrictlyFromCRI(seed int, podSandbox *critest.FakePo }, Linux: &runtimeapi.LinuxPodSandboxStats{}, } - for _, cs := range podContainerStats { - podSandboxStats.Linux.Containers = append(podSandboxStats.Linux.Containers, cs) - } + podSandboxStats.Linux.Containers = append(podSandboxStats.Linux.Containers, podContainerStats...) if podSandbox.State == runtimeapi.PodSandboxState_SANDBOX_NOTREADY { podSandboxStats.Linux.Cpu = nil podSandboxStats.Linux.Memory = nil diff --git a/pkg/kubelet/stats/cri_stats_provider_windows.go b/pkg/kubelet/stats/cri_stats_provider_windows.go index e64da34c4dbd7..0ea6a864103a1 100644 --- a/pkg/kubelet/stats/cri_stats_provider_windows.go +++ b/pkg/kubelet/stats/cri_stats_provider_windows.go @@ -20,6 +20,7 @@ limitations under the License. package stats import ( + "fmt" "time" "github.com/Microsoft/hcsshim" @@ -86,16 +87,22 @@ func (p *criStatsProvider) addCRIPodContainerStats(criSandboxStat *runtimeapi.Po containerMap map[string]*runtimeapi.Container, podSandbox *runtimeapi.PodSandbox, rootFsInfo *cadvisorapiv2.FsInfo, - updateCPUNanoCoreUsage bool) { + updateCPUNanoCoreUsage bool) error { for _, criContainerStat := range criSandboxStat.Windows.Containers { container, found := containerMap[criContainerStat.Attributes.Id] if !found { continue } // Fill available stats for full set of required pod stats - cs := p.makeWinContainerStats(criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata()) + cs, err := p.makeWinContainerStats(criContainerStat, container, rootFsInfo, fsIDtoInfo, podSandbox.GetMetadata()) + if err != nil { + return fmt.Errorf("make container stats: %w", err) + + } ps.Containers = append(ps.Containers, *cs) } + + return nil } func (p *criStatsProvider) makeWinContainerStats( @@ -103,7 +110,7 @@ func (p *criStatsProvider) makeWinContainerStats( container *runtimeapi.Container, rootFsInfo *cadvisorapiv2.FsInfo, fsIDtoInfo map[runtimeapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo, - meta *runtimeapi.PodSandboxMetadata) *statsapi.ContainerStats { + meta *runtimeapi.PodSandboxMetadata) (*statsapi.ContainerStats, error) { result := &statsapi.ContainerStats{ Name: stats.Attributes.Metadata.Name, // The StartTime in the summary API is the container creation time. @@ -149,11 +156,15 @@ func (p *criStatsProvider) makeWinContainerStats( result.Rootfs.UsedBytes = &stats.WritableLayer.UsedBytes.Value } } + var err error fsID := stats.GetWritableLayer().GetFsId() if fsID != nil { imageFsInfo, found := fsIDtoInfo[*fsID] if !found { - imageFsInfo = p.getFsInfo(fsID) + imageFsInfo, err = p.getFsInfo(fsID) + if err != nil { + return nil, fmt.Errorf("get filesystem info: %w", err) + } fsIDtoInfo[*fsID] = imageFsInfo } if imageFsInfo != nil { @@ -168,12 +179,11 @@ func (p *criStatsProvider) makeWinContainerStats( // NOTE: This doesn't support the old pod log path, `/var/log/pods/UID`. For containers // using old log path, empty log stats are returned. This is fine, because we don't // officially support in-place upgrade anyway. 
- var err error result.Logs, err = p.hostStatsProvider.getPodContainerLogStats(meta.GetNamespace(), meta.GetName(), types.UID(meta.GetUid()), container.GetMetadata().GetName(), rootFsInfo) if err != nil { klog.ErrorS(err, "Unable to fetch container log stats", "containerName", container.GetMetadata().GetName()) } - return result + return result, nil } // hcsStatsToNetworkStats converts hcsshim.Statistics.Network to statsapi.NetworkStats diff --git a/pkg/kubelet/stats/pidlimit/pidlimit_linux.go b/pkg/kubelet/stats/pidlimit/pidlimit_linux.go index 0b6e95ffd8979..25e7655203211 100644 --- a/pkg/kubelet/stats/pidlimit/pidlimit_linux.go +++ b/pkg/kubelet/stats/pidlimit/pidlimit_linux.go @@ -36,7 +36,7 @@ func Stats() (*statsapi.RlimitStats, error) { rlimit := &statsapi.RlimitStats{} taskMax := int64(-1) - // Calculate the mininum of kernel.pid_max and kernel.threads-max as they both specify the + // Calculate the minimum of kernel.pid_max and kernel.threads-max as they both specify the // system-wide limit on the number of tasks. for _, file := range []string{"/proc/sys/kernel/pid_max", "/proc/sys/kernel/threads-max"} { if content, err := os.ReadFile(file); err == nil { diff --git a/pkg/kubelet/status/generate.go b/pkg/kubelet/status/generate.go index c6707345b6717..774cbadb4c273 100644 --- a/pkg/kubelet/status/generate.go +++ b/pkg/kubelet/status/generate.go @@ -247,12 +247,12 @@ func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, podStatus *kubecont // fresh sandbox and configure networking for the sandbox. if !newSandboxNeeded { return v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionTrue, } } return v1.PodCondition{ - Type: kubetypes.PodReadyToStartContainers, + Type: v1.PodReadyToStartContainers, Status: v1.ConditionFalse, } } diff --git a/pkg/kubelet/status/generate_test.go b/pkg/kubelet/status/generate_test.go index 6bb3207999837..85e252b5625c0 100644 --- a/pkg/kubelet/status/generate_test.go +++ b/pkg/kubelet/status/generate_test.go @@ -26,7 +26,6 @@ import ( v1 "k8s.io/api/core/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/utils/pointer" ) @@ -615,7 +614,7 @@ func TestGeneratePodReadyToStartContainersCondition(t *testing.T) { }, } { t.Run(desc, func(t *testing.T) { - test.expected.Type = kubetypes.PodReadyToStartContainers + test.expected.Type = v1.PodReadyToStartContainers condition := GeneratePodReadyToStartContainersCondition(test.pod, test.status) require.Equal(t, test.expected.Type, condition.Type) require.Equal(t, test.expected.Status, condition.Status) diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index 0c359f2b3fdb5..b08923bae4bbf 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -634,7 +634,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp updateLastTransitionTime(&status, &oldStatus, v1.PodInitialized) // Set PodReadyToStartContainersCondition.LastTransitionTime. - updateLastTransitionTime(&status, &oldStatus, kubetypes.PodReadyToStartContainers) + updateLastTransitionTime(&status, &oldStatus, v1.PodReadyToStartContainers) // Set PodScheduledCondition.LastTransitionTime. 
updateLastTransitionTime(&status, &oldStatus, v1.PodScheduled) diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index 23b9f17f76f9d..d5f9f4936c7a3 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -500,7 +500,7 @@ func TestStatusNormalizationEnforcesMaxBytes(t *testing.T) { Name: fmt.Sprintf("container%d", i), LastTerminationState: v1.ContainerState{ Terminated: &v1.ContainerStateTerminated{ - Message: strings.Repeat("abcdefgh", int(24+i%3)), + Message: strings.Repeat("abcdefgh", 24+i%3), }, }, } diff --git a/pkg/kubelet/sysctl/safe_sysctls.go b/pkg/kubelet/sysctl/safe_sysctls.go index ea3fcd57d270f..098da3434c74c 100644 --- a/pkg/kubelet/sysctl/safe_sysctls.go +++ b/pkg/kubelet/sysctl/safe_sysctls.go @@ -25,24 +25,47 @@ import ( "k8s.io/kubernetes/pkg/proxy/ipvs" ) -// refer to https://github.com/torvalds/linux/commit/122ff243f5f104194750ecbc76d5946dd1eec934. -const ipLocalReservedPortsMinNamespacedKernelVersion = "3.16" - -var safeSysctls = []string{ - "kernel.shm_rmid_forced", - "net.ipv4.ip_local_port_range", - "net.ipv4.tcp_syncookies", - "net.ipv4.ping_group_range", - "net.ipv4.ip_unprivileged_port_start", +type sysctl struct { + // the name of sysctl + name string + // the minimum kernel version where the sysctl is available + kernel string } -var safeSysctlsIncludeReservedPorts = []string{ - "kernel.shm_rmid_forced", - "net.ipv4.ip_local_port_range", - "net.ipv4.tcp_syncookies", - "net.ipv4.ping_group_range", - "net.ipv4.ip_unprivileged_port_start", - "net.ipv4.ip_local_reserved_ports", +var safeSysctls = []sysctl{ + { + name: "kernel.shm_rmid_forced", + }, { + name: "net.ipv4.ip_local_port_range", + }, { + name: "net.ipv4.tcp_syncookies", + }, { + name: "net.ipv4.ping_group_range", + }, { + name: "net.ipv4.ip_unprivileged_port_start", + }, { + name: "net.ipv4.ip_local_reserved_ports", + // refer to https://github.com/torvalds/linux/commit/122ff243f5f104194750ecbc76d5946dd1eec934. + kernel: "3.16", + }, { + name: "net.ipv4.tcp_keepalive_time", + // refer to https://github.com/torvalds/linux/commit/13b287e8d1cad951634389f85b8c9b816bd3bb1e. + kernel: "4.5", + }, { + // refer to https://github.com/torvalds/linux/commit/1e579caa18b96f9eb18f4f5416658cd15f37c062. + name: "net.ipv4.tcp_fin_timeout", + kernel: "4.6", + }, + { + // refer to https://github.com/torvalds/linux/commit/b840d15d39128d08ed4486085e5507d2617b9ae1. + name: "net.ipv4.tcp_keepalive_intvl", + kernel: "4.5", + }, + { + // refer to https://github.com/torvalds/linux/commit/9bd6861bd4326e3afd3f14a9ec8a723771fb20bb. + name: "net.ipv4.tcp_keepalive_probes", + kernel: "4.5", + }, } // SafeSysctlAllowlist returns the allowlist of safe sysctls and safe sysctl patterns (ending in *). @@ -51,19 +74,32 @@ var safeSysctlsIncludeReservedPorts = []string{ // - it is namespaced in the container or the pod // - it is isolated, i.e. has no influence on any other pod on the same node. 
func SafeSysctlAllowlist() []string { - if goruntime.GOOS == "linux" { - // make sure we're on a new enough kernel that the ip_local_reserved_ports sysctl is namespaced - kernelVersion, err := getKernelVersion() - if err != nil { - klog.ErrorS(err, "Failed to get kernel version, dropping net.ipv4.ip_local_reserved_ports from safe sysctl list") - return safeSysctls + if goruntime.GOOS != "linux" { + return nil + } + return getSafeSysctlAllowlist(getKernelVersion) +} + +func getSafeSysctlAllowlist(getVersion func() (*version.Version, error)) []string { + kernelVersion, err := getVersion() + if err != nil { + klog.ErrorS(err, "failed to get kernel version, unable to determine which sysctls are available") + } + + var safeSysctlAllowlist []string + for _, sc := range safeSysctls { + if sc.kernel == "" { + safeSysctlAllowlist = append(safeSysctlAllowlist, sc.name) + continue } - if kernelVersion.LessThan(version.MustParseGeneric(ipLocalReservedPortsMinNamespacedKernelVersion)) { - klog.ErrorS(nil, "Kernel version is too old, dropping net.ipv4.ip_local_reserved_ports from safe sysctl list", "kernelVersion", kernelVersion) - return safeSysctls + + if kernelVersion != nil && kernelVersion.AtLeast(version.MustParseGeneric(sc.kernel)) { + safeSysctlAllowlist = append(safeSysctlAllowlist, sc.name) + } else { + klog.InfoS("kernel version is too old, dropping the sysctl from safe sysctl list", "kernelVersion", kernelVersion, "sysctl", sc.name) } } - return safeSysctlsIncludeReservedPorts + return safeSysctlAllowlist } func getKernelVersion() (*version.Version, error) { @@ -71,6 +107,7 @@ func getKernelVersion() (*version.Version, error) { if err != nil { return nil, fmt.Errorf("failed to get kernel version: %w", err) } + kernelVersion, err := version.ParseGeneric(kernelVersionStr) if err != nil { return nil, fmt.Errorf("failed to parse kernel version: %w", err) diff --git a/pkg/kubelet/sysctl/safe_sysctls_test.go b/pkg/kubelet/sysctl/safe_sysctls_test.go new file mode 100644 index 0000000000000..2fef48157d72e --- /dev/null +++ b/pkg/kubelet/sysctl/safe_sysctls_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sysctl + +import ( + "fmt" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/util/version" +) + +func Test_getSafeSysctlAllowlist(t *testing.T) { + tests := []struct { + name string + getVersion func() (*version.Version, error) + want []string + }{ + { + name: "failed to get kernelVersion, only return safeSysctls with no kernelVersion limit", + getVersion: func() (*version.Version, error) { + return nil, fmt.Errorf("fork error") + }, + want: []string{ + "kernel.shm_rmid_forced", + "net.ipv4.ip_local_port_range", + "net.ipv4.tcp_syncookies", + "net.ipv4.ping_group_range", + "net.ipv4.ip_unprivileged_port_start", + }, + }, + { + name: "kernelVersion is 3.18.0, return safeSysctls with no kernelVersion limit and net.ipv4.ip_local_reserved_ports", + getVersion: func() (*version.Version, error) { + kernelVersionStr := "3.18.0-957.27.2.el7.x86_64" + return version.ParseGeneric(kernelVersionStr) + }, + want: []string{ + "kernel.shm_rmid_forced", + "net.ipv4.ip_local_port_range", + "net.ipv4.tcp_syncookies", + "net.ipv4.ping_group_range", + "net.ipv4.ip_unprivileged_port_start", + "net.ipv4.ip_local_reserved_ports", + }, + }, + { + name: "kernelVersion is 5.15.0, return safeSysctls with no kernelVersion limit and kernelVersion below 5.15.0", + getVersion: func() (*version.Version, error) { + kernelVersionStr := "5.15.0-75-generic" + return version.ParseGeneric(kernelVersionStr) + }, + want: []string{ + "kernel.shm_rmid_forced", + "net.ipv4.ip_local_port_range", + "net.ipv4.tcp_syncookies", + "net.ipv4.ping_group_range", + "net.ipv4.ip_unprivileged_port_start", + "net.ipv4.ip_local_reserved_ports", + "net.ipv4.tcp_keepalive_time", + "net.ipv4.tcp_fin_timeout", + "net.ipv4.tcp_keepalive_intvl", + "net.ipv4.tcp_keepalive_probes", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getSafeSysctlAllowlist(tt.getVersion); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getSafeSysctlAllowlist() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/kubelet/types/constants.go b/pkg/kubelet/types/constants.go index 3f085d22a11dc..796825aecb147 100644 --- a/pkg/kubelet/types/constants.go +++ b/pkg/kubelet/types/constants.go @@ -38,12 +38,3 @@ const ( LimitedSwap = "LimitedSwap" UnlimitedSwap = "UnlimitedSwap" ) - -// Alpha conditions managed by Kubelet that are not yet part of the API. The -// entries here should be moved to staging/src/k8s.io.api/core/v1/types.go -// once the feature managing the condition graduates to Beta. -const ( - // PodReadyToStartContainers pod sandbox is successfully configured and - // the pod is ready to launch containers. 
- PodReadyToStartContainers = "PodReadyToStartContainers" -) diff --git a/pkg/kubelet/types/pod_status.go b/pkg/kubelet/types/pod_status.go index f69ca822a0b2a..4a54d0ce3fa73 100644 --- a/pkg/kubelet/types/pod_status.go +++ b/pkg/kubelet/types/pod_status.go @@ -38,7 +38,7 @@ func PodConditionByKubelet(conditionType v1.PodConditionType) bool { } } if utilfeature.DefaultFeatureGate.Enabled(features.PodReadyToStartContainersCondition) { - if conditionType == PodReadyToStartContainers { + if conditionType == v1.PodReadyToStartContainers { return true } } diff --git a/pkg/kubelet/types/pod_status_test.go b/pkg/kubelet/types/pod_status_test.go index 84cea89e1b968..dfdce44c45329 100644 --- a/pkg/kubelet/types/pod_status_test.go +++ b/pkg/kubelet/types/pod_status_test.go @@ -34,7 +34,7 @@ func TestPodConditionByKubelet(t *testing.T) { v1.PodReady, v1.PodInitialized, v1.ContainersReady, - PodReadyToStartContainers, + v1.PodReadyToStartContainers, } for _, tc := range trueCases { diff --git a/pkg/kubelet/types/pod_update_test.go b/pkg/kubelet/types/pod_update_test.go index 32100654363ee..41e32a8640e3f 100644 --- a/pkg/kubelet/types/pod_update_test.go +++ b/pkg/kubelet/types/pod_update_test.go @@ -27,7 +27,7 @@ import ( ) var ( - systemPriority = int32(scheduling.SystemCriticalPriority) + systemPriority = scheduling.SystemCriticalPriority systemPriorityUpper = systemPriority + 1000 ) diff --git a/pkg/kubelet/userns/userns_manager.go b/pkg/kubelet/userns/userns_manager.go index ffd23630f13eb..95b08184c95ab 100644 --- a/pkg/kubelet/userns/userns_manager.go +++ b/pkg/kubelet/userns/userns_manager.go @@ -268,6 +268,19 @@ func (m *UsernsManager) Release(podUID types.UID) { m.releaseWithLock(podUID) } +// podAllocated returns true if the pod is allocated, false otherwise. +func (m *UsernsManager) podAllocated(podUID types.UID) bool { + if !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) { + return false + } + + m.lock.Lock() + defer m.lock.Unlock() + + _, ok := m.usedBy[podUID] + return ok +} + func (m *UsernsManager) releaseWithLock(pod types.UID) { v, ok := m.usedBy[pod] if !ok { @@ -374,7 +387,7 @@ func (m *UsernsManager) GetOrCreateUserNamespaceMappings(pod *v1.Pod) (*runtimea m.lock.Lock() defer m.lock.Unlock() - if pod.Spec.HostUsers == nil || *pod.Spec.HostUsers == true { + if pod.Spec.HostUsers == nil || *pod.Spec.HostUsers { return &runtimeapi.UserNamespace{ Mode: runtimeapi.NamespaceMode_NODE, }, nil diff --git a/pkg/kubelet/userns/userns_manager_disabled_test.go b/pkg/kubelet/userns/userns_manager_disabled_test.go new file mode 100644 index 0000000000000..1da50867b165e --- /dev/null +++ b/pkg/kubelet/userns/userns_manager_disabled_test.go @@ -0,0 +1,70 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package userns + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + utilfeature "k8s.io/apiserver/pkg/util/feature" + featuregatetesting "k8s.io/component-base/featuregate/testing" + pkgfeatures "k8s.io/kubernetes/pkg/features" +) + +// Test all public methods behave ok when the feature gate is disabled. + +func TestMakeUserNsManagerDisabled(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.UserNamespacesSupport, false)() + + testUserNsPodsManager := &testUserNsPodsManager{} + _, err := MakeUserNsManager(testUserNsPodsManager) + assert.NoError(t, err) +} + +func TestReleaseDisabled(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.UserNamespacesSupport, false)() + + testUserNsPodsManager := &testUserNsPodsManager{} + m, err := MakeUserNsManager(testUserNsPodsManager) + require.NoError(t, err) + + m.Release("some-pod") +} + +func TestGetOrCreateUserNamespaceMappingsDisabled(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.UserNamespacesSupport, false)() + + testUserNsPodsManager := &testUserNsPodsManager{} + m, err := MakeUserNsManager(testUserNsPodsManager) + require.NoError(t, err) + + userns, err := m.GetOrCreateUserNamespaceMappings(nil) + assert.NoError(t, err) + assert.Nil(t, userns) +} + +func TestCleanupOrphanedPodUsernsAllocationsDisabled(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.UserNamespacesSupport, false)() + + testUserNsPodsManager := &testUserNsPodsManager{} + m, err := MakeUserNsManager(testUserNsPodsManager) + require.NoError(t, err) + + err = m.CleanupOrphanedPodUsernsAllocations(nil, nil) + assert.NoError(t, err) +} diff --git a/pkg/kubelet/userns/userns_manager_test.go b/pkg/kubelet/userns/userns_manager_test.go index fc74025d75d0e..d2156dd4c6d24 100644 --- a/pkg/kubelet/userns/userns_manager_test.go +++ b/pkg/kubelet/userns/userns_manager_test.go @@ -22,21 +22,33 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" pkgfeatures "k8s.io/kubernetes/pkg/features" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) type testUserNsPodsManager struct { + podDir string + podList []types.UID } func (m *testUserNsPodsManager) GetPodDir(podUID types.UID) string { - return "/tmp/non-existant-dir.This-is-not-used-in-tests" + if m.podDir == "" { + return "/tmp/non-existant-dir.This-is-not-used-in-tests" + } + return m.podDir } func (m *testUserNsPodsManager) ListPodsFromDisk() ([]types.UID, error) { - return nil, nil + if len(m.podList) == 0 { + return nil, nil + } + return m.podList, nil } func TestUserNsManagerAllocate(t *testing.T) { @@ -171,3 +183,179 @@ func TestUserNsManagerParseUserNsFile(t *testing.T) { }) } } + +func TestGetOrCreateUserNamespaceMappings(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.UserNamespacesSupport, true)() + + trueVal := true + falseVal := false + + cases := []struct { + name string + pod *v1.Pod + expMode runtimeapi.NamespaceMode + success bool + }{ + { + 
name: "no user namespace", + pod: &v1.Pod{}, + expMode: runtimeapi.NamespaceMode_NODE, + success: true, + }, + { + name: "opt-in to host user namespace", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + HostUsers: &trueVal, + }, + }, + expMode: runtimeapi.NamespaceMode_NODE, + success: true, + }, + { + name: "user namespace", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + HostUsers: &falseVal, + }, + }, + expMode: runtimeapi.NamespaceMode_POD, + success: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + // These tests will create the userns file, so use an existing podDir. + testUserNsPodsManager := &testUserNsPodsManager{podDir: t.TempDir()} + m, err := MakeUserNsManager(testUserNsPodsManager) + assert.NoError(t, err) + + userns, err := m.GetOrCreateUserNamespaceMappings(tc.pod) + if (tc.success && err != nil) || (!tc.success && err == nil) { + t.Errorf("expected success: %v but got error: %v", tc.success, err) + } + + if userns.GetMode() != tc.expMode { + t.Errorf("expected mode: %v but got: %v", tc.expMode, userns.GetMode()) + } + }) + } +} + +func TestCleanupOrphanedPodUsernsAllocations(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.UserNamespacesSupport, true)() + + cases := []struct { + name string + runningPods []*kubecontainer.Pod + pods []*v1.Pod + listPods []types.UID /* pods to list */ + podSetBeforeCleanup []types.UID /* pods to record before cleanup */ + podSetAfterCleanup []types.UID /* pods set expected after cleanup */ + podUnsetAfterCleanup []types.UID /* pods set expected after cleanup */ + }{ + { + name: "no stale pods", + listPods: []types.UID{"pod-1", "pod-2"}, + }, + { + name: "no stale pods set", + podSetBeforeCleanup: []types.UID{"pod-1", "pod-2"}, + listPods: []types.UID{"pod-1", "pod-2"}, + podUnsetAfterCleanup: []types.UID{"pod-1", "pod-2"}, + }, + { + name: "one running pod", + listPods: []types.UID{"pod-1", "pod-2"}, + podSetBeforeCleanup: []types.UID{"pod-1", "pod-2"}, + runningPods: []*kubecontainer.Pod{{ID: "pod-1"}}, + podSetAfterCleanup: []types.UID{"pod-1"}, + podUnsetAfterCleanup: []types.UID{"pod-2"}, + }, + { + name: "pod set before cleanup but not listed ==> unset", + podSetBeforeCleanup: []types.UID{"pod-1", "pod-2"}, + runningPods: []*kubecontainer.Pod{{ID: "pod-1"}}, + podUnsetAfterCleanup: []types.UID{"pod-1", "pod-2"}, + }, + { + name: "one pod", + listPods: []types.UID{"pod-1", "pod-2"}, + podSetBeforeCleanup: []types.UID{"pod-1", "pod-2"}, + pods: []*v1.Pod{{ObjectMeta: metav1.ObjectMeta{UID: "pod-1"}}}, + podSetAfterCleanup: []types.UID{"pod-1"}, + podUnsetAfterCleanup: []types.UID{"pod-2"}, + }, + { + name: "no listed pods ==> all unset", + podSetBeforeCleanup: []types.UID{"pod-1", "pod-2"}, + podUnsetAfterCleanup: []types.UID{"pod-1", "pod-2"}, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + testUserNsPodsManager := &testUserNsPodsManager{ + podList: tc.listPods, + } + m, err := MakeUserNsManager(testUserNsPodsManager) + require.NoError(t, err) + + // Record the userns range as used + for i, pod := range tc.podSetBeforeCleanup { + err := m.record(pod, uint32((i+1)*65536), 65536) + require.NoError(t, err) + } + + err = m.CleanupOrphanedPodUsernsAllocations(tc.pods, tc.runningPods) + require.NoError(t, err) + + for _, pod := range tc.podSetAfterCleanup { + ok := m.podAllocated(pod) + assert.True(t, ok, "pod %q should be allocated", pod) + } + + for _, pod := range tc.podUnsetAfterCleanup { + ok := m.podAllocated(pod) + 
assert.False(t, ok, "pod %q should not be allocated", pod) + } + }) + } +} + +func TestAllocateMaxPods(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.UserNamespacesSupport, true)() + + testUserNsPodsManager := &testUserNsPodsManager{} + m, err := MakeUserNsManager(testUserNsPodsManager) + require.NoError(t, err) + + // The first maxPods allocations should succeed. + for i := 0; i < maxPods; i++ { + _, _, err = m.allocateOne(types.UID(fmt.Sprintf("%d", i))) + require.NoError(t, err) + } + + // The next allocation should fail, hitting maxPods. + _, _, err = m.allocateOne(types.UID(fmt.Sprintf("%d", maxPods+1))) + assert.Error(t, err) +} + +func TestRecordMaxPods(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.UserNamespacesSupport, true)() + + testUserNsPodsManager := &testUserNsPodsManager{} + m, err := MakeUserNsManager(testUserNsPodsManager) + require.NoError(t, err) + + // The first maxPods allocations should succeed. + for i := 0; i < maxPods; i++ { + err = m.record(types.UID(fmt.Sprintf("%d", i)), uint32((i+1)*65536), 65536) + require.NoError(t, err) + } + + // The next allocation should fail, hitting maxPods. + err = m.record(types.UID(fmt.Sprintf("%d", maxPods+1)), uint32((maxPods+1)*65536), 65536) + assert.Error(t, err) +} diff --git a/pkg/kubelet/util/node_startup_latency_tracker.go b/pkg/kubelet/util/node_startup_latency_tracker.go new file mode 100644 index 0000000000000..815e4e81eaf91 --- /dev/null +++ b/pkg/kubelet/util/node_startup_latency_tracker.go @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "sync" + "time" + + "k8s.io/kubernetes/pkg/kubelet/metrics" + "k8s.io/utils/clock" +) + +type NodeStartupLatencyTracker interface { + // This function may be called across Kubelet restart. + RecordAttemptRegisterNode() + // This function should not be called across Kubelet restart. + RecordRegisteredNewNode() + // This function may be called across Kubelet restart. 
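+	// RecordNodeReady records only the first observation of node readiness; it is ignored until registration has been recorded.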
+ RecordNodeReady() +} + +type basicNodeStartupLatencyTracker struct { + lock sync.Mutex + + bootTime time.Time + kubeletStartTime time.Time + firstRegistrationAttemptTime time.Time + firstRegisteredNewNodeTime time.Time + firstNodeReadyTime time.Time + + // For testability + clock clock.Clock +} + +func NewNodeStartupLatencyTracker() NodeStartupLatencyTracker { + bootTime, err := GetBootTime() + if err != nil { + bootTime = time.Time{} + } + return &basicNodeStartupLatencyTracker{ + bootTime: bootTime, + kubeletStartTime: time.Now(), + clock: clock.RealClock{}, + } +} + +func (n *basicNodeStartupLatencyTracker) RecordAttemptRegisterNode() { + n.lock.Lock() + defer n.lock.Unlock() + + if !n.firstRegistrationAttemptTime.IsZero() { + return + } + + n.firstRegistrationAttemptTime = n.clock.Now() +} + +func (n *basicNodeStartupLatencyTracker) RecordRegisteredNewNode() { + n.lock.Lock() + defer n.lock.Unlock() + + if n.firstRegistrationAttemptTime.IsZero() || !n.firstRegisteredNewNodeTime.IsZero() { + return + } + + n.firstRegisteredNewNodeTime = n.clock.Now() + + if !n.bootTime.IsZero() { + metrics.NodeStartupPreKubeletDuration.Set(n.kubeletStartTime.Sub(n.bootTime).Seconds()) + } + metrics.NodeStartupPreRegistrationDuration.Set(n.firstRegistrationAttemptTime.Sub(n.kubeletStartTime).Seconds()) + metrics.NodeStartupRegistrationDuration.Set(n.firstRegisteredNewNodeTime.Sub(n.firstRegistrationAttemptTime).Seconds()) +} + +func (n *basicNodeStartupLatencyTracker) RecordNodeReady() { + n.lock.Lock() + defer n.lock.Unlock() + + if n.firstRegisteredNewNodeTime.IsZero() || !n.firstNodeReadyTime.IsZero() { + return + } + + n.firstNodeReadyTime = n.clock.Now() + + metrics.NodeStartupPostRegistrationDuration.Set(n.firstNodeReadyTime.Sub(n.firstRegisteredNewNodeTime).Seconds()) + if !n.bootTime.IsZero() { + metrics.NodeStartupDuration.Set(n.firstNodeReadyTime.Sub(n.bootTime).Seconds()) + } +} diff --git a/pkg/kubelet/util/node_startup_latency_tracker_test.go b/pkg/kubelet/util/node_startup_latency_tracker_test.go new file mode 100644 index 0000000000000..bcb341100b30c --- /dev/null +++ b/pkg/kubelet/util/node_startup_latency_tracker_test.go @@ -0,0 +1,418 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "k8s.io/component-base/metrics/testutil" + "k8s.io/kubernetes/pkg/kubelet/metrics" + "k8s.io/utils/clock" + testingclock "k8s.io/utils/clock/testing" +) + +const ( + metricsNameNodeStartupPreKubelet = "kubelet_node_startup_pre_kubelet_duration_seconds" + metricsNameNodeStartupPreRegistration = "kubelet_node_startup_pre_registration_duration_seconds" + metricsNameNodeStartupRegistration = "kubelet_node_startup_registration_duration_seconds" + metricsNameNodeStartupPostRegistration = "kubelet_node_startup_post_registration_duration_seconds" + metricsNameNodeStartup = "kubelet_node_startup_duration_seconds" +) + +func TestNodeStartupLatencyNoEvents(t *testing.T) { + t.Run("metrics registered; no incoming events", func(t *testing.T) { + metrics.Register() + defer clearMetrics() + + tracker := &basicNodeStartupLatencyTracker{ + bootTime: frozenTime.Add(-100 * time.Millisecond), + kubeletStartTime: frozenTime, + clock: clock.RealClock{}, + } + + wants := ` + # HELP kubelet_node_startup_duration_seconds [ALPHA] Duration in seconds of node startup in total. + # TYPE kubelet_node_startup_duration_seconds gauge + kubelet_node_startup_duration_seconds 0 + # HELP kubelet_node_startup_post_registration_duration_seconds [ALPHA] Duration in seconds of node startup after registration. + # TYPE kubelet_node_startup_post_registration_duration_seconds gauge + kubelet_node_startup_post_registration_duration_seconds 0 + # HELP kubelet_node_startup_pre_kubelet_duration_seconds [ALPHA] Duration in seconds of node startup before kubelet starts. + # TYPE kubelet_node_startup_pre_kubelet_duration_seconds gauge + kubelet_node_startup_pre_kubelet_duration_seconds 0 + # HELP kubelet_node_startup_pre_registration_duration_seconds [ALPHA] Duration in seconds of node startup before registration. + # TYPE kubelet_node_startup_pre_registration_duration_seconds gauge + kubelet_node_startup_pre_registration_duration_seconds 0 + # HELP kubelet_node_startup_registration_duration_seconds [ALPHA] Duration in seconds of node startup during registration. 
+ # TYPE kubelet_node_startup_registration_duration_seconds gauge + kubelet_node_startup_registration_duration_seconds 0 + ` + if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(wants), + metricsNameNodeStartupPreKubelet, + metricsNameNodeStartupPreRegistration, + metricsNameNodeStartupRegistration, + metricsNameNodeStartupPostRegistration, + metricsNameNodeStartup, + ); err != nil { + t.Error(err) + } + + assert.Equal(t, frozenTime.Add(-100*time.Millisecond), tracker.bootTime) + assert.Equal(t, frozenTime, tracker.kubeletStartTime) + assert.True(t, tracker.firstRegistrationAttemptTime.IsZero()) + assert.True(t, tracker.firstRegisteredNewNodeTime.IsZero()) + assert.True(t, tracker.firstNodeReadyTime.IsZero()) + }) +} + +func TestRecordAllTimestamps(t *testing.T) { + t.Run("all timestamps are recorded", func(t *testing.T) { + metrics.Register() + defer clearMetrics() + + fakeClock := testingclock.NewFakeClock(frozenTime) + tracker := &basicNodeStartupLatencyTracker{ + bootTime: frozenTime.Add(-100 * time.Millisecond), + kubeletStartTime: frozenTime, + clock: fakeClock, + } + + fakeClock.Step(800 * time.Millisecond) + tracker.RecordAttemptRegisterNode() + + assert.Equal(t, frozenTime.Add(800*time.Millisecond), tracker.firstRegistrationAttemptTime) + + fakeClock.Step(400 * time.Millisecond) + tracker.RecordRegisteredNewNode() + + assert.Equal(t, frozenTime.Add(1200*time.Millisecond), tracker.firstRegisteredNewNodeTime) + + fakeClock.Step(1100 * time.Millisecond) + tracker.RecordNodeReady() + + assert.Equal(t, frozenTime.Add(2300*time.Millisecond), tracker.firstNodeReadyTime) + + wants := ` + # HELP kubelet_node_startup_duration_seconds [ALPHA] Duration in seconds of node startup in total. + # TYPE kubelet_node_startup_duration_seconds gauge + kubelet_node_startup_duration_seconds 2.4 + # HELP kubelet_node_startup_post_registration_duration_seconds [ALPHA] Duration in seconds of node startup after registration. + # TYPE kubelet_node_startup_post_registration_duration_seconds gauge + kubelet_node_startup_post_registration_duration_seconds 1.1 + # HELP kubelet_node_startup_pre_kubelet_duration_seconds [ALPHA] Duration in seconds of node startup before kubelet starts. + # TYPE kubelet_node_startup_pre_kubelet_duration_seconds gauge + kubelet_node_startup_pre_kubelet_duration_seconds 0.1 + # HELP kubelet_node_startup_pre_registration_duration_seconds [ALPHA] Duration in seconds of node startup before registration. + # TYPE kubelet_node_startup_pre_registration_duration_seconds gauge + kubelet_node_startup_pre_registration_duration_seconds 0.8 + # HELP kubelet_node_startup_registration_duration_seconds [ALPHA] Duration in seconds of node startup during registration. 
+ # TYPE kubelet_node_startup_registration_duration_seconds gauge + kubelet_node_startup_registration_duration_seconds 0.4 + ` + if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(wants), + metricsNameNodeStartupPreKubelet, + metricsNameNodeStartupPreRegistration, + metricsNameNodeStartupRegistration, + metricsNameNodeStartupPostRegistration, + metricsNameNodeStartup, + ); err != nil { + t.Error(err) + } + }) +} + +func TestRecordAttemptRegister(t *testing.T) { + t.Run("record attempt register node", func(t *testing.T) { + metrics.Register() + defer clearMetrics() + + fakeClock := testingclock.NewFakeClock(frozenTime) + tracker := &basicNodeStartupLatencyTracker{ + bootTime: frozenTime.Add(-100 * time.Millisecond), + kubeletStartTime: frozenTime, + clock: fakeClock, + } + + fakeClock.Step(600 * time.Millisecond) + tracker.RecordAttemptRegisterNode() + + assert.Equal(t, frozenTime.Add(600*time.Millisecond), tracker.firstRegistrationAttemptTime) + assert.True(t, tracker.firstRegisteredNewNodeTime.IsZero()) + assert.True(t, tracker.firstNodeReadyTime.IsZero()) + + wants := ` + # HELP kubelet_node_startup_duration_seconds [ALPHA] Duration in seconds of node startup in total. + # TYPE kubelet_node_startup_duration_seconds gauge + kubelet_node_startup_duration_seconds 0 + # HELP kubelet_node_startup_post_registration_duration_seconds [ALPHA] Duration in seconds of node startup after registration. + # TYPE kubelet_node_startup_post_registration_duration_seconds gauge + kubelet_node_startup_post_registration_duration_seconds 0 + # HELP kubelet_node_startup_pre_kubelet_duration_seconds [ALPHA] Duration in seconds of node startup before kubelet starts. + # TYPE kubelet_node_startup_pre_kubelet_duration_seconds gauge + kubelet_node_startup_pre_kubelet_duration_seconds 0 + # HELP kubelet_node_startup_pre_registration_duration_seconds [ALPHA] Duration in seconds of node startup before registration. + # TYPE kubelet_node_startup_pre_registration_duration_seconds gauge + kubelet_node_startup_pre_registration_duration_seconds 0 + # HELP kubelet_node_startup_registration_duration_seconds [ALPHA] Duration in seconds of node startup during registration. 
+ # TYPE kubelet_node_startup_registration_duration_seconds gauge + kubelet_node_startup_registration_duration_seconds 0 + ` + if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(wants), + metricsNameNodeStartupPreKubelet, + metricsNameNodeStartupPreRegistration, + metricsNameNodeStartupRegistration, + metricsNameNodeStartupPostRegistration, + metricsNameNodeStartup, + ); err != nil { + t.Error(err) + } + }) +} + +func TestRecordAttemptRegisterTwice(t *testing.T) { + t.Run("record attempt register node twice", func(t *testing.T) { + metrics.Register() + defer clearMetrics() + + fakeClock := testingclock.NewFakeClock(frozenTime) + tracker := &basicNodeStartupLatencyTracker{ + bootTime: frozenTime.Add(-100 * time.Millisecond), + kubeletStartTime: frozenTime, + clock: fakeClock, + } + + fakeClock.Step(600 * time.Millisecond) + tracker.RecordAttemptRegisterNode() + + fakeClock.Step(300 * time.Millisecond) + tracker.RecordAttemptRegisterNode() + + assert.Equal(t, frozenTime.Add(600*time.Millisecond), tracker.firstRegistrationAttemptTime) + assert.True(t, tracker.firstRegisteredNewNodeTime.IsZero()) + assert.True(t, tracker.firstNodeReadyTime.IsZero()) + }) +} + +func TestSkippingRecordRegisteredNewNode(t *testing.T) { + t.Run("record register new node twice", func(t *testing.T) { + metrics.Register() + defer clearMetrics() + + fakeClock := testingclock.NewFakeClock(frozenTime) + tracker := &basicNodeStartupLatencyTracker{ + bootTime: frozenTime.Add(-100 * time.Millisecond), + kubeletStartTime: frozenTime, + clock: fakeClock, + } + + fakeClock.Step(100 * time.Millisecond) + tracker.RecordAttemptRegisterNode() + + fakeClock.Step(500 * time.Millisecond) + tracker.RecordRegisteredNewNode() + + fakeClock.Step(300 * time.Millisecond) + tracker.RecordRegisteredNewNode() + + assert.Equal(t, frozenTime.Add(600*time.Millisecond), tracker.firstRegisteredNewNodeTime) + + wants := ` + # HELP kubelet_node_startup_duration_seconds [ALPHA] Duration in seconds of node startup in total. + # TYPE kubelet_node_startup_duration_seconds gauge + kubelet_node_startup_duration_seconds 0 + # HELP kubelet_node_startup_post_registration_duration_seconds [ALPHA] Duration in seconds of node startup after registration. + # TYPE kubelet_node_startup_post_registration_duration_seconds gauge + kubelet_node_startup_post_registration_duration_seconds 0 + # HELP kubelet_node_startup_pre_kubelet_duration_seconds [ALPHA] Duration in seconds of node startup before kubelet starts. + # TYPE kubelet_node_startup_pre_kubelet_duration_seconds gauge + kubelet_node_startup_pre_kubelet_duration_seconds 0.1 + # HELP kubelet_node_startup_pre_registration_duration_seconds [ALPHA] Duration in seconds of node startup before registration. + # TYPE kubelet_node_startup_pre_registration_duration_seconds gauge + kubelet_node_startup_pre_registration_duration_seconds 0.1 + # HELP kubelet_node_startup_registration_duration_seconds [ALPHA] Duration in seconds of node startup during registration. 
+ # TYPE kubelet_node_startup_registration_duration_seconds gauge + kubelet_node_startup_registration_duration_seconds 0.5 + ` + if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(wants), + metricsNameNodeStartupPreKubelet, + metricsNameNodeStartupPreRegistration, + metricsNameNodeStartupRegistration, + metricsNameNodeStartupPostRegistration, + metricsNameNodeStartup, + ); err != nil { + t.Error(err) + } + }) + + t.Run("record register new node without previous step", func(t *testing.T) { + metrics.Register() + defer clearMetrics() + + fakeClock := testingclock.NewFakeClock(frozenTime) + tracker := &basicNodeStartupLatencyTracker{ + bootTime: frozenTime.Add(-100 * time.Millisecond), + kubeletStartTime: frozenTime, + clock: fakeClock, + } + + fakeClock.Step(700 * time.Millisecond) + tracker.RecordRegisteredNewNode() + + assert.True(t, tracker.firstRegisteredNewNodeTime.IsZero()) + + wants := ` + # HELP kubelet_node_startup_duration_seconds [ALPHA] Duration in seconds of node startup in total. + # TYPE kubelet_node_startup_duration_seconds gauge + kubelet_node_startup_duration_seconds 0 + # HELP kubelet_node_startup_post_registration_duration_seconds [ALPHA] Duration in seconds of node startup after registration. + # TYPE kubelet_node_startup_post_registration_duration_seconds gauge + kubelet_node_startup_post_registration_duration_seconds 0 + # HELP kubelet_node_startup_pre_kubelet_duration_seconds [ALPHA] Duration in seconds of node startup before kubelet starts. + # TYPE kubelet_node_startup_pre_kubelet_duration_seconds gauge + kubelet_node_startup_pre_kubelet_duration_seconds 0 + # HELP kubelet_node_startup_pre_registration_duration_seconds [ALPHA] Duration in seconds of node startup before registration. + # TYPE kubelet_node_startup_pre_registration_duration_seconds gauge + kubelet_node_startup_pre_registration_duration_seconds 0 + # HELP kubelet_node_startup_registration_duration_seconds [ALPHA] Duration in seconds of node startup during registration. + # TYPE kubelet_node_startup_registration_duration_seconds gauge + kubelet_node_startup_registration_duration_seconds 0 + ` + if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(wants), + metricsNameNodeStartupPreKubelet, + metricsNameNodeStartupPreRegistration, + metricsNameNodeStartupRegistration, + metricsNameNodeStartupPostRegistration, + metricsNameNodeStartup, + ); err != nil { + t.Error(err) + } + }) +} + +func TestSkippingRecordNodeReady(t *testing.T) { + t.Run("record node ready twice", func(t *testing.T) { + metrics.Register() + defer clearMetrics() + + fakeClock := testingclock.NewFakeClock(frozenTime) + tracker := &basicNodeStartupLatencyTracker{ + bootTime: frozenTime.Add(-100 * time.Millisecond), + kubeletStartTime: frozenTime, + clock: fakeClock, + } + + fakeClock.Step(100 * time.Millisecond) + tracker.RecordAttemptRegisterNode() + + fakeClock.Step(200 * time.Millisecond) + tracker.RecordRegisteredNewNode() + + fakeClock.Step(300 * time.Millisecond) + tracker.RecordNodeReady() + + fakeClock.Step(700 * time.Millisecond) + tracker.RecordNodeReady() + + assert.Equal(t, frozenTime.Add(600*time.Millisecond), tracker.firstNodeReadyTime) + + wants := ` + # HELP kubelet_node_startup_duration_seconds [ALPHA] Duration in seconds of node startup in total. + # TYPE kubelet_node_startup_duration_seconds gauge + kubelet_node_startup_duration_seconds 0.7 + # HELP kubelet_node_startup_post_registration_duration_seconds [ALPHA] Duration in seconds of node startup after registration. 
+ # TYPE kubelet_node_startup_post_registration_duration_seconds gauge + kubelet_node_startup_post_registration_duration_seconds 0.3 + # HELP kubelet_node_startup_pre_kubelet_duration_seconds [ALPHA] Duration in seconds of node startup before kubelet starts. + # TYPE kubelet_node_startup_pre_kubelet_duration_seconds gauge + kubelet_node_startup_pre_kubelet_duration_seconds 0.1 + # HELP kubelet_node_startup_pre_registration_duration_seconds [ALPHA] Duration in seconds of node startup before registration. + # TYPE kubelet_node_startup_pre_registration_duration_seconds gauge + kubelet_node_startup_pre_registration_duration_seconds 0.1 + # HELP kubelet_node_startup_registration_duration_seconds [ALPHA] Duration in seconds of node startup during registration. + # TYPE kubelet_node_startup_registration_duration_seconds gauge + kubelet_node_startup_registration_duration_seconds 0.2 + ` + if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(wants), + metricsNameNodeStartupPreKubelet, + metricsNameNodeStartupPreRegistration, + metricsNameNodeStartupRegistration, + metricsNameNodeStartupPostRegistration, + metricsNameNodeStartup, + ); err != nil { + t.Error(err) + } + }) + + t.Run("record node ready without previous step", func(t *testing.T) { + metrics.Register() + defer clearMetrics() + + fakeClock := testingclock.NewFakeClock(frozenTime) + tracker := &basicNodeStartupLatencyTracker{ + bootTime: frozenTime.Add(-100 * time.Millisecond), + kubeletStartTime: frozenTime, + clock: fakeClock, + } + + fakeClock.Step(100 * time.Millisecond) + tracker.RecordAttemptRegisterNode() + + fakeClock.Step(700 * time.Millisecond) + tracker.RecordNodeReady() + + assert.True(t, tracker.firstNodeReadyTime.IsZero()) + + wants := ` + # HELP kubelet_node_startup_duration_seconds [ALPHA] Duration in seconds of node startup in total. + # TYPE kubelet_node_startup_duration_seconds gauge + kubelet_node_startup_duration_seconds 0 + # HELP kubelet_node_startup_post_registration_duration_seconds [ALPHA] Duration in seconds of node startup after registration. + # TYPE kubelet_node_startup_post_registration_duration_seconds gauge + kubelet_node_startup_post_registration_duration_seconds 0 + # HELP kubelet_node_startup_pre_kubelet_duration_seconds [ALPHA] Duration in seconds of node startup before kubelet starts. + # TYPE kubelet_node_startup_pre_kubelet_duration_seconds gauge + kubelet_node_startup_pre_kubelet_duration_seconds 0 + # HELP kubelet_node_startup_pre_registration_duration_seconds [ALPHA] Duration in seconds of node startup before registration. + # TYPE kubelet_node_startup_pre_registration_duration_seconds gauge + kubelet_node_startup_pre_registration_duration_seconds 0 + # HELP kubelet_node_startup_registration_duration_seconds [ALPHA] Duration in seconds of node startup during registration. 
+ # TYPE kubelet_node_startup_registration_duration_seconds gauge + kubelet_node_startup_registration_duration_seconds 0 + ` + if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(wants), + metricsNameNodeStartupPreKubelet, + metricsNameNodeStartupPreRegistration, + metricsNameNodeStartupRegistration, + metricsNameNodeStartupPostRegistration, + metricsNameNodeStartup, + ); err != nil { + t.Error(err) + } + }) +} + +func clearMetrics() { + metrics.NodeStartupPreKubeletDuration.Set(0) + metrics.NodeStartupPreRegistrationDuration.Set(0) + metrics.NodeStartupRegistrationDuration.Set(0) + metrics.NodeStartupPostRegistrationDuration.Set(0) + metrics.NodeStartupDuration.Set(0) +} diff --git a/pkg/kubelet/util/pod_startup_latency_tracker.go b/pkg/kubelet/util/pod_startup_latency_tracker.go index 88a1b506f714f..c35918b856490 100644 --- a/pkg/kubelet/util/pod_startup_latency_tracker.go +++ b/pkg/kubelet/util/pod_startup_latency_tracker.go @@ -102,6 +102,7 @@ func (p *basicPodStartupLatencyTracker) ObservedPodOnWatch(pod *v1.Pod, when tim klog.InfoS("Observed pod startup duration", "pod", klog.KObj(pod), "podStartSLOduration", podStartSLOduration, + "podStartE2EDuration", podStartingDuration, "podCreationTimestamp", pod.CreationTimestamp.Time, "firstStartedPulling", state.firstStartedPulling, "lastFinishedPulling", state.lastFinishedPulling, @@ -109,6 +110,7 @@ func (p *basicPodStartupLatencyTracker) ObservedPodOnWatch(pod *v1.Pod, when tim "watchObservedRunningTime", when) metrics.PodStartSLIDuration.WithLabelValues().Observe(podStartSLOduration) + metrics.PodStartTotalDuration.WithLabelValues().Observe(podStartingDuration.Seconds()) state.metricRecorded = true } } diff --git a/pkg/kubelet/util/util.go b/pkg/kubelet/util/util.go index 97933afe39b0f..79473a1818436 100644 --- a/pkg/kubelet/util/util.go +++ b/pkg/kubelet/util/util.go @@ -21,6 +21,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/util/filesystem" ) // FromApiserverCache modifies so that the GET request will @@ -29,6 +30,8 @@ func FromApiserverCache(opts *metav1.GetOptions) { opts.ResourceVersion = "0" } +var IsUnixDomainSocket = filesystem.IsUnixDomainSocket + // GetNodenameForKernel gets hostname value to set in the hostname field (the nodename field of struct utsname) of the pod. func GetNodenameForKernel(hostname string, hostDomainName string, setHostnameAsFQDN *bool) (string, error) { kernelHostname := hostname diff --git a/pkg/kubelet/util/util_test.go b/pkg/kubelet/util/util_test.go index 11a82f61ed547..383b1fdb14f42 100644 --- a/pkg/kubelet/util/util_test.go +++ b/pkg/kubelet/util/util_test.go @@ -17,12 +17,9 @@ limitations under the License. 
package util import ( - "net" - "os" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestGetNodenameForKernel(t *testing.T) { @@ -89,63 +86,3 @@ func TestGetNodenameForKernel(t *testing.T) { } } - -func TestIsUnixDomainSocket(t *testing.T) { - tests := []struct { - label string - listenOnSocket bool - expectSocket bool - expectError bool - invalidFile bool - }{ - { - label: "Domain Socket file", - listenOnSocket: true, - expectSocket: true, - expectError: false, - }, - { - label: "Non Existent file", - invalidFile: true, - expectError: true, - }, - { - label: "Regular file", - listenOnSocket: false, - expectSocket: false, - expectError: false, - }, - } - for _, test := range tests { - f, err := os.CreateTemp("", "test-domain-socket") - require.NoErrorf(t, err, "Failed to create file for test purposes: %v while setting up: %s", err, test.label) - addr := f.Name() - f.Close() - var ln *net.UnixListener - if test.listenOnSocket { - os.Remove(addr) - ta, err := net.ResolveUnixAddr("unix", addr) - require.NoErrorf(t, err, "Failed to ResolveUnixAddr: %v while setting up: %s", err, test.label) - ln, err = net.ListenUnix("unix", ta) - require.NoErrorf(t, err, "Failed to ListenUnix: %v while setting up: %s", err, test.label) - } - fileToTest := addr - if test.invalidFile { - fileToTest = fileToTest + ".invalid" - } - result, err := IsUnixDomainSocket(fileToTest) - if test.listenOnSocket { - // this takes care of removing the file associated with the domain socket - ln.Close() - } else { - // explicitly remove regular file - os.Remove(addr) - } - if test.expectError { - assert.Errorf(t, err, "Unexpected nil error from IsUnixDomainSocket for %s", test.label) - } else { - assert.NoErrorf(t, err, "Unexpected error invoking IsUnixDomainSocket for %s", test.label) - } - assert.Equal(t, result, test.expectSocket, "Unexpected result from IsUnixDomainSocket: %v for %s", result, test.label) - } -} diff --git a/pkg/kubelet/util/util_unix.go b/pkg/kubelet/util/util_unix.go index e68a194e6d318..c1dd608a7ea6a 100644 --- a/pkg/kubelet/util/util_unix.go +++ b/pkg/kubelet/util/util_unix.go @@ -136,18 +136,6 @@ func LocalEndpoint(path, file string) (string, error) { return filepath.Join(u.String(), file+".sock"), nil } -// IsUnixDomainSocket returns whether a given file is a AF_UNIX socket file -func IsUnixDomainSocket(filePath string) (bool, error) { - fi, err := os.Stat(filePath) - if err != nil { - return false, fmt.Errorf("stat file %s failed: %v", filePath, err) - } - if fi.Mode()&os.ModeSocket == 0 { - return false, nil - } - return true, nil -} - // NormalizePath is a no-op for Linux for now func NormalizePath(path string) string { return path diff --git a/pkg/kubelet/util/util_windows.go b/pkg/kubelet/util/util_windows.go index b8fc558d2e2f8..3837b45aa6aa8 100644 --- a/pkg/kubelet/util/util_windows.go +++ b/pkg/kubelet/util/util_windows.go @@ -24,26 +24,17 @@ import ( "fmt" "net" "net/url" - "os" "path/filepath" "strings" "syscall" "time" "github.com/Microsoft/go-winio" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" ) const ( tcpProtocol = "tcp" npipeProtocol = "npipe" - // Amount of time to wait between attempting to use a Unix domain socket. 
- // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 - // the first attempt will most likely fail, hence the need to retry - socketDialRetryPeriod = 1 * time.Second - // Overall timeout value to dial a Unix domain socket, including retries - socketDialTimeout = 4 * time.Second ) // CreateListener creates a listener on the specified endpoint. @@ -154,54 +145,6 @@ func GetBootTime() (time.Time, error) { return currentTime.Add(-time.Duration(output) * time.Millisecond).Truncate(time.Second), nil } -// IsUnixDomainSocket returns whether a given file is a AF_UNIX socket file -// Note that due to the retry logic inside, it could take up to 4 seconds -// to determine whether or not the file path supplied is a Unix domain socket -func IsUnixDomainSocket(filePath string) (bool, error) { - // Due to the absence of golang support for os.ModeSocket in Windows (https://github.com/golang/go/issues/33357) - // we need to dial the file and check if we receive an error to determine if a file is Unix Domain Socket file. - - // Note that querrying for the Reparse Points (https://docs.microsoft.com/en-us/windows/win32/fileio/reparse-points) - // for the file (using FSCTL_GET_REPARSE_POINT) and checking for reparse tag: reparseTagSocket - // does NOT work in 1809 if the socket file is created within a bind mounted directory by a container - // and the FSCTL is issued in the host by the kubelet. - - // If the file does not exist, it cannot be a Unix domain socket. - if _, err := os.Stat(filePath); os.IsNotExist(err) { - return false, fmt.Errorf("File %s not found. Err: %v", filePath, err) - } - - klog.V(6).InfoS("Function IsUnixDomainSocket starts", "filePath", filePath) - // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 we cannot rely - // on the Unix Domain socket working on the very first try, hence the potential need to - // dial multiple times - var lastSocketErr error - err := wait.PollImmediate(socketDialRetryPeriod, socketDialTimeout, - func() (bool, error) { - klog.V(6).InfoS("Dialing the socket", "filePath", filePath) - var c net.Conn - c, lastSocketErr = net.Dial("unix", filePath) - if lastSocketErr == nil { - c.Close() - klog.V(6).InfoS("Socket dialed successfully", "filePath", filePath) - return true, nil - } - klog.V(6).InfoS("Failed the current attempt to dial the socket, so pausing before retry", - "filePath", filePath, "err", lastSocketErr, "socketDialRetryPeriod", - socketDialRetryPeriod) - return false, nil - }) - - // PollImmediate will return "timed out waiting for the condition" if the function it - // invokes never returns true - if err != nil { - klog.V(2).InfoS("Failed all attempts to dial the socket so marking it as a non-Unix Domain socket. 
Last socket error along with the error from PollImmediate follow", - "filePath", filePath, "lastSocketErr", lastSocketErr, "err", err) - return false, nil - } - return true, nil -} - // NormalizePath converts FS paths returned by certain go frameworks (like fsnotify) // to native Windows paths that can be passed to Windows specific code func NormalizePath(path string) string { diff --git a/pkg/kubelet/util/util_windows_test.go b/pkg/kubelet/util/util_windows_test.go index 1450365a4c5e6..fa5ab43a5e519 100644 --- a/pkg/kubelet/util/util_windows_test.go +++ b/pkg/kubelet/util/util_windows_test.go @@ -182,64 +182,6 @@ func TestParseEndpoint(t *testing.T) { } -func TestIsUnixDomainSocketPipe(t *testing.T) { - generatePipeName := func(suffixLen int) string { - rand.Seed(time.Now().UnixNano()) - letter := []rune("abcdef0123456789") - b := make([]rune, suffixLen) - for i := range b { - b[i] = letter[rand.Intn(len(letter))] - } - return "\\\\.\\pipe\\test-pipe" + string(b) - } - testFile := generatePipeName(4) - pipeln, err := winio.ListenPipe(testFile, &winio.PipeConfig{SecurityDescriptor: "D:P(A;;GA;;;BA)(A;;GA;;;SY)"}) - defer pipeln.Close() - - require.NoErrorf(t, err, "Failed to listen on named pipe for test purposes: %v", err) - result, err := IsUnixDomainSocket(testFile) - assert.NoError(t, err, "Unexpected error from IsUnixDomainSocket.") - assert.False(t, result, "Unexpected result: true from IsUnixDomainSocket.") -} - -// This is required as on Windows it's possible for the socket file backing a Unix domain socket to -// exist but not be ready for socket communications yet as per -// https://github.com/kubernetes/kubernetes/issues/104584 -func TestPendingUnixDomainSocket(t *testing.T) { - // Create a temporary file that will simulate the Unix domain socket file in a - // not-yet-ready state. 
We need this because the Kubelet keeps an eye on file - // changes and acts on them, leading to potential race issues as described in - // the referenced issue above - f, err := os.CreateTemp("", "test-domain-socket") - require.NoErrorf(t, err, "Failed to create file for test purposes: %v", err) - testFile := f.Name() - f.Close() - - // Start the check at this point - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - result, err := IsUnixDomainSocket(testFile) - assert.Nil(t, err, "Unexpected error from IsUnixDomainSocket: %v", err) - assert.True(t, result, "Unexpected result: false from IsUnixDomainSocket.") - wg.Done() - }() - - // Wait a sufficient amount of time to make sure the retry logic kicks in - time.Sleep(socketDialRetryPeriod) - - // Replace the temporary file with an actual Unix domain socket file - os.Remove(testFile) - ta, err := net.ResolveUnixAddr("unix", testFile) - require.NoError(t, err, "Failed to ResolveUnixAddr.") - unixln, err := net.ListenUnix("unix", ta) - require.NoError(t, err, "Failed to ListenUnix.") - - // Wait for the goroutine to finish, then close the socket - wg.Wait() - unixln.Close() -} - func TestNormalizePath(t *testing.T) { tests := []struct { originalpath string diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go index a0ed93f00a1c7..b61f92a48b9cf 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go @@ -877,7 +877,6 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) { // Verifies volume/pod combo exist using PodExistsInVolume() func Test_AddPodToVolume_Positive_SELinux(t *testing.T) { // Arrange - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, true)() volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t) asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) @@ -956,7 +955,6 @@ func Test_AddPodToVolume_Positive_SELinux(t *testing.T) { // Verifies newly added volume exists in GetGloballyMountedVolumes() func Test_MarkDeviceAsMounted_Positive_SELinux(t *testing.T) { // Arrange - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, true)() volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t) asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_wold_selinux_metrics.go b/pkg/kubelet/volumemanager/cache/desired_state_of_wold_selinux_metrics.go index 16727d0259175..4dd5ecd2efff2 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_wold_selinux_metrics.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_wold_selinux_metrics.go @@ -49,24 +49,30 @@ var ( Help: "Number of errors when a Pod defines different SELinux contexts for its containers that use the same volume. 
They are not errors yet, but they will become real errors when SELinuxMountReadWriteOncePod feature is expanded to all volume access modes.", StabilityLevel: compbasemetrics.ALPHA, }) - seLinuxVolumeContextMismatchErrors = compbasemetrics.NewGauge( + seLinuxVolumeContextMismatchErrors = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "volume_manager_selinux_volume_context_mismatch_errors_total", Help: "Number of errors when a Pod uses a volume that is already mounted with a different SELinux context than the Pod needs. Kubelet can't start such a Pod then and it will retry, therefore value of this metric may not represent the actual nr. of Pods.", StabilityLevel: compbasemetrics.ALPHA, - }) - seLinuxVolumeContextMismatchWarnings = compbasemetrics.NewGauge( + }, + []string{"volume_plugin"}, + ) + seLinuxVolumeContextMismatchWarnings = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "volume_manager_selinux_volume_context_mismatch_warnings_total", Help: "Number of errors when a Pod uses a volume that is already mounted with a different SELinux context than the Pod needs. They are not errors yet, but they will become real errors when SELinuxMountReadWriteOncePod feature is expanded to all volume access modes.", StabilityLevel: compbasemetrics.ALPHA, - }) - seLinuxVolumesAdmitted = compbasemetrics.NewGauge( + }, + []string{"volume_plugin"}, + ) + seLinuxVolumesAdmitted = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "volume_manager_selinux_volumes_admitted_total", Help: "Number of volumes whose SELinux context was fine and will be mounted with mount -o context option.", StabilityLevel: compbasemetrics.ALPHA, - }) + }, + []string{"volume_plugin"}, + ) registerMetrics sync.Once ) diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index 2a7abe23c9450..7c417516b24ce 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -31,6 +31,7 @@ import ( "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/metrics" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/volume/csi" resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource" "k8s.io/kubernetes/pkg/features" @@ -273,6 +274,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( volumeSpec.Name(), err) } + volumePluginName := getVolumePluginNameWithDriver(volumePlugin, volumeSpec) var volumeName v1.UniqueVolumeName @@ -304,7 +306,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( } klog.V(4).InfoS("expected volume SELinux label context", "volume", volumeSpec.Name(), "label", seLinuxFileLabel) - if vol, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists { + if _, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists { var sizeLimit *resource.Quantity if volumeSpec.Volume != nil { if util.IsLocalEphemeralVolume(*volumeSpec.Volume) { @@ -326,7 +328,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( effectiveSELinuxMountLabel = "" } if seLinuxFileLabel != "" { - seLinuxVolumesAdmitted.Add(1.0) + seLinuxVolumesAdmitted.WithLabelValues(volumePluginName).Add(1.0) } vmt := volumeToMount{ volumeName: volumeName, @@ -348,18 +350,6 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( } } dsw.volumesToMount[volumeName] = vmt - } else { - // volume exists - if pluginSupportsSELinuxContextMount { - if seLinuxFileLabel != vol.originalSELinuxLabel { - // TODO: update the error message after tests, e.g. add at least the conflicting pod names. 
- fullErr := fmt.Errorf("conflicting SELinux labels of volume %s: %q and %q", volumeSpec.Name(), vol.originalSELinuxLabel, seLinuxFileLabel) - supported := util.VolumeSupportsSELinuxMount(volumeSpec) - if err := handleSELinuxMetricError(fullErr, supported, seLinuxVolumeContextMismatchWarnings, seLinuxVolumeContextMismatchErrors); err != nil { - return "", err - } - } - } } oldPodMount, ok := dsw.volumesToMount[volumeName].podsToMount[podName] @@ -368,6 +358,26 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( mountRequestTime = oldPodMount.mountRequestTime } + if !ok { + // The volume exists, but not with this pod. + // It will be added below as podToMount, now just report SELinux metric. + if pluginSupportsSELinuxContextMount { + existingVolume := dsw.volumesToMount[volumeName] + if seLinuxFileLabel != existingVolume.originalSELinuxLabel { + fullErr := fmt.Errorf("conflicting SELinux labels of volume %s: %q and %q", volumeSpec.Name(), existingVolume.originalSELinuxLabel, seLinuxFileLabel) + supported := util.VolumeSupportsSELinuxMount(volumeSpec) + err := handleSELinuxMetricError( + fullErr, + supported, + seLinuxVolumeContextMismatchWarnings.WithLabelValues(volumePluginName), + seLinuxVolumeContextMismatchErrors.WithLabelValues(volumePluginName)) + if err != nil { + return "", err + } + } + } + } + // Create new podToMount object. If it already exists, it is refreshed with // updated values (this is required for volumes that require remounting on // pod update, like Downward API volumes). @@ -646,7 +656,7 @@ func (dsw *desiredStateOfWorld) getSELinuxMountSupport(volumeSpec *volume.Spec) } // Based on isRWOP, bump the right warning / error metric and either consume the error or return it. -func handleSELinuxMetricError(err error, seLinuxSupported bool, warningMetric, errorMetric *metrics.Gauge) error { +func handleSELinuxMetricError(err error, seLinuxSupported bool, warningMetric, errorMetric metrics.GaugeMetric) error { if seLinuxSupported { errorMetric.Add(1.0) return err @@ -657,3 +667,21 @@ func handleSELinuxMetricError(err error, seLinuxSupported bool, warningMetric, e klog.V(4).ErrorS(err, "Please report this error in https://github.com/kubernetes/enhancements/issues/1710, together with full Pod yaml file") return nil } + +// Return the volume plugin name, together with the CSI driver name if it's a CSI volume. +func getVolumePluginNameWithDriver(plugin volume.VolumePlugin, spec *volume.Spec) string { + pluginName := plugin.GetPluginName() + if pluginName != csi.CSIPluginName { + return pluginName + } + + // It's a CSI volume + driverName, err := csi.GetCSIDriverName(spec) + if err != nil { + // In theory this is unreachable - such volume would not pass validation. + klog.V(4).ErrorS(err, "failed to get CSI driver name from volume spec") + driverName = "unknown" + } + // `/` is used to separate plugin + CSI driver in util.GetUniqueVolumeName() too + return pluginName + "/" + driverName +} diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go index 32dd140fbf6f1..e465f9d6d4c81 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go @@ -609,14 +609,15 @@ func Test_AddPodToVolume_WithEmptyDirSizeLimit(t *testing.T) { // Verifies newly added pod/volume exists via PodExistsInVolume() without SELinux context // VolumeExists() and GetVolumesToMount() and no errors. 
func Test_AddPodToVolume_Positive_SELinuxNoRWOP(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, true)() // Arrange plugins := []volume.VolumePlugin{ - &volumetesting.FakeBasicVolumePlugin{ - Plugin: volumetesting.FakeVolumePlugin{ - PluginName: "basic", - SupportsSELinux: true, + &volumetesting.FakeDeviceMountableVolumePlugin{ + FakeBasicVolumePlugin: volumetesting.FakeBasicVolumePlugin{ + Plugin: volumetesting.FakeVolumePlugin{ + PluginName: "basic", + SupportsSELinux: true, + }, }, }, } @@ -690,14 +691,15 @@ func Test_AddPodToVolume_Positive_SELinuxNoRWOP(t *testing.T) { // Verifies newly added pod/volume exists via PodExistsInVolume() without SELinux context // VolumeExists() and GetVolumesToMount() and no errors. func Test_AddPodToVolume_Positive_NoSELinuxPlugin(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, true)() // Arrange plugins := []volume.VolumePlugin{ - &volumetesting.FakeBasicVolumePlugin{ - Plugin: volumetesting.FakeVolumePlugin{ - PluginName: "basic", - SupportsSELinux: false, + &volumetesting.FakeDeviceMountableVolumePlugin{ + FakeBasicVolumePlugin: volumetesting.FakeBasicVolumePlugin{ + Plugin: volumetesting.FakeVolumePlugin{ + PluginName: "basic", + SupportsSELinux: false, + }, }, }, } @@ -772,14 +774,15 @@ func Test_AddPodToVolume_Positive_NoSELinuxPlugin(t *testing.T) { // Verifies newly added pod/volume exists via PodExistsInVolume() // VolumeExists() and GetVolumesToMount() and no errors. func Test_AddPodToVolume_Positive_ExistingPodSameSELinuxRWOP(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, true)() // Arrange plugins := []volume.VolumePlugin{ - &volumetesting.FakeBasicVolumePlugin{ - Plugin: volumetesting.FakeVolumePlugin{ - PluginName: "basic", - SupportsSELinux: true, + &volumetesting.FakeDeviceMountableVolumePlugin{ + FakeBasicVolumePlugin: volumetesting.FakeBasicVolumePlugin{ + Plugin: volumetesting.FakeVolumePlugin{ + PluginName: "basic", + SupportsSELinux: true, + }, }, }, } @@ -873,14 +876,15 @@ func Test_AddPodToVolume_Positive_ExistingPodSameSELinuxRWOP(t *testing.T) { // Verifies newly added pod/volume exists via PodExistsInVolume() // VolumeExists() and GetVolumesToMount() and no errors. 
func Test_AddPodToVolume_Negative_ExistingPodDifferentSELinuxRWOP(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, true)() // Arrange plugins := []volume.VolumePlugin{ - &volumetesting.FakeBasicVolumePlugin{ - Plugin: volumetesting.FakeVolumePlugin{ - PluginName: "basic", - SupportsSELinux: true, + &volumetesting.FakeDeviceMountableVolumePlugin{ + FakeBasicVolumePlugin: volumetesting.FakeBasicVolumePlugin{ + Plugin: volumetesting.FakeVolumePlugin{ + PluginName: "basic", + SupportsSELinux: true, + }, }, }, } @@ -961,7 +965,7 @@ func Test_AddPodToVolume_Negative_ExistingPodDifferentSELinuxRWOP(t *testing.T) pod2.Name = "pod2" pod2.UID = "pod2uid" pod2.Spec.SecurityContext.SELinuxOptions = &seLinux2 - pod2Name := util.GetUniquePodName(pod) + pod2Name := util.GetUniquePodName(pod2) // Act _, err = dsw.AddPodToVolume( @@ -971,7 +975,7 @@ func Test_AddPodToVolume_Negative_ExistingPodDifferentSELinuxRWOP(t *testing.T) t.Fatalf("Second AddPodToVolume succeeded, expected a failure") } // Verify the original SELinux context is still in DSW - verifyPodExistsInVolumeDsw(t, pod2Name, generatedVolumeName, "system_u:object_r:container_file_t:s0:c1,c2", dsw) + verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, "system_u:object_r:container_file_t:s0:c1,c2", dsw) } func verifyVolumeExistsDsw( diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go index a16adb55dc2a9..6b4971db63d73 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go @@ -1189,7 +1189,6 @@ func TestCheckVolumeFSResize(t *testing.T) { } func TestCheckVolumeSELinux(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, true)() fullOpts := &v1.SELinuxOptions{ User: "system_u", diff --git a/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go b/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go index 57e534d9a0e47..584ce9bff0d8c 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go +++ b/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go @@ -23,6 +23,7 @@ import ( "path/filepath" "time" + "github.com/go-logr/logr" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -37,6 +38,12 @@ import ( utilstrings "k8s.io/utils/strings" ) +// these interfaces are necessary to keep the structures private +// and at the same time log them correctly in structured logs. 
+var _ logr.Marshaler = podVolume{} +var _ logr.Marshaler = reconstructedVolume{} +var _ logr.Marshaler = globalVolumeInfo{} + type podVolume struct { podName volumetypes.UniquePodName volumeSpecName string @@ -45,6 +52,22 @@ type podVolume struct { volumeMode v1.PersistentVolumeMode } +func (p podVolume) MarshalLog() interface{} { + return struct { + PodName string `json:"podName"` + VolumeSpecName string `json:"volumeSpecName"` + VolumePath string `json:"volumePath"` + PluginName string `json:"pluginName"` + VolumeMode string `json:"volumeMode"` + }{ + PodName: string(p.podName), + VolumeSpecName: p.volumeSpecName, + VolumePath: p.volumePath, + PluginName: p.pluginName, + VolumeMode: string(p.volumeMode), + } +} + type reconstructedVolume struct { volumeName v1.UniqueVolumeName podName volumetypes.UniquePodName @@ -59,6 +82,28 @@ type reconstructedVolume struct { seLinuxMountContext string } +func (rv reconstructedVolume) MarshalLog() interface{} { + return struct { + VolumeName string `json:"volumeName"` + PodName string `json:"podName"` + VolumeSpecName string `json:"volumeSpecName"` + OuterVolumeSpecName string `json:"outerVolumeSpecName"` + PodUID string `json:"podUID"` + VolumeGIDValue string `json:"volumeGIDValue"` + DevicePath string `json:"devicePath"` + SeLinuxMountContext string `json:"seLinuxMountContext"` + }{ + VolumeName: string(rv.volumeName), + PodName: string(rv.podName), + VolumeSpecName: rv.volumeSpec.Name(), + OuterVolumeSpecName: rv.outerVolumeSpecName, + PodUID: string(rv.pod.UID), + VolumeGIDValue: rv.volumeGidValue, + DevicePath: rv.devicePath, + SeLinuxMountContext: rv.seLinuxMountContext, + } +} + // globalVolumeInfo stores reconstructed volume information // for each pod that was using that volume. type globalVolumeInfo struct { @@ -71,6 +116,25 @@ type globalVolumeInfo struct { podVolumes map[volumetypes.UniquePodName]*reconstructedVolume } +func (gvi globalVolumeInfo) MarshalLog() interface{} { + podVolumes := make(map[volumetypes.UniquePodName]v1.UniqueVolumeName) + for podName, volume := range gvi.podVolumes { + podVolumes[podName] = volume.volumeName + } + + return struct { + VolumeName string `json:"volumeName"` + VolumeSpecName string `json:"volumeSpecName"` + DevicePath string `json:"devicePath"` + PodVolumes map[volumetypes.UniquePodName]v1.UniqueVolumeName `json:"podVolumes"` + }{ + VolumeName: string(gvi.volumeName), + VolumeSpecName: gvi.volumeSpec.Name(), + DevicePath: gvi.devicePath, + PodVolumes: podVolumes, + } +} + func (rc *reconciler) updateLastSyncTime() { rc.timeOfLastSyncLock.Lock() defer rc.timeOfLastSyncLock.Unlock() @@ -181,7 +245,9 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { } } } - klog.V(4).InfoS("Get volumes from pod directory", "path", podDir, "volumes", volumes) + for _, volume := range volumes { + klog.V(4).InfoS("Get volume from pod directory", "path", podDir, "volume", volume) + } return volumes, nil } diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go index 29ba3d4e1b964..74db6c95c37e8 100644 --- a/pkg/kubemark/hollow_kubelet.go +++ b/pkg/kubemark/hollow_kubelet.go @@ -97,24 +97,25 @@ func NewHollowKubelet( runtimeService internalapi.RuntimeService, containerManager cm.ContainerManager) *HollowKubelet { d := &kubelet.Dependencies{ - KubeClient: client, - HeartbeatClient: heartbeatClient, - ProbeManager: probetest.FakeManager{}, - RemoteRuntimeService: runtimeService, - RemoteImageService: imageService, - CAdvisorInterface: cadvisorInterface, - Cloud: nil, - OSInterface: 
&containertest.FakeOS{}, - ContainerManager: containerManager, - VolumePlugins: volumePlugins(), - TLSOptions: nil, - OOMAdjuster: oom.NewFakeOOMAdjuster(), - Mounter: &mount.FakeMounter{}, - Subpather: &subpath.FakeSubpath{}, - HostUtil: hostutil.NewFakeHostUtil(nil), - PodStartupLatencyTracker: kubeletutil.NewPodStartupLatencyTracker(), - TracerProvider: trace.NewNoopTracerProvider(), - Recorder: &record.FakeRecorder{}, // With real recorder we attempt to read /dev/kmsg. + KubeClient: client, + HeartbeatClient: heartbeatClient, + ProbeManager: probetest.FakeManager{}, + RemoteRuntimeService: runtimeService, + RemoteImageService: imageService, + CAdvisorInterface: cadvisorInterface, + Cloud: nil, + OSInterface: &containertest.FakeOS{}, + ContainerManager: containerManager, + VolumePlugins: volumePlugins(), + TLSOptions: nil, + OOMAdjuster: oom.NewFakeOOMAdjuster(), + Mounter: &mount.FakeMounter{}, + Subpather: &subpath.FakeSubpath{}, + HostUtil: hostutil.NewFakeHostUtil(nil), + PodStartupLatencyTracker: kubeletutil.NewPodStartupLatencyTracker(), + NodeStartupLatencyTracker: kubeletutil.NewNodeStartupLatencyTracker(), + TracerProvider: trace.NewNoopTracerProvider(), + Recorder: &record.FakeRecorder{}, // With real recorder we attempt to read /dev/kmsg. } return &HollowKubelet{ diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 239471c2aeb3c..916b3cf294137 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -607,18 +607,6 @@ func AddHandlers(h printers.PrintHandler) { } _ = h.TableHandler(scaleColumnDefinitions, printScale) - clusterCIDRColumnDefinitions := []metav1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "PerNodeHostBits", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["perNodeHostBits"]}, - {Name: "IPv4", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv4"]}, - {Name: "IPv6", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv6"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "NodeSelector", Type: "string", Priority: 1, Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["nodeSelector"]}, - } - - _ = h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDR) - _ = h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDRList) - resourceClassColumnDefinitions := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, {Name: "DriverName", Type: "string", Description: resourcev1alpha2.ResourceClass{}.SwaggerDoc()["driverName"]}, @@ -2800,57 +2788,6 @@ func printPriorityLevelConfigurationList(list *flowcontrol.PriorityLevelConfigur return rows, nil } -func printClusterCIDR(obj *networking.ClusterCIDR, options printers.GenerateOptions) ([]metav1.TableRow, error) { - row := metav1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - ipv4 := "" - ipv6 := "" - - if obj.Spec.IPv4 != "" { - ipv4 = obj.Spec.IPv4 - } - if obj.Spec.IPv6 != "" { - ipv6 = obj.Spec.IPv6 - } - - row.Cells = append(row.Cells, obj.Name, fmt.Sprint(obj.Spec.PerNodeHostBits), ipv4, ipv6, translateTimestampSince(obj.CreationTimestamp)) - if options.Wide { - nodeSelector := "" - if obj.Spec.NodeSelector != nil { - allTerms := make([]string, 0) - for _, term := range 
obj.Spec.NodeSelector.NodeSelectorTerms { - if len(term.MatchExpressions) > 0 { - matchExpressions := fmt.Sprintf("MatchExpressions: %v", term.MatchExpressions) - allTerms = append(allTerms, matchExpressions) - } - - if len(term.MatchFields) > 0 { - matchFields := fmt.Sprintf("MatchFields: %v", term.MatchFields) - allTerms = append(allTerms, matchFields) - } - } - nodeSelector = strings.Join(allTerms, ",") - } - - row.Cells = append(row.Cells, nodeSelector) - } - - return []metav1.TableRow{row}, nil -} - -func printClusterCIDRList(list *networking.ClusterCIDRList, options printers.GenerateOptions) ([]metav1.TableRow, error) { - rows := make([]metav1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printClusterCIDR(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - func printIPAddress(obj *networking.IPAddress, options printers.GenerateOptions) ([]metav1.TableRow, error) { row := metav1.TableRow{ Object: runtime.RawExtension{Object: obj}, diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 6580181c45d7e..397d66aa141f6 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -6406,280 +6406,6 @@ func TestTableRowDeepCopyShouldNotPanic(t *testing.T) { } } -func TestPrintClusterCIDR(t *testing.T) { - ipv4CIDR := "10.1.0.0/16" - perNodeHostBits := int32(8) - ipv6CIDR := "fd00:1:1::/64" - - tests := []struct { - ccc networking.ClusterCIDR - options printers.GenerateOptions - expected []metav1.TableRow - }{ - { - // Test name, IPv4 only with no node selector. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test1"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - expected: []metav1.TableRow{{Cells: []interface{}{"test1", "8", ipv4CIDR, "", ""}}}, - }, - { - // Test name, IPv4 only with node selector, Not wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test2"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - // Does NOT get printed. - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - expected: []metav1.TableRow{{Cells: []interface{}{"test2", "8", ipv4CIDR, "", ""}}}, - }, - { - // Test name, IPv4 only with no node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test3"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test3", "8", ipv4CIDR, "", "", ""}}}, - }, - { - // Test name, IPv4 only with node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test4"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . 
- expected: []metav1.TableRow{{Cells: []interface{}{"test4", "8", ipv4CIDR, "", "", "MatchExpressions: [{foo In [bar]}]"}}}, - }, - { - // Test name, IPv6 only with no node selector. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test5"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv6: ipv6CIDR, - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age - expected: []metav1.TableRow{{Cells: []interface{}{"test5", "8", "", ipv6CIDR, ""}}}, - }, - { - // Test name, IPv6 only with node selector, Not wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test6"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv6: ipv6CIDR, - // Does NOT get printed. - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - expected: []metav1.TableRow{{Cells: []interface{}{"test6", "8", "", ipv6CIDR, ""}}}, - }, - { - // Test name, IPv6 only with no node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test7"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv6: ipv6CIDR, - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test7", "8", "", ipv6CIDR, "", ""}}}, - }, - { - // Test name, IPv6 only with node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test8"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv6: ipv6CIDR, - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test8", "8", "", ipv6CIDR, "", "MatchExpressions: [{foo In [bar]}]"}}}, - }, - { - // Test name, DualStack with no node selector. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test9"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - expected: []metav1.TableRow{{Cells: []interface{}{"test9", "8", ipv4CIDR, ipv6CIDR, ""}}}, - }, - { - // Test name,DualStack with node selector, Not wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test10"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - // Does NOT get printed. - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - expected: []metav1.TableRow{{Cells: []interface{}{"test10", "8", ipv4CIDR, ipv6CIDR, ""}}}, - }, - { - // Test name, DualStack with no node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test11"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector. 
- expected: []metav1.TableRow{{Cells: []interface{}{"test11", "8", ipv4CIDR, ipv6CIDR, "", ""}}}, - }, - { - // Test name, DualStack with node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test12"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test12", "8", ipv4CIDR, ipv6CIDR, "", "MatchExpressions: [{foo In [bar]}]"}}}, - }, - } - - for i, test := range tests { - rows, err := printClusterCIDR(&test.ccc, test.options) - if err != nil { - t.Fatal(err) - } - for i := range rows { - rows[i].Object.Object = nil - } - if !reflect.DeepEqual(test.expected, rows) { - t.Errorf("%d mismatch: %s", i, cmp.Diff(test.expected, rows)) - } - } -} - -func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector { - return &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{ - { - MatchExpressions: []api.NodeSelectorRequirement{ - { - Key: key, - Operator: op, - Values: values, - }, - }, - }, - }, - } -} - -func TestPrintClusterCIDRList(t *testing.T) { - - cccList := networking.ClusterCIDRList{ - Items: []networking.ClusterCIDR{ - { - ObjectMeta: metav1.ObjectMeta{Name: "ccc1"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.1.0.0/16", - IPv6: "fd00:1:1::/64", - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "ccc2"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.2.0.0/16", - IPv6: "fd00:2:1::/64", - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - }, - } - - tests := []struct { - options printers.GenerateOptions - expected []metav1.TableRow - }{ - { - // Test name, DualStack with node selector, wide. - options: printers.GenerateOptions{Wide: false}, - expected: []metav1.TableRow{ - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - {Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", ""}}, - {Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", ""}}, - }, - }, - { - // Test name, DualStack with node selector, wide. - options: printers.GenerateOptions{Wide: true}, - expected: []metav1.TableRow{ - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector. 
- {Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", "", "MatchExpressions: [{foo In [bar]}]"}}, - {Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", "", "MatchExpressions: [{foo In [bar]}]"}}, - }, - }, - } - - for _, test := range tests { - rows, err := printClusterCIDRList(&cccList, test.options) - if err != nil { - t.Fatalf("Error printing service list: %#v", err) - } - for i := range rows { - rows[i].Object.Object = nil - } - if !reflect.DeepEqual(test.expected, rows) { - t.Errorf("mismatch: %s", cmp.Diff(test.expected, rows)) - } - } -} - func TestPrintIPAddress(t *testing.T) { ip := networking.IPAddress{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/probe/exec/exec_test.go b/pkg/probe/exec/exec_test.go index 5724228d2fc3e..c6ef85983121c 100644 --- a/pkg/probe/exec/exec_test.go +++ b/pkg/probe/exec/exec_test.go @@ -143,10 +143,10 @@ func TestExec(t *testing.T) { if status != test.expectedStatus { t.Errorf("[%d] expected %v, got %v", i, test.expectedStatus, status) } - if err != nil && test.expectError == false { + if err != nil && !test.expectError { t.Errorf("[%d] unexpected error: %v", i, err) } - if err == nil && test.expectError == true { + if err == nil && test.expectError { t.Errorf("[%d] unexpected non-error", i) } if test.output != output { diff --git a/pkg/proxy/apis/config/fuzzer/fuzzer.go b/pkg/proxy/apis/config/fuzzer/fuzzer.go index 33b39077adc1d..615c79e4aa477 100644 --- a/pkg/proxy/apis/config/fuzzer/fuzzer.go +++ b/pkg/proxy/apis/config/fuzzer/fuzzer.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // Funcs returns the fuzzer functions for the kube-proxy apis. 
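The next hunk in this file, and the kube-proxy defaults, validation, and config tests further below, replace the per-type helpers from k8s.io/utils/pointer with the generic k8s.io/utils/ptr.To. A minimal standalone sketch of the equivalence, separate from the patch and assuming only the public k8s.io/utils/ptr package (the variable names are illustrative):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// The older helpers pointer.Int32(14) and pointer.Bool(true) needed one
	// function per type. The generic ptr.To covers all of them; the type
	// parameter is usually inferred from the argument.
	masqueradeBit := ptr.To[int32](14) // *int32, explicit type parameter
	localhostNodePorts := ptr.To(true) // *bool, type inferred

	fmt.Println(*masqueradeBit, *localhostNodePorts)

	// ptr.Deref reads through a possibly-nil pointer with a fallback default,
	// which is handy when consuming the optional config fields shown below.
	var unset *int32
	fmt.Println(ptr.Deref(unset, 0), ptr.Deref(masqueradeBit, 0))
}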
@@ -35,16 +35,16 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { c.FuzzNoCustom(obj) obj.BindAddress = fmt.Sprintf("%d.%d.%d.%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256)) obj.ClientConnection.ContentType = c.RandString() - obj.Conntrack.MaxPerCore = pointer.Int32(c.Int31()) - obj.Conntrack.Min = pointer.Int32(c.Int31()) + obj.Conntrack.MaxPerCore = ptr.To(c.Int31()) + obj.Conntrack.Min = ptr.To(c.Int31()) obj.Conntrack.TCPCloseWaitTimeout = &metav1.Duration{Duration: time.Duration(c.Int63()) * time.Hour} obj.Conntrack.TCPEstablishedTimeout = &metav1.Duration{Duration: time.Duration(c.Int63()) * time.Hour} obj.FeatureGates = map[string]bool{c.RandString(): true} obj.HealthzBindAddress = fmt.Sprintf("%d.%d.%d.%d:%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(65536)) - obj.IPTables.MasqueradeBit = pointer.Int32(c.Int31()) - obj.IPTables.LocalhostNodePorts = pointer.Bool(c.RandBool()) + obj.IPTables.MasqueradeBit = ptr.To(c.Int31()) + obj.IPTables.LocalhostNodePorts = ptr.To(c.RandBool()) obj.MetricsBindAddress = fmt.Sprintf("%d.%d.%d.%d:%d", c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(256), c.Intn(65536)) - obj.OOMScoreAdj = pointer.Int32(c.Int31()) + obj.OOMScoreAdj = ptr.To(c.Int31()) obj.ClientConnection.ContentType = "bar" obj.NodePortAddresses = []string{"1.2.3.0/24"} if obj.Logging.Format == "" { diff --git a/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/after/v1alpha1.yaml b/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/after/v1alpha1.yaml index 7f01ffe5b0aed..91016c687f544 100644 --- a/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/after/v1alpha1.yaml +++ b/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/after/v1alpha1.yaml @@ -14,6 +14,8 @@ conntrack: min: 131072 tcpCloseWaitTimeout: 1h0m0s tcpEstablishedTimeout: 24h0m0s + udpStreamTimeout: 0s + udpTimeout: 0s detectLocal: bridgeInterface: "" interfaceNamePrefix: "" diff --git a/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/roundtrip/default/v1alpha1.yaml b/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/roundtrip/default/v1alpha1.yaml index 7f01ffe5b0aed..91016c687f544 100644 --- a/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/roundtrip/default/v1alpha1.yaml +++ b/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/roundtrip/default/v1alpha1.yaml @@ -14,6 +14,8 @@ conntrack: min: 131072 tcpCloseWaitTimeout: 1h0m0s tcpEstablishedTimeout: 24h0m0s + udpStreamTimeout: 0s + udpTimeout: 0s detectLocal: bridgeInterface: "" interfaceNamePrefix: "" diff --git a/pkg/proxy/apis/config/types.go b/pkg/proxy/apis/config/types.go index cc00fa21da95d..d677b9b1f74fd 100644 --- a/pkg/proxy/apis/config/types.go +++ b/pkg/proxy/apis/config/types.go @@ -30,36 +30,44 @@ import ( // details for the Kubernetes proxy server. type KubeProxyIPTablesConfiguration struct { // masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. + // the iptables or ipvs proxy mode. Values must be within the range [0, 31]. MasqueradeBit *int32 - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. + // masqueradeAll tells kube-proxy to SNAT all traffic sent to Service cluster IPs, + // when using the iptables or ipvs proxy mode. This may be required with some CNI + // plugins. 
MasqueradeAll bool - // LocalhostNodePorts tells kube-proxy to allow service NodePorts to be accessed via - // localhost (iptables mode only) + // localhostNodePorts, if false, tells kube-proxy to disable the legacy behavior + // of allowing NodePort services to be accessed via localhost. (Applies only to + // iptables mode and IPv4; localhost NodePorts are never allowed with other proxy + // modes or with IPv6.) LocalhostNodePorts *bool - // syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. + // syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently + // various re-synchronizing and cleanup operations are performed. Must be greater + // than 0. SyncPeriod metav1.Duration - // minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). + // minSyncPeriod is the minimum period between iptables rule resyncs (e.g. '5s', + // '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will + // result in an immediate iptables resync. MinSyncPeriod metav1.Duration } // KubeProxyIPVSConfiguration contains ipvs-related configuration // details for the Kubernetes proxy server. type KubeProxyIPVSConfiguration struct { - // syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. + // syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently + // various re-synchronizing and cleanup operations are performed. Must be greater + // than 0. SyncPeriod metav1.Duration - // minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). + // minSyncPeriod is the minimum period between IPVS rule resyncs (e.g. '5s', '1m', + // '2h22m'). A value of 0 means every Service or EndpointSlice change will result + // in an immediate IPVS resync. MinSyncPeriod metav1.Duration - // ipvs scheduler + // scheduler is the IPVS scheduler to use Scheduler string - // excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch + // excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch // when cleaning up ipvs services. ExcludeCIDRs []string - // strict ARP configure arp_ignore and arp_announce to avoid answering ARP queries + // strictARP configures arp_ignore and arp_announce to avoid answering ARP queries // from kube-ipvs0 interface StrictARP bool // tcpTimeout is the timeout value used for idle IPVS TCP sessions. @@ -89,6 +97,14 @@ type KubeProxyConntrackConfiguration struct { // in CLOSE_WAIT state will remain in the conntrack // table. (e.g. '60s'). Must be greater than 0 to set. TCPCloseWaitTimeout *metav1.Duration + // udpTimeout is how long an idle UDP conntrack entry in + // UNREPLIED state will remain in the conntrack table + // (e.g. '30s'). Must be greater than 0 to set. + UDPTimeout metav1.Duration + // udpStreamTimeout is how long an idle UDP conntrack entry in + // ASSURED state will remain in the conntrack table + // (e.g. '300s'). Must be greater than 0 to set. 
+ UDPStreamTimeout metav1.Duration } // KubeProxyWinkernelConfiguration contains Windows/HNS settings for @@ -103,23 +119,23 @@ type KubeProxyWinkernelConfiguration struct { // enableDSR tells kube-proxy whether HNS policies should be created // with DSR EnableDSR bool - // RootHnsEndpointName is the name of hnsendpoint that is attached to + // rootHnsEndpointName is the name of hnsendpoint that is attached to // l2bridge for root network namespace RootHnsEndpointName string - // ForwardHealthCheckVip forwards service VIP for health check port on + // forwardHealthCheckVip forwards service VIP for health check port on // Windows ForwardHealthCheckVip bool } // DetectLocalConfiguration contains optional settings related to DetectLocalMode option type DetectLocalConfiguration struct { - // BridgeInterface is a string argument which represents a single bridge interface name. - // Kube-proxy considers traffic as local if originating from this given bridge. - // This argument should be set if DetectLocalMode is set to BridgeInterface. + // bridgeInterface is a bridge interface name. When DetectLocalMode is set to + // LocalModeBridgeInterface, kube-proxy will consider traffic to be local if + // it originates from this bridge. BridgeInterface string - // InterfaceNamePrefix is a string argument which represents a single interface prefix name. - // Kube-proxy considers traffic as local if originating from one or more interfaces which match - // the given prefix. This argument should be set if DetectLocalMode is set to InterfaceNamePrefix. + // interfaceNamePrefix is an interface name prefix. When DetectLocalMode is set to + // LocalModeInterfaceNamePrefix, kube-proxy will consider traffic to be local if + // it originates from any interface whose name begins with this prefix. InterfaceNamePrefix string } @@ -133,65 +149,77 @@ type KubeProxyConfiguration struct { // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. FeatureGates map[string]bool - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) + // clientConnection specifies the kubeconfig file and client connection settings for the proxy + // server to use when communicating with the apiserver. + ClientConnection componentbaseconfig.ClientConnectionConfiguration + // logging specifies the options of logging. + // Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) + // for more information. + Logging logsapi.LoggingConfiguration + + // hostnameOverride, if non-empty, will be used as the name of the Node that + // kube-proxy is running on. If unset, the node name is assumed to be the same as + // the node's hostname. + HostnameOverride string + // bindAddress can be used to override kube-proxy's idea of what its node's + // primary IP is. Note that the name is a historical artifact, and kube-proxy does + // not actually bind any sockets to this IP. BindAddress string - // healthzBindAddress is the IP address and port for the health check server to serve on, - // defaulting to 0.0.0.0:10256 + // healthzBindAddress is the IP address and port for the health check server to + // serve on, defaulting to "0.0.0.0:10256" (if bindAddress is unset or IPv4), or + // "[::]:10256" (if bindAddress is IPv6). 
HealthzBindAddress string - // metricsBindAddress is the IP address and port for the metrics server to serve on, - // defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces) + // metricsBindAddress is the IP address and port for the metrics server to serve + // on, defaulting to "127.0.0.1:10249" (if bindAddress is unset or IPv4), or + // "[::1]:10249" (if bindAddress is IPv6). (Set to "0.0.0.0:10249" / "[::]:10249" + // to bind on all interfaces.) MetricsBindAddress string - // BindAddressHardFail, if true, kube-proxy will treat failure to bind to a port as fatal and exit + // bindAddressHardFail, if true, tells kube-proxy to treat failure to bind to a + // port as fatal and exit BindAddressHardFail bool // enableProfiling enables profiling via web interface on /debug/pprof handler. // Profiling handlers will be handled by metrics server. EnableProfiling bool - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string - // clientConnection specifies the kubeconfig file and client connection settings for the proxy - // server to use when communicating with the apiserver. - ClientConnection componentbaseconfig.ClientConnectionConfiguration + // showHiddenMetricsForVersion is the version for which you want to show hidden metrics. + ShowHiddenMetricsForVersion string + + // mode specifies which proxy mode to use. + Mode ProxyMode // iptables contains iptables-related configuration options. IPTables KubeProxyIPTablesConfiguration // ipvs contains ipvs-related configuration options. IPVS KubeProxyIPVSConfiguration + // winkernel contains winkernel-related configuration options. + Winkernel KubeProxyWinkernelConfiguration + + // detectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR + DetectLocalMode LocalMode + // detectLocal contains optional configuration settings related to DetectLocalMode. + DetectLocal DetectLocalConfiguration + // clusterCIDR is the CIDR range of the pods in the cluster. (For dual-stack + // clusters, this can be a comma-separated dual-stack pair of CIDR ranges.). When + // DetectLocalMode is set to LocalModeClusterCIDR, kube-proxy will consider + // traffic to be local if its source IP is in this range. (Otherwise it is not + // used.) + ClusterCIDR string + + // nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set, + // connections to NodePort services will only be accepted on node IPs in one of + // the indicated ranges. If unset, NodePort connections will be accepted on all + // local IPs. + NodePortAddresses []string + // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within // the range [-1000, 1000] OOMScoreAdj *int32 - // mode specifies which proxy mode to use. - Mode ProxyMode - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string // conntrack contains conntrack-related configuration options. Conntrack KubeProxyConntrackConfiguration // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater // than 0. 
ConfigSyncPeriod metav1.Duration - // nodePortAddresses is the --nodeport-addresses value for kube-proxy process. Values must be valid - // IP blocks. These values are as a parameter to select the interfaces where nodeport works. - // In case someone would like to expose a service on localhost for local visit and some other interfaces for - // particular purpose, a list of IP blocks would do that. - // If set it to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort. - // If set it to a non-zero IP block, kube-proxy will filter that down to just the IPs that applied to the node. - // An empty string slice is meant to select all network interfaces. - NodePortAddresses []string - // winkernel contains winkernel-related configuration options. - Winkernel KubeProxyWinkernelConfiguration - // ShowHiddenMetricsForVersion is the version for which you want to show hidden metrics. - ShowHiddenMetricsForVersion string - // DetectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR - DetectLocalMode LocalMode - // DetectLocal contains optional configuration settings related to DetectLocalMode. - DetectLocal DetectLocalConfiguration - // Logging specifies the options of logging. - // Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. - Logging logsapi.LoggingConfiguration + + // portRange was previously used to configure the userspace proxy, but is now unused. + PortRange string } // ProxyMode represents modes used by the Kubernetes proxy server. diff --git a/pkg/proxy/apis/config/v1alpha1/defaults.go b/pkg/proxy/apis/config/v1alpha1/defaults.go index 06f812bbb90b9..e883c98111dba 100644 --- a/pkg/proxy/apis/config/v1alpha1/defaults.go +++ b/pkg/proxy/apis/config/v1alpha1/defaults.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/qos" proxyutil "k8s.io/kubernetes/pkg/proxy/util" netutils "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func addDefaultingFuncs(scheme *kruntime.Scheme) error { @@ -66,17 +66,17 @@ func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyCo obj.IPTables.MinSyncPeriod = metav1.Duration{Duration: 1 * time.Second} } if obj.IPTables.LocalhostNodePorts == nil { - obj.IPTables.LocalhostNodePorts = pointer.Bool(true) + obj.IPTables.LocalhostNodePorts = ptr.To(true) } if obj.IPVS.SyncPeriod.Duration == 0 { obj.IPVS.SyncPeriod = metav1.Duration{Duration: 30 * time.Second} } if obj.Conntrack.MaxPerCore == nil { - obj.Conntrack.MaxPerCore = pointer.Int32(32 * 1024) + obj.Conntrack.MaxPerCore = ptr.To[int32](32 * 1024) } if obj.Conntrack.Min == nil { - obj.Conntrack.Min = pointer.Int32(128 * 1024) + obj.Conntrack.Min = ptr.To[int32](128 * 1024) } if obj.IPTables.MasqueradeBit == nil { diff --git a/pkg/proxy/apis/config/v1alpha1/defaults_test.go b/pkg/proxy/apis/config/v1alpha1/defaults_test.go index 3ea0257a94f5c..0ce8c815bd0cb 100644 --- a/pkg/proxy/apis/config/v1alpha1/defaults_test.go +++ b/pkg/proxy/apis/config/v1alpha1/defaults_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "github.com/google/go-cmp/cmp" @@ -53,9 +53,9 @@ func TestDefaultsKubeProxyConfiguration(t *testing.T) { Burst: 10, }, IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ - MasqueradeBit: pointer.Int32(14), + MasqueradeBit: ptr.To[int32](14), MasqueradeAll: false, - LocalhostNodePorts: pointer.Bool(true), + LocalhostNodePorts: ptr.To(true), SyncPeriod: 
metav1.Duration{Duration: 30 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, }, @@ -93,9 +93,9 @@ func TestDefaultsKubeProxyConfiguration(t *testing.T) { Burst: 10, }, IPTables: kubeproxyconfigv1alpha1.KubeProxyIPTablesConfiguration{ - MasqueradeBit: pointer.Int32(14), + MasqueradeBit: ptr.To[int32](14), MasqueradeAll: false, - LocalhostNodePorts: pointer.Bool(true), + LocalhostNodePorts: ptr.To(true), SyncPeriod: metav1.Duration{Duration: 30 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, }, diff --git a/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go b/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go index 3ade1180bcda4..8a20b642a4f2d 100644 --- a/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go +++ b/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go @@ -126,39 +126,39 @@ func Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguratio func autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in *v1alpha1.KubeProxyConfiguration, out *config.KubeProxyConfiguration, s conversion.Scope) error { out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + if err := configv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { + return err + } + out.Logging = in.Logging + out.HostnameOverride = in.HostnameOverride out.BindAddress = in.BindAddress out.HealthzBindAddress = in.HealthzBindAddress out.MetricsBindAddress = in.MetricsBindAddress out.BindAddressHardFail = in.BindAddressHardFail out.EnableProfiling = in.EnableProfiling - out.ClusterCIDR = in.ClusterCIDR - out.HostnameOverride = in.HostnameOverride - if err := configv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } + out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion + out.Mode = config.ProxyMode(in.Mode) if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_config_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil { return err } if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_config_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil { return err } - out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) - out.Mode = config.ProxyMode(in.Mode) - out.PortRange = in.PortRange - if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { - return err - } - out.ConfigSyncPeriod = in.ConfigSyncPeriod - out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses)) if err := Convert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWinkernelConfiguration(&in.Winkernel, &out.Winkernel, s); err != nil { return err } - out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion out.DetectLocalMode = config.LocalMode(in.DetectLocalMode) if err := Convert_v1alpha1_DetectLocalConfiguration_To_config_DetectLocalConfiguration(&in.DetectLocal, &out.DetectLocal, s); err != nil { return err } - out.Logging = in.Logging + out.ClusterCIDR = in.ClusterCIDR + out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses)) + out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) + if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(&in.Conntrack, 
&out.Conntrack, s); err != nil { + return err + } + out.ConfigSyncPeriod = in.ConfigSyncPeriod + out.PortRange = in.PortRange return nil } @@ -169,39 +169,39 @@ func Convert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguration(in func autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *config.KubeProxyConfiguration, out *v1alpha1.KubeProxyConfiguration, s conversion.Scope) error { out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + if err := configv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { + return err + } + out.Logging = in.Logging + out.HostnameOverride = in.HostnameOverride out.BindAddress = in.BindAddress out.HealthzBindAddress = in.HealthzBindAddress out.MetricsBindAddress = in.MetricsBindAddress out.BindAddressHardFail = in.BindAddressHardFail out.EnableProfiling = in.EnableProfiling - out.ClusterCIDR = in.ClusterCIDR - out.HostnameOverride = in.HostnameOverride - if err := configv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } + out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion + out.Mode = v1alpha1.ProxyMode(in.Mode) if err := Convert_config_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil { return err } if err := Convert_config_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil { return err } - out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) - out.Mode = v1alpha1.ProxyMode(in.Mode) - out.PortRange = in.PortRange - if err := Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { - return err - } - out.ConfigSyncPeriod = in.ConfigSyncPeriod - out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses)) if err := Convert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWinkernelConfiguration(&in.Winkernel, &out.Winkernel, s); err != nil { return err } - out.ShowHiddenMetricsForVersion = in.ShowHiddenMetricsForVersion out.DetectLocalMode = v1alpha1.LocalMode(in.DetectLocalMode) if err := Convert_config_DetectLocalConfiguration_To_v1alpha1_DetectLocalConfiguration(&in.DetectLocal, &out.DetectLocal, s); err != nil { return err } - out.Logging = in.Logging + out.ClusterCIDR = in.ClusterCIDR + out.NodePortAddresses = *(*[]string)(unsafe.Pointer(&in.NodePortAddresses)) + out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) + if err := Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { + return err + } + out.ConfigSyncPeriod = in.ConfigSyncPeriod + out.PortRange = in.PortRange return nil } @@ -215,6 +215,8 @@ func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyCon out.Min = (*int32)(unsafe.Pointer(in.Min)) out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout)) out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout)) + out.UDPTimeout = in.UDPTimeout + out.UDPStreamTimeout = in.UDPStreamTimeout return nil } @@ -228,6 +230,8 @@ func autoConvert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyCon out.Min = (*int32)(unsafe.Pointer(in.Min)) 
out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout)) out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout)) + out.UDPTimeout = in.UDPTimeout + out.UDPStreamTimeout = in.UDPStreamTimeout return nil } diff --git a/pkg/proxy/apis/config/validation/validation.go b/pkg/proxy/apis/config/validation/validation.go index 599ba27819cae..44f44d9102292 100644 --- a/pkg/proxy/apis/config/validation/validation.go +++ b/pkg/proxy/apis/config/validation/validation.go @@ -163,14 +163,24 @@ func validateKubeProxyConntrackConfiguration(config kubeproxyconfig.KubeProxyCon allErrs = append(allErrs, field.Invalid(fldPath.Child("Min"), config.Min, "must be greater than or equal to 0")) } + // config.TCPEstablishedTimeout has a default value, so can't be nil. if config.TCPEstablishedTimeout.Duration < 0 { allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPEstablishedTimeout"), config.TCPEstablishedTimeout, "must be greater than or equal to 0")) } + // config.TCPCloseWaitTimeout has a default value, so can't be nil. if config.TCPCloseWaitTimeout.Duration < 0 { allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPCloseWaitTimeout"), config.TCPCloseWaitTimeout, "must be greater than or equal to 0")) } + if config.UDPTimeout.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("UDPTimeout"), config.UDPTimeout, "must be greater than or equal to 0")) + } + + if config.UDPStreamTimeout.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("UDPStreamTimeout"), config.UDPStreamTimeout, "must be greater than or equal to 0")) + } + return allErrs } diff --git a/pkg/proxy/apis/config/validation/validation_test.go b/pkg/proxy/apis/config/validation/validation_test.go index 6044739a15e15..d9cc7283aa902 100644 --- a/pkg/proxy/apis/config/validation/validation_test.go +++ b/pkg/proxy/apis/config/validation/validation_test.go @@ -27,7 +27,7 @@ import ( logsapi "k8s.io/component-base/logs/api/v1" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestValidateKubeProxyConfiguration(t *testing.T) { @@ -54,8 +54,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -74,8 +74,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -94,8 +94,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -114,8 +114,8 @@ func 
TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -134,8 +134,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -154,8 +154,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -174,8 +174,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -194,8 +194,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -218,8 +218,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -256,8 +256,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -280,8 +280,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * 
time.Second}, }, @@ -304,8 +304,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -328,8 +328,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -352,8 +352,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -376,8 +376,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -402,8 +402,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { // not specifying valid period in IPVS mode. 
Mode: kubeproxyconfig.ProxyModeIPVS, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -426,8 +426,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -454,8 +454,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -482,8 +482,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -507,8 +507,8 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, }, Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, }, @@ -557,7 +557,7 @@ func TestValidateKubeProxyIPTablesConfiguration(t *testing.T) { }, "valid custom MasqueradeBit": { config: kubeproxyconfig.KubeProxyIPTablesConfiguration{ - MasqueradeBit: pointer.Int32(5), + MasqueradeBit: ptr.To[int32](5), MasqueradeAll: true, SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -575,7 +575,7 @@ func TestValidateKubeProxyIPTablesConfiguration(t *testing.T) { }, "MinSyncPeriod must be > 0": { config: kubeproxyconfig.KubeProxyIPTablesConfiguration{ - MasqueradeBit: pointer.Int32(5), + MasqueradeBit: ptr.To[int32](5), MasqueradeAll: true, SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: -1 * time.Second}, @@ -584,7 +584,7 @@ func TestValidateKubeProxyIPTablesConfiguration(t *testing.T) { }, "MasqueradeBit cannot be < 0": { config: kubeproxyconfig.KubeProxyIPTablesConfiguration{ - MasqueradeBit: pointer.Int32(-10), + MasqueradeBit: ptr.To[int32](-10), MasqueradeAll: true, SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -593,7 +593,7 @@ func TestValidateKubeProxyIPTablesConfiguration(t *testing.T) { }, "SyncPeriod must be >= MinSyncPeriod": { config: kubeproxyconfig.KubeProxyIPTablesConfiguration{ - MasqueradeBit: pointer.Int32(5), + MasqueradeBit: ptr.To[int32](5), 
MasqueradeAll: true, SyncPeriod: metav1.Duration{Duration: 1 * time.Second}, MinSyncPeriod: metav1.Duration{Duration: 5 * time.Second}, @@ -723,58 +723,92 @@ func TestValidateKubeProxyConntrackConfiguration(t *testing.T) { }{ "valid 5 second timeouts": { config: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + UDPTimeout: metav1.Duration{Duration: 5 * time.Second}, + UDPStreamTimeout: metav1.Duration{Duration: 5 * time.Second}, }, expectedErrs: field.ErrorList{}, }, "valid duration equal to 0 second timeout": { config: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 0 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 0 * time.Second}, + UDPTimeout: metav1.Duration{Duration: 0 * time.Second}, + UDPStreamTimeout: metav1.Duration{Duration: 0 * time.Second}, }, expectedErrs: field.ErrorList{}, }, "invalid MaxPerCore < 0": { config: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(-1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](-1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + UDPTimeout: metav1.Duration{Duration: 5 * time.Second}, + UDPStreamTimeout: metav1.Duration{Duration: 5 * time.Second}, }, expectedErrs: field.ErrorList{field.Invalid(newPath.Child("KubeConntrackConfiguration.MaxPerCore"), -1, "must be greater than or equal to 0")}, }, "invalid minimum < 0": { config: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(-1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](-1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + UDPTimeout: metav1.Duration{Duration: 5 * time.Second}, + UDPStreamTimeout: metav1.Duration{Duration: 5 * time.Second}, }, expectedErrs: field.ErrorList{field.Invalid(newPath.Child("KubeConntrackConfiguration.Min"), -1, "must be greater than or equal to 0")}, }, - "invalid EstablishedTimeout < 0": { + "invalid TCPEstablishedTimeout < 0": { config: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: -5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + UDPTimeout: metav1.Duration{Duration: 5 * time.Second}, + UDPStreamTimeout: metav1.Duration{Duration: 5 * time.Second}, }, expectedErrs: field.ErrorList{field.Invalid(newPath.Child("KubeConntrackConfiguration.TCPEstablishedTimeout"), metav1.Duration{Duration: -5 * time.Second}, "must be greater than or equal to 0")}, }, - "invalid CloseWaitTimeout < 0": { + "invalid TCPCloseWaitTimeout < 0": { config: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, TCPCloseWaitTimeout: &metav1.Duration{Duration: -5 * time.Second}, + UDPTimeout: 
metav1.Duration{Duration: 5 * time.Second}, + UDPStreamTimeout: metav1.Duration{Duration: 5 * time.Second}, }, expectedErrs: field.ErrorList{field.Invalid(newPath.Child("KubeConntrackConfiguration.TCPCloseWaitTimeout"), metav1.Duration{Duration: -5 * time.Second}, "must be greater than or equal to 0")}, }, + "invalid UDPTimeout < 0": { + config: kubeproxyconfig.KubeProxyConntrackConfiguration{ + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + UDPTimeout: metav1.Duration{Duration: -5 * time.Second}, + UDPStreamTimeout: metav1.Duration{Duration: 5 * time.Second}, + }, + expectedErrs: field.ErrorList{field.Invalid(newPath.Child("KubeConntrackConfiguration.UDPTimeout"), metav1.Duration{Duration: -5 * time.Second}, "must be greater than or equal to 0")}, + }, + "invalid UDPStreamTimeout < 0": { + config: kubeproxyconfig.KubeProxyConntrackConfiguration{ + MaxPerCore: ptr.To[int32](1), + Min: ptr.To[int32](1), + TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, + TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, + UDPTimeout: metav1.Duration{Duration: 5 * time.Second}, + UDPStreamTimeout: metav1.Duration{Duration: -5 * time.Second}, + }, + expectedErrs: field.ErrorList{field.Invalid(newPath.Child("KubeConntrackConfiguration.UDPStreamTimeout"), metav1.Duration{Duration: -5 * time.Second}, "must be greater than or equal to 0")}, + }, } for _, testCase := range testCases { diff --git a/pkg/proxy/apis/config/zz_generated.deepcopy.go b/pkg/proxy/apis/config/zz_generated.deepcopy.go index 15a86792daaab..dcbbc88b11d28 100644 --- a/pkg/proxy/apis/config/zz_generated.deepcopy.go +++ b/pkg/proxy/apis/config/zz_generated.deepcopy.go @@ -76,8 +76,16 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { } } out.ClientConnection = in.ClientConnection + in.Logging.DeepCopyInto(&out.Logging) in.IPTables.DeepCopyInto(&out.IPTables) in.IPVS.DeepCopyInto(&out.IPVS) + out.Winkernel = in.Winkernel + out.DetectLocal = in.DetectLocal + if in.NodePortAddresses != nil { + in, out := &in.NodePortAddresses, &out.NodePortAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.OOMScoreAdj != nil { in, out := &in.OOMScoreAdj, &out.OOMScoreAdj *out = new(int32) @@ -85,14 +93,6 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { } in.Conntrack.DeepCopyInto(&out.Conntrack) out.ConfigSyncPeriod = in.ConfigSyncPeriod - if in.NodePortAddresses != nil { - in, out := &in.NodePortAddresses, &out.NodePortAddresses - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.Winkernel = in.Winkernel - out.DetectLocal = in.DetectLocal - in.Logging.DeepCopyInto(&out.Logging) return } @@ -137,6 +137,8 @@ func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackC *out = new(v1.Duration) **out = **in } + out.UDPTimeout = in.UDPTimeout + out.UDPStreamTimeout = in.UDPStreamTimeout return } diff --git a/pkg/proxy/config/api_test.go b/pkg/proxy/config/api_test.go index 422f4f03f168b..d5aea4cbf7126 100644 --- a/pkg/proxy/config/api_test.go +++ b/pkg/proxy/config/api_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" ktesting "k8s.io/client-go/testing" - utilpointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) { @@ -83,7 +83,6 @@ func 
TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) { } func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) { - tcp := v1.ProtocolTCP endpoints1v1 := &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "e1"}, AddressType: discoveryv1.AddressTypeIPv4, @@ -93,8 +92,8 @@ func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) { }, }}, Ports: []discoveryv1.EndpointPort{{ - Port: utilpointer.Int32(8080), - Protocol: &tcp, + Port: ptr.To[int32](8080), + Protocol: ptr.To(v1.ProtocolTCP), }}, } endpoints1v2 := &discoveryv1.EndpointSlice{ @@ -107,8 +106,8 @@ func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) { }, }}, Ports: []discoveryv1.EndpointPort{{ - Port: utilpointer.Int32(8080), - Protocol: &tcp, + Port: ptr.To[int32](8080), + Protocol: ptr.To(v1.ProtocolTCP), }}, } endpoints2 := &discoveryv1.EndpointSlice{ @@ -120,8 +119,8 @@ func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) { }, }}, Ports: []discoveryv1.EndpointPort{{ - Port: utilpointer.Int32(8080), - Protocol: &tcp, + Port: ptr.To[int32](8080), + Protocol: ptr.To(v1.ProtocolTCP), }}, } diff --git a/pkg/proxy/config/config_test.go b/pkg/proxy/config/config_test.go index 0d91cbd815297..3f8c4225c16e6 100644 --- a/pkg/proxy/config/config_test.go +++ b/pkg/proxy/config/config_test.go @@ -32,7 +32,7 @@ import ( informers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" ktesting "k8s.io/client-go/testing" - utilpointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) type sortedServices []*v1.Service @@ -345,7 +345,7 @@ func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) { }, { Addresses: []string{"2.2.2.2"}, }}, - Ports: []discoveryv1.EndpointPort{{Port: utilpointer.Int32(80)}}, + Ports: []discoveryv1.EndpointPort{{Port: ptr.To[int32](80)}}, } endpoints2 := &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"}, @@ -355,7 +355,7 @@ func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) { }, { Addresses: []string{"4.4.4.4"}, }}, - Ports: []discoveryv1.EndpointPort{{Port: utilpointer.Int32(80)}}, + Ports: []discoveryv1.EndpointPort{{Port: ptr.To[int32](80)}}, } fakeWatch.Add(endpoints1) fakeWatch.Add(endpoints2) @@ -391,7 +391,7 @@ func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) { }, { Addresses: []string{"2.2.2.2"}, }}, - Ports: []discoveryv1.EndpointPort{{Port: utilpointer.Int32(80)}}, + Ports: []discoveryv1.EndpointPort{{Port: ptr.To[int32](80)}}, } endpoints2 := &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "bar"}, @@ -401,7 +401,7 @@ func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) { }, { Addresses: []string{"4.4.4.4"}, }}, - Ports: []discoveryv1.EndpointPort{{Port: utilpointer.Int32(80)}}, + Ports: []discoveryv1.EndpointPort{{Port: ptr.To[int32](80)}}, } fakeWatch.Add(endpoints1) fakeWatch.Add(endpoints2) @@ -419,7 +419,7 @@ func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) { }, { Addresses: []string{"6.6.6.6"}, }}, - Ports: []discoveryv1.EndpointPort{{Port: utilpointer.Int32(80)}}, + Ports: []discoveryv1.EndpointPort{{Port: ptr.To[int32](80)}}, } fakeWatch.Add(endpoints3) endpoints = []*discoveryv1.EndpointSlice{endpoints2, endpoints1, endpoints3} @@ -433,7 +433,7 @@ func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) { Endpoints: []discoveryv1.Endpoint{{ Addresses: 
[]string{"7.7.7.7"}, }}, - Ports: []discoveryv1.EndpointPort{{Port: utilpointer.Int32(80)}}, + Ports: []discoveryv1.EndpointPort{{Port: ptr.To[int32](80)}}, } fakeWatch.Modify(endpoints1v2) endpoints = []*discoveryv1.EndpointSlice{endpoints2, endpoints1v2, endpoints3} diff --git a/pkg/proxy/conntrack/cleanup.go b/pkg/proxy/conntrack/cleanup.go index b3b55dab978dc..4d25ff2f861aa 100644 --- a/pkg/proxy/conntrack/cleanup.go +++ b/pkg/proxy/conntrack/cleanup.go @@ -27,23 +27,23 @@ import ( // CleanStaleEntries takes care of flushing stale conntrack entries for services and endpoints. func CleanStaleEntries(isIPv6 bool, exec utilexec.Interface, svcPortMap proxy.ServicePortMap, - serviceUpdateResult proxy.UpdateServiceMapResult, endpointUpdateResult proxy.UpdateEndpointMapResult) { + serviceUpdateResult proxy.UpdateServiceMapResult, endpointsUpdateResult proxy.UpdateEndpointsMapResult) { - deleteStaleServiceConntrackEntries(isIPv6, exec, svcPortMap, serviceUpdateResult, endpointUpdateResult) - deleteStaleEndpointConntrackEntries(exec, svcPortMap, endpointUpdateResult) + deleteStaleServiceConntrackEntries(isIPv6, exec, svcPortMap, serviceUpdateResult, endpointsUpdateResult) + deleteStaleEndpointConntrackEntries(exec, svcPortMap, endpointsUpdateResult) } // deleteStaleServiceConntrackEntries takes care of flushing stale conntrack entries related // to UDP Service IPs. When a service has no endpoints and we drop traffic to it, conntrack // may create "black hole" entries for that IP+port. When the service gets endpoints we // need to delete those entries so further traffic doesn't get dropped. -func deleteStaleServiceConntrackEntries(isIPv6 bool, exec utilexec.Interface, svcPortMap proxy.ServicePortMap, serviceUpdateResult proxy.UpdateServiceMapResult, endpointUpdateResult proxy.UpdateEndpointMapResult) { +func deleteStaleServiceConntrackEntries(isIPv6 bool, exec utilexec.Interface, svcPortMap proxy.ServicePortMap, serviceUpdateResult proxy.UpdateServiceMapResult, endpointsUpdateResult proxy.UpdateEndpointsMapResult) { conntrackCleanupServiceIPs := serviceUpdateResult.DeletedUDPClusterIPs conntrackCleanupServiceNodePorts := sets.New[int]() - // merge newly active services gathered from updateEndpointsMap + // merge newly active services gathered from endpointsUpdateResult // a UDP service that changes from 0 to non-0 endpoints is newly active. - for _, svcPortName := range endpointUpdateResult.NewlyActiveUDPServices { + for _, svcPortName := range endpointsUpdateResult.NewlyActiveUDPServices { if svcInfo, ok := svcPortMap[svcPortName]; ok { klog.V(4).InfoS("Newly-active UDP service may have stale conntrack entries", "servicePortName", svcPortName) conntrackCleanupServiceIPs.Insert(svcInfo.ClusterIP().String()) @@ -78,8 +78,8 @@ func deleteStaleServiceConntrackEntries(isIPv6 bool, exec utilexec.Interface, sv // deleteStaleEndpointConntrackEntries takes care of flushing stale conntrack entries related // to UDP endpoints. After a UDP endpoint is removed we must flush any conntrack entries // for it so that if the same client keeps sending, the packets will get routed to a new endpoint. 
-func deleteStaleEndpointConntrackEntries(exec utilexec.Interface, svcPortMap proxy.ServicePortMap, endpointUpdateResult proxy.UpdateEndpointMapResult) { - for _, epSvcPair := range endpointUpdateResult.DeletedUDPEndpoints { +func deleteStaleEndpointConntrackEntries(exec utilexec.Interface, svcPortMap proxy.ServicePortMap, endpointsUpdateResult proxy.UpdateEndpointsMapResult) { + for _, epSvcPair := range endpointsUpdateResult.DeletedUDPEndpoints { if svcInfo, ok := svcPortMap[epSvcPair.ServicePortName]; ok { endpointIP := proxyutil.IPPart(epSvcPair.Endpoint) nodePort := svcInfo.NodePort() diff --git a/pkg/proxy/endpoints.go b/pkg/proxy/endpoints.go index 47976dbf09493..36e6e98bacdb5 100644 --- a/pkg/proxy/endpoints.go +++ b/pkg/proxy/endpoints.go @@ -43,101 +43,79 @@ var supportedEndpointSliceAddressTypes = sets.New[string]( // or can be used for constructing a more specific EndpointInfo struct // defined by the proxier if needed. type BaseEndpointInfo struct { - Endpoint string // TODO: should be an endpointString type - // IsLocal indicates whether the endpoint is running in same host as kube-proxy. - IsLocal bool + endpoint string // TODO: should be an endpointString type - // ZoneHints represent the zone hints for the endpoint. This is based on - // endpoint.hints.forZones[*].name in the EndpointSlice API. - ZoneHints sets.Set[string] - // Ready indicates whether this endpoint is ready and NOT terminating. - // For pods, this is true if a pod has a ready status and a nil deletion timestamp. - // This is only set when watching EndpointSlices. If using Endpoints, this is always - // true since only ready endpoints are read from Endpoints. - // TODO: Ready can be inferred from Serving and Terminating below when enabled by default. - Ready bool - // Serving indiciates whether this endpoint is ready regardless of its terminating state. + // isLocal indicates whether the endpoint is running on same host as kube-proxy. + isLocal bool + + // ready indicates whether this endpoint is ready and NOT terminating, unless + // PublishNotReadyAddresses is set on the service, in which case it will just + // always be true. + ready bool + // serving indicates whether this endpoint is ready regardless of its terminating state. // For pods this is true if it has a ready status regardless of its deletion timestamp. - // This is only set when watching EndpointSlices. If using Endpoints, this is always - // true since only ready endpoints are read from Endpoints. - Serving bool - // Terminating indicates whether this endpoint is terminating. + serving bool + // terminating indicates whether this endpoint is terminating. // For pods this is true if it has a non-nil deletion timestamp. - // This is only set when watching EndpointSlices. If using Endpoints, this is always - // false since terminating endpoints are always excluded from Endpoints. - Terminating bool - - // NodeName is the name of the node this endpoint belongs to - NodeName string - // Zone is the name of the zone this endpoint belongs to - Zone string + terminating bool + + // zoneHints represent the zone hints for the endpoint. This is based on + // endpoint.hints.forZones[*].name in the EndpointSlice API. + zoneHints sets.Set[string] } var _ Endpoint = &BaseEndpointInfo{} // String is part of proxy.Endpoint interface. func (info *BaseEndpointInfo) String() string { - return info.Endpoint + return info.endpoint +} + +// IP returns just the IP part of the endpoint, it's a part of proxy.Endpoint interface. 
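// Illustrative aside — with the fields unexported, callers interact with endpoints only
// through the accessor methods introduced here (String, IP, Port, IsLocal, IsReady,
// IsServing, IsTerminating, ZoneHints), which the surrounding comments describe as the
// proxy.Endpoint interface. A hypothetical consumer, for illustration only (assumes fmt
// is imported):
func describeEndpoint(ep Endpoint) string {
	port, err := ep.Port()
	if err != nil {
		return fmt.Sprintf("%s (unparseable port: %v)", ep.String(), err)
	}
	suffix := ""
	if ep.IsLocal() { // replaces reading the old exported IsLocal field / GetIsLocal()
		suffix = " (local)"
	}
	return fmt.Sprintf("%s:%d ready=%v serving=%v terminating=%v%s",
		ep.IP(), port, ep.IsReady(), ep.IsServing(), ep.IsTerminating(), suffix)
}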
+func (info *BaseEndpointInfo) IP() string { + return proxyutil.IPPart(info.endpoint) +} + +// Port returns just the Port part of the endpoint. +func (info *BaseEndpointInfo) Port() (int, error) { + return proxyutil.PortPart(info.endpoint) } -// GetIsLocal is part of proxy.Endpoint interface. -func (info *BaseEndpointInfo) GetIsLocal() bool { - return info.IsLocal +// IsLocal is part of proxy.Endpoint interface. +func (info *BaseEndpointInfo) IsLocal() bool { + return info.isLocal } // IsReady returns true if an endpoint is ready and not terminating. func (info *BaseEndpointInfo) IsReady() bool { - return info.Ready + return info.ready } // IsServing returns true if an endpoint is ready, regardless of if the // endpoint is terminating. func (info *BaseEndpointInfo) IsServing() bool { - return info.Serving + return info.serving } // IsTerminating retruns true if an endpoint is terminating. For pods, // that is any pod with a deletion timestamp. func (info *BaseEndpointInfo) IsTerminating() bool { - return info.Terminating -} - -// GetZoneHints returns the zone hint for the endpoint. -func (info *BaseEndpointInfo) GetZoneHints() sets.Set[string] { - return info.ZoneHints -} - -// IP returns just the IP part of the endpoint, it's a part of proxy.Endpoint interface. -func (info *BaseEndpointInfo) IP() string { - return proxyutil.IPPart(info.Endpoint) -} - -// Port returns just the Port part of the endpoint. -func (info *BaseEndpointInfo) Port() (int, error) { - return proxyutil.PortPart(info.Endpoint) -} - -// GetNodeName returns the NodeName for this endpoint. -func (info *BaseEndpointInfo) GetNodeName() string { - return info.NodeName + return info.terminating } -// GetZone returns the Zone for this endpoint. -func (info *BaseEndpointInfo) GetZone() string { - return info.Zone +// ZoneHints returns the zone hint for the endpoint. +func (info *BaseEndpointInfo) ZoneHints() sets.Set[string] { + return info.zoneHints } -func newBaseEndpointInfo(IP, nodeName, zone string, port int, isLocal bool, - ready, serving, terminating bool, zoneHints sets.Set[string]) *BaseEndpointInfo { +func newBaseEndpointInfo(ip string, port int, isLocal, ready, serving, terminating bool, zoneHints sets.Set[string]) *BaseEndpointInfo { return &BaseEndpointInfo{ - Endpoint: net.JoinHostPort(IP, strconv.Itoa(port)), - IsLocal: isLocal, - Ready: ready, - Serving: serving, - Terminating: terminating, - ZoneHints: zoneHints, - NodeName: nodeName, - Zone: zone, + endpoint: net.JoinHostPort(ip, strconv.Itoa(port)), + isLocal: isLocal, + ready: ready, + serving: serving, + terminating: terminating, + zoneHints: zoneHints, } } @@ -147,9 +125,9 @@ type makeEndpointFunc func(info *BaseEndpointInfo, svcPortName *ServicePortName) // EndpointsMap's but just use the changes for any Proxier specific cleanup. type processEndpointsMapChangeFunc func(oldEndpointsMap, newEndpointsMap EndpointsMap) -// EndpointChangeTracker carries state about uncommitted changes to an arbitrary number of +// EndpointsChangeTracker carries state about uncommitted changes to an arbitrary number of // Endpoints, keyed by their namespace and name. -type EndpointChangeTracker struct { +type EndpointsChangeTracker struct { // lock protects lastChangeTriggerTimes lock sync.Mutex @@ -159,16 +137,16 @@ type EndpointChangeTracker struct { // Map from the Endpoints namespaced-name to the times of the triggers that caused the endpoints // object to change. Used to calculate the network-programming-latency. 
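// Illustrative aside — lastChangeTriggerTimes exists so the proxier can report how long it
// took from the change that triggered an endpoints update to the proxy rules actually being
// programmed (the "network-programming-latency" mentioned below). A simplified sketch of the
// consuming side; the function name and the observe callback are invented for illustration,
// while the real code feeds these samples into a kube-proxy latency metric:
func observeNetworkProgrammingLatency(triggerTimes map[types.NamespacedName][]time.Time, syncedAt time.Time, observe func(seconds float64)) {
	for _, times := range triggerTimes {
		for _, t := range times {
			// One sample per trigger: elapsed time from the change that caused this update
			// to the moment the rules covering it were synced.
			observe(syncedAt.Sub(t).Seconds())
		}
	}
}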
lastChangeTriggerTimes map[types.NamespacedName][]time.Time - // record the time when the endpointChangeTracker was created so we can ignore the endpoints + // record the time when the endpointsChangeTracker was created so we can ignore the endpoints // that were generated before, because we can't estimate the network-programming-latency on those. // This is specially problematic on restarts, because we process all the endpoints that may have been // created hours or days before. trackerStartTime time.Time } -// NewEndpointChangeTracker initializes an EndpointsChangeMap -func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc, ipFamily v1.IPFamily, recorder events.EventRecorder, processEndpointsMapChange processEndpointsMapChangeFunc) *EndpointChangeTracker { - return &EndpointChangeTracker{ +// NewEndpointsChangeTracker initializes an EndpointsChangeTracker +func NewEndpointsChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc, ipFamily v1.IPFamily, recorder events.EventRecorder, processEndpointsMapChange processEndpointsMapChangeFunc) *EndpointsChangeTracker { + return &EndpointsChangeTracker{ lastChangeTriggerTimes: make(map[types.NamespacedName][]time.Time), trackerStartTime: time.Now(), processEndpointsMapChange: processEndpointsMapChange, @@ -177,9 +155,9 @@ func NewEndpointChangeTracker(hostname string, makeEndpointInfo makeEndpointFunc } // EndpointSliceUpdate updates given service's endpoints change map based on the endpoints pair. -// It returns true if items changed, otherwise return false. Will add/update/delete items of EndpointsChangeMap. +// It returns true if items changed, otherwise return false. Will add/update/delete items of EndpointsChangeTracker. // If removeSlice is true, slice will be removed, otherwise it will be added or updated. -func (ect *EndpointChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.EndpointSlice, removeSlice bool) bool { +func (ect *EndpointsChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.EndpointSlice, removeSlice bool) bool { if !supportedEndpointSliceAddressTypes.Has(string(endpointSlice.AddressType)) { klog.V(4).InfoS("EndpointSlice address type not supported by kube-proxy", "addressType", endpointSlice.AddressType) return false @@ -225,13 +203,13 @@ func (ect *EndpointChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.E // PendingChanges returns a set whose keys are the names of the services whose endpoints // have changed since the last time ect was used to update an EndpointsMap. (You must call // this _before_ calling em.Update(ect).) -func (ect *EndpointChangeTracker) PendingChanges() sets.Set[string] { +func (ect *EndpointsChangeTracker) PendingChanges() sets.Set[string] { return ect.endpointSliceCache.pendingChanges() } // checkoutChanges returns a list of pending endpointsChanges and marks them as // applied. -func (ect *EndpointChangeTracker) checkoutChanges() []*endpointsChange { +func (ect *EndpointsChangeTracker) checkoutChanges() []*endpointsChange { metrics.EndpointChangesPending.Set(0) return ect.endpointSliceCache.checkoutChanges() @@ -239,7 +217,7 @@ func (ect *EndpointChangeTracker) checkoutChanges() []*endpointsChange { // checkoutTriggerTimes applies the locally cached trigger times to a map of // trigger times that have been passed in and empties the local cache. 
-func (ect *EndpointChangeTracker) checkoutTriggerTimes(lastChangeTriggerTimes *map[types.NamespacedName][]time.Time) { +func (ect *EndpointsChangeTracker) checkoutTriggerTimes(lastChangeTriggerTimes *map[types.NamespacedName][]time.Time) { ect.lock.Lock() defer ect.lock.Unlock() @@ -284,8 +262,8 @@ type endpointsChange struct { current EndpointsMap } -// UpdateEndpointMapResult is the updated results after applying endpoints changes. -type UpdateEndpointMapResult struct { +// UpdateEndpointsMapResult is the updated results after applying endpoints changes. +type UpdateEndpointsMapResult struct { // DeletedUDPEndpoints identifies UDP endpoints that have just been deleted. // Existing conntrack NAT entries pointing to these endpoints must be deleted to // ensure that no further traffic for the Service gets delivered to them. @@ -304,7 +282,7 @@ type UpdateEndpointMapResult struct { } // Update updates endpointsMap base on the given changes. -func (em EndpointsMap) Update(changes *EndpointChangeTracker) (result UpdateEndpointMapResult) { +func (em EndpointsMap) Update(changes *EndpointsChangeTracker) (result UpdateEndpointsMapResult) { result.DeletedUDPEndpoints = make([]ServiceEndpoint, 0) result.NewlyActiveUDPServices = make([]ServicePortName, 0) result.LastChangeTriggerTimes = make(map[types.NamespacedName][]time.Time) @@ -321,7 +299,7 @@ type EndpointsMap map[ServicePortName][]Endpoint // and clear the changes map. In addition it returns (via argument) and resets the // lastChangeTriggerTimes for all endpoints that were changed and will result in syncing // the proxy rules. apply triggers processEndpointsMapChange on every change. -func (em EndpointsMap) apply(ect *EndpointChangeTracker, deletedUDPEndpoints *[]ServiceEndpoint, +func (em EndpointsMap) apply(ect *EndpointsChangeTracker, deletedUDPEndpoints *[]ServiceEndpoint, newlyActiveUDPServices *[]ServicePortName, lastChangeTriggerTimes *map[types.NamespacedName][]time.Time) { if ect == nil { return @@ -364,7 +342,7 @@ func (em EndpointsMap) getLocalReadyEndpointIPs() map[types.NamespacedName]sets. continue } - if ep.GetIsLocal() { + if ep.IsLocal() { nsn := svcPortName.NamespacedName if localIPs[nsn] == nil { localIPs[nsn] = sets.New[string]() @@ -396,7 +374,7 @@ func (em EndpointsMap) LocalReadyEndpoints() map[types.NamespacedName]int { } // detectStaleConntrackEntries detects services that may be associated with stale conntrack entries. -// (See UpdateEndpointMapResult.DeletedUDPEndpoints and .NewlyActiveUDPServices.) +// (See UpdateEndpointsMapResult.DeletedUDPEndpoints and .NewlyActiveUDPServices.) func detectStaleConntrackEntries(oldEndpointsMap, newEndpointsMap EndpointsMap, deletedUDPEndpoints *[]ServiceEndpoint, newlyActiveUDPServices *[]ServicePortName) { // Find the UDP endpoints that we were sending traffic to in oldEndpointsMap, but // are no longer sending to newEndpointsMap. 
The proxier should make sure that diff --git a/pkg/proxy/endpoints_test.go b/pkg/proxy/endpoints_test.go index 824a5a91461f2..686c71f3d6fe6 100644 --- a/pkg/proxy/endpoints_test.go +++ b/pkg/proxy/endpoints_test.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func (proxier *FakeProxier) addEndpointSlice(slice *discovery.EndpointSlice) { @@ -54,7 +54,7 @@ func TestGetLocalEndpointIPs(t *testing.T) { // Case[1]: unnamed port endpointsMap: EndpointsMap{ makeServicePortName("ns1", "ep1", "", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expected: map[types.NamespacedName]sets.Set[string]{}, @@ -62,7 +62,7 @@ func TestGetLocalEndpointIPs(t *testing.T) { // Case[2]: unnamed port local endpointsMap: EndpointsMap{ makeServicePortName("ns1", "ep1", "", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "1.1.1.1:11", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expected: map[types.NamespacedName]sets.Set[string]{ @@ -72,12 +72,12 @@ func TestGetLocalEndpointIPs(t *testing.T) { // Case[3]: named local and non-local ports for the same IP. endpointsMap: EndpointsMap{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "1.1.1.2:11", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "1.1.1.1:12", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "1.1.1.2:12", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expected: map[types.NamespacedName]sets.Set[string]{ @@ -87,21 +87,21 @@ func TestGetLocalEndpointIPs(t *testing.T) { // Case[4]: named local and non-local ports for different IPs. 
endpointsMap: EndpointsMap{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "2.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "2.2.2.2:22", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "2.2.2.22:22", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p23", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "2.2.2.3:23", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "2.2.2.3:23", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "4.4.4.5:44", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "4.4.4.4:44", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "4.4.4.5:44", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns4", "ep4", "p45", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "4.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "4.4.4.6:45", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expected: map[types.NamespacedName]sets.Set[string]{ @@ -112,21 +112,21 @@ func TestGetLocalEndpointIPs(t *testing.T) { // Case[5]: named local and non-local ports for different IPs, some not ready. 
endpointsMap: EndpointsMap{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "2.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "2.2.2.2:22", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "2.2.2.22:22", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p23", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "2.2.2.3:23", IsLocal: true, Ready: false, Serving: true, Terminating: true}, + &BaseEndpointInfo{endpoint: "2.2.2.3:23", isLocal: true, ready: false, serving: true, terminating: true}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "4.4.4.5:44", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "4.4.4.4:44", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "4.4.4.5:44", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns4", "ep4", "p45", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "4.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "4.4.4.6:45", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expected: map[types.NamespacedName]sets.Set[string]{ @@ -137,21 +137,21 @@ func TestGetLocalEndpointIPs(t *testing.T) { // Case[6]: all endpoints are terminating,, so getLocalReadyEndpointIPs should return 0 ready endpoints endpointsMap: EndpointsMap{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: false, Serving: true, Terminating: true}, + &BaseEndpointInfo{endpoint: "1.1.1.1:11", isLocal: false, ready: false, serving: true, terminating: true}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: false, Serving: true, Terminating: true}, - &BaseEndpointInfo{Endpoint: "2.2.2.22:22", IsLocal: true, Ready: false, Serving: true, Terminating: true}, + &BaseEndpointInfo{endpoint: "2.2.2.2:22", isLocal: true, ready: false, serving: true, terminating: true}, + &BaseEndpointInfo{endpoint: "2.2.2.22:22", isLocal: true, ready: false, serving: true, terminating: true}, }, makeServicePortName("ns2", "ep2", "p23", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "2.2.2.3:23", IsLocal: true, Ready: false, Serving: true, Terminating: true}, + &BaseEndpointInfo{endpoint: "2.2.2.3:23", isLocal: true, ready: false, serving: true, terminating: true}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: false, Serving: true, Terminating: true}, - &BaseEndpointInfo{Endpoint: "4.4.4.5:44", IsLocal: false, Ready: false, Serving: true, Terminating: true}, + 
&BaseEndpointInfo{endpoint: "4.4.4.4:44", isLocal: true, ready: false, serving: true, terminating: true}, + &BaseEndpointInfo{endpoint: "4.4.4.5:44", isLocal: false, ready: false, serving: true, terminating: true}, }, makeServicePortName("ns4", "ep4", "p45", v1.ProtocolTCP): []Endpoint{ - &BaseEndpointInfo{Endpoint: "4.4.4.6:45", IsLocal: true, Ready: false, Serving: true, Terminating: true}, + &BaseEndpointInfo{endpoint: "4.4.4.6:45", isLocal: true, ready: false, serving: true, terminating: true}, }, }, expected: make(map[types.NamespacedName]sets.Set[string], 0), @@ -184,9 +184,6 @@ func makeTestEndpointSlice(namespace, name string, slice int, epsFunc func(*disc } func TestUpdateEndpointsMap(t *testing.T) { - var nodeName = testHostname - udp := v1.ProtocolUDP - emptyEndpoint := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{} } @@ -195,61 +192,61 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To(""), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } unnamedPortReady := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To(""), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } unnamedPortTerminating := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To(""), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } unnamedPortLocal := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.1"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To(""), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } namedPortLocal := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.1"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } namedPort := func(eps *discovery.EndpointSlice) { @@ -257,9 +254,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } namedPortRenamed := func(eps *discovery.EndpointSlice) { @@ -267,9 +264,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: 
[]string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11-2"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11-2"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } namedPortRenumbered := func(eps *discovery.EndpointSlice) { @@ -277,9 +274,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(22), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} } namedPortsLocalNoLocal := func(eps *discovery.EndpointSlice) { @@ -287,16 +284,16 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }, { Addresses: []string{"1.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udp, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsets_s1 := func(eps *discovery.EndpointSlice) { @@ -304,9 +301,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsets_s2 := func(eps *discovery.EndpointSlice) { @@ -314,9 +311,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.2"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udp, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsWithLocal_s1 := func(eps *discovery.EndpointSlice) { @@ -324,35 +321,35 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsWithLocal_s2 := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udp, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsMultiplePortsLocal_s1 := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.1"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udp, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsMultiplePortsLocal_s2 := func(eps *discovery.EndpointSlice) { @@ -360,9 +357,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.3"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: 
pointer.String("p13"), - Port: pointer.Int32(13), - Protocol: &udp, + Name: ptr.To("p13"), + Port: ptr.To[int32](13), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsIPsPorts1_s1 := func(eps *discovery.EndpointSlice) { @@ -370,16 +367,16 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }, { Addresses: []string{"1.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udp, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsIPsPorts1_s2 := func(eps *discovery.EndpointSlice) { @@ -387,16 +384,16 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.3"}, }, { Addresses: []string{"1.1.1.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p13"), - Port: pointer.Int32(13), - Protocol: &udp, + Name: ptr.To("p13"), + Port: ptr.To[int32](13), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p14"), - Port: pointer.Int32(14), - Protocol: &udp, + Name: ptr.To("p14"), + Port: ptr.To[int32](14), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsIPsPorts2 := func(eps *discovery.EndpointSlice) { @@ -404,16 +401,16 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"2.2.2.1"}, }, { Addresses: []string{"2.2.2.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p21"), - Port: pointer.Int32(21), - Protocol: &udp, + Name: ptr.To("p21"), + Port: ptr.To[int32](21), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p22"), - Port: pointer.Int32(22), - Protocol: &udp, + Name: ptr.To("p22"), + Port: ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexBefore1 := func(eps *discovery.EndpointSlice) { @@ -421,59 +418,59 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexBefore2_s1 := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"2.2.2.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"2.2.2.22"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p22"), - Port: pointer.Int32(22), - Protocol: &udp, + Name: ptr.To("p22"), + Port: ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexBefore2_s2 := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"2.2.2.3"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p23"), - Port: pointer.Int32(23), - Protocol: &udp, + Name: ptr.To("p23"), + Port: ptr.To[int32](23), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexBefore4_s1 := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"4.4.4.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"4.4.4.5"}, - NodeName: 
&nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p44"), - Port: pointer.Int32(44), - Protocol: &udp, + Name: ptr.To("p44"), + Port: ptr.To[int32](44), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexBefore4_s2 := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"4.4.4.6"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p45"), - Port: pointer.Int32(45), - Protocol: &udp, + Name: ptr.To("p45"), + Port: ptr.To[int32](45), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexAfter1_s1 := func(eps *discovery.EndpointSlice) { @@ -483,9 +480,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.11"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexAfter1_s2 := func(eps *discovery.EndpointSlice) { @@ -493,13 +490,13 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.2"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udp, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p122"), - Port: pointer.Int32(122), - Protocol: &udp, + Name: ptr.To("p122"), + Port: ptr.To[int32](122), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexAfter3 := func(eps *discovery.EndpointSlice) { @@ -507,20 +504,20 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"3.3.3.3"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p33"), - Port: pointer.Int32(33), - Protocol: &udp, + Name: ptr.To("p33"), + Port: ptr.To[int32](33), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexAfter4 := func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"4.4.4.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p44"), - Port: pointer.Int32(44), - Protocol: &udp, + Name: ptr.To("p44"), + Port: ptr.To[int32](44), + Protocol: ptr.To(v1.ProtocolUDP), }} } @@ -555,12 +552,12 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -577,12 +574,12 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: 
"1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -603,18 +600,18 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -633,24 +630,24 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:13", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:13", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -673,54 +670,54 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + 
{endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.2:11", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:13", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.4:13", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:14", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.4:14", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): { - {Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.1:21", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "2.2.2.2:21", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.1:22", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "2.2.2.2:22", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.2:11", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:13", isLocal: false, ready: true, serving: 
true, terminating: false}, + {endpoint: "1.1.1.4:13", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:14", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.4:14", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): { - {Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.1:21", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "2.2.2.2:21", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.1:22", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "2.2.2.2:22", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -741,7 +738,7 @@ func TestUpdateEndpointsMap(t *testing.T) { previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{}, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -762,7 +759,7 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{}, @@ -783,17 +780,17 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.2:11", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: false, ready: true, serving: 
true, terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -814,17 +811,17 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.2:11", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: false, ready: true, serving: true, terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{{ @@ -852,15 +849,15 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -883,15 +880,15 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{{ @@ -911,12 +908,12 @@ func TestUpdateEndpointsMap(t 
*testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{{ @@ -938,12 +935,12 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:22", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{{ @@ -983,39 +980,39 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {Endpoint: "2.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.22:22", isLocal: true, ready: true, serving: true, terminating: false}, + {endpoint: "2.2.2.2:22", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): { - {Endpoint: "2.2.2.3:23", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.3:23", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): { - {Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "4.4.4.5:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "4.4.4.4:44", isLocal: true, ready: true, serving: true, terminating: false}, + {endpoint: "4.4.4.5:44", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): { - {Endpoint: "4.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "4.4.4.6:45", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.11:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.11:11", isLocal: false, ready: true, serving: true, terminating: false}, + 
{endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:122", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:122", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): { - {Endpoint: "3.3.3.3:33", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "3.3.3.3:33", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): { - {Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "4.4.4.4:44", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{{ @@ -1054,7 +1051,7 @@ func TestUpdateEndpointsMap(t *testing.T) { previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{}, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -1073,12 +1070,12 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: false, Serving: true, Terminating: true}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: false, serving: true, terminating: true}, }, }, expectedDeletedUDPEndpoints: []ServiceEndpoint{}, @@ -1095,7 +1092,7 @@ func TestUpdateEndpointsMap(t *testing.T) { }, previousEndpointsMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: false, Serving: true, Terminating: true}, + {endpoint: "1.1.1.1:11", isLocal: false, ready: false, serving: true, terminating: true}, }, }, expectedResult: map[ServicePortName][]*BaseEndpointInfo{}, @@ -1112,7 +1109,7 @@ func TestUpdateEndpointsMap(t *testing.T) { for tci, tc := range testCases { t.Run(tc.name, func(t *testing.T) { fp := newFakeProxier(v1.IPv4Protocol, time.Time{}) - fp.hostname = nodeName + fp.hostname = testHostname // First check that after adding all previous versions of endpoints, // the fp.previousEndpointsMap is as we expect. 
@@ -1199,7 +1196,6 @@ func TestLastChangeTriggerTime(t *testing.T) { t3 := t2.Add(time.Second) createEndpoints := func(namespace, name string, triggerTime time.Time) *discovery.EndpointSlice { - tcp := v1.ProtocolTCP return &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -1216,9 +1212,9 @@ func TestLastChangeTriggerTime(t *testing.T) { Addresses: []string{"1.1.1.1"}, }}, Ports: []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &tcp, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolTCP), }}, } } @@ -1335,12 +1331,12 @@ func TestLastChangeTriggerTime(t *testing.T) { } func TestEndpointSliceUpdate(t *testing.T) { - fqdnSlice := generateEndpointSlice("svc1", "ns1", 2, 5, 999, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}) + fqdnSlice := generateEndpointSlice("svc1", "ns1", 2, 5, 999, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}) fqdnSlice.AddressType = discovery.AddressTypeFQDN testCases := map[string]struct { startingSlices []*discovery.EndpointSlice - endpointChangeTracker *EndpointChangeTracker + endpointsChangeTracker *EndpointsChangeTracker namespacedName types.NamespacedName paramEndpointSlice *discovery.EndpointSlice paramRemoveSlice bool @@ -1350,22 +1346,22 @@ func TestEndpointSliceUpdate(t *testing.T) { }{ // test starting from an empty state "add a simple slice that doesn't already exist": { - startingSlices: []*discovery.EndpointSlice{}, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), - namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - paramRemoveSlice: false, - expectedReturnVal: true, + startingSlices: []*discovery.EndpointSlice{}, + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, + paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + paramRemoveSlice: false, + expectedReturnVal: true, expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:443", isLocal: 
false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:443", isLocal: false, ready: true, serving: true, terminating: false}, }, }, expectedChangedEndpoints: sets.New[string]("ns1/svc1"), @@ -1373,11 +1369,11 @@ func TestEndpointSliceUpdate(t *testing.T) { // test no modification to state - current change should be nil as nothing changes "add the same slice that already exists": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), paramRemoveSlice: false, expectedReturnVal: false, expectedCurrentChange: nil, @@ -1386,9 +1382,9 @@ func TestEndpointSliceUpdate(t *testing.T) { // ensure that only valide address types are processed "add an FQDN slice (invalid address type)": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, paramEndpointSlice: fqdnSlice, paramRemoveSlice: false, @@ -1399,32 +1395,32 @@ func TestEndpointSliceUpdate(t *testing.T) { // test additions to existing state "add a slice that overlaps with existing state": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - generateEndpointSlice("svc1", "ns1", 2, 2, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), - namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 5, 999, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - paramRemoveSlice: false, - expectedReturnVal: true, + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + generateEndpointSlice("svc1", "ns1", 2, 2, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + }, + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, + 
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 5, 999, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + paramRemoveSlice: false, + expectedReturnVal: true, expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.4:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.5:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.4:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.5:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:80", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.4:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.5:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.1:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.4:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.5:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:443", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:443", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedChangedEndpoints: sets.New[string]("ns1/svc1"), @@ -1432,30 +1428,30 @@ func 
TestEndpointSliceUpdate(t *testing.T) { // test additions to existing state with partially overlapping slices and ports "add a slice that overlaps with existing state and partial ports": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - generateEndpointSlice("svc1", "ns1", 2, 2, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), - namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSliceWithOffset("svc1", "ns1", 3, 1, 5, 999, 999, []string{"host1"}, []*int32{pointer.Int32(80)}), - paramRemoveSlice: false, - expectedReturnVal: true, + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + generateEndpointSlice("svc1", "ns1", 2, 2, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + }, + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, + paramEndpointSlice: generateEndpointSliceWithOffset("svc1", "ns1", 3, 1, 5, 999, 999, []string{"host1"}, []*int32{ptr.To[int32](80)}), + paramRemoveSlice: false, + expectedReturnVal: true, expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.4:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.5:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.4:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.5:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:80", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - 
&BaseEndpointInfo{Endpoint: "10.0.2.1:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:443", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:443", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:443", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:443", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedChangedEndpoints: sets.New[string]("ns1/svc1"), @@ -1463,22 +1459,22 @@ func TestEndpointSliceUpdate(t *testing.T) { // test deletions from existing state with partially overlapping slices and ports "remove a slice that overlaps with existing state": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - generateEndpointSlice("svc1", "ns1", 2, 2, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), - namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 5, 999, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - paramRemoveSlice: true, - expectedReturnVal: true, + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + generateEndpointSlice("svc1", "ns1", 2, 2, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + }, + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, + paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 5, 999, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + paramRemoveSlice: true, + expectedReturnVal: true, expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.2.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:80", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.2.1:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:443", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:443", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedChangedEndpoints: sets.New[string]("ns1/svc1"), @@ -1486,12 +1482,12 @@ func TestEndpointSliceUpdate(t *testing.T) { 
// ensure a removal that has no effect turns into a no-op "remove a slice that doesn't even exist in current state": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 5, 999, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - generateEndpointSlice("svc1", "ns1", 2, 2, 999, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + generateEndpointSlice("svc1", "ns1", 1, 5, 999, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + generateEndpointSlice("svc1", "ns1", 2, 2, 999, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 3, 5, 999, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 3, 5, 999, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), paramRemoveSlice: true, expectedReturnVal: false, expectedCurrentChange: nil, @@ -1500,23 +1496,23 @@ func TestEndpointSliceUpdate(t *testing.T) { // start with all endpoints ready, transition to no endpoints ready "transition all endpoints to unready state": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), - namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 1, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - paramRemoveSlice: false, - expectedReturnVal: true, + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, + paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 1, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + paramRemoveSlice: false, + expectedReturnVal: true, expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: true, Ready: false, Serving: false, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: true, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: true, ready: false, serving: false, terminating: false}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: true, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true, Ready: false, 
Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:443", IsLocal: true, Ready: false, Serving: false, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:443", isLocal: true, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", isLocal: true, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:443", isLocal: true, ready: false, serving: false, terminating: false}, }, }, expectedChangedEndpoints: sets.New[string]("ns1/svc1"), @@ -1524,21 +1520,21 @@ func TestEndpointSliceUpdate(t *testing.T) { // start with no endpoints ready, transition to all endpoints ready "transition all endpoints to ready state": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 2, 1, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + generateEndpointSlice("svc1", "ns1", 1, 2, 1, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), - namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 2, 999, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - paramRemoveSlice: false, - expectedReturnVal: true, + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, + paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 2, 999, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + paramRemoveSlice: false, + expectedReturnVal: true, expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", isLocal: true, ready: true, serving: true, terminating: false}, }, }, expectedChangedEndpoints: sets.New[string]("ns1/svc1"), @@ -1546,28 +1542,28 @@ func TestEndpointSliceUpdate(t *testing.T) { // start with some endpoints ready, transition to more endpoints ready "transition some endpoints to ready state": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 2, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - generateEndpointSlice("svc1", "ns1", 2, 2, 2, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), - namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - 
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 3, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - paramRemoveSlice: false, - expectedReturnVal: true, + generateEndpointSlice("svc1", "ns1", 1, 3, 2, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + generateEndpointSlice("svc1", "ns1", 2, 2, 2, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + }, + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, + paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 3, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + paramRemoveSlice: false, + expectedReturnVal: true, expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: true, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.1:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: true, Ready: false, Serving: false, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: true, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:80", isLocal: true, ready: false, serving: false, terminating: false}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:443", IsLocal: true, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.1:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:443", IsLocal: true, Ready: false, Serving: false, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:443", isLocal: true, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:443", isLocal: true, ready: false, serving: false, terminating: false}, }, }, expectedChangedEndpoints: sets.New[string]("ns1/svc1"), @@ -1575,28 +1571,28 @@ func TestEndpointSliceUpdate(t *testing.T) { // start with some endpoints ready, transition to some terminating "transition some endpoints to terminating state": { startingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 2, 2, 
[]string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - generateEndpointSlice("svc1", "ns1", 2, 2, 2, 2, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - }, - endpointChangeTracker: NewEndpointChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), - namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, - paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 3, 2, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), - paramRemoveSlice: false, - expectedReturnVal: true, + generateEndpointSlice("svc1", "ns1", 1, 3, 2, 2, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + generateEndpointSlice("svc1", "ns1", 2, 2, 2, 2, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + }, + endpointsChangeTracker: NewEndpointsChangeTracker("host1", nil, v1.IPv4Protocol, nil, nil), + namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, + paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 3, 2, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), + paramRemoveSlice: false, + expectedReturnVal: true, expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: false, Serving: true, Terminating: true}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: true, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.1:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: true, Ready: false, Serving: false, Terminating: true}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: false, serving: true, terminating: true}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: true, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:80", isLocal: true, ready: false, serving: false, terminating: true}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true, Ready: false, Serving: true, Terminating: true}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:443", IsLocal: true, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.1:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:443", IsLocal: true, Ready: false, Serving: false, Terminating: true}, + &BaseEndpointInfo{endpoint: "10.0.1.1:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", isLocal: true, ready: false, serving: true, terminating: true}, + &BaseEndpointInfo{endpoint: "10.0.1.3:443", isLocal: true, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:443", isLocal: true, ready: false, serving: false, terminating: true}, 
}, }, expectedChangedEndpoints: sets.New[string]("ns1/svc1"), @@ -1605,19 +1601,19 @@ func TestEndpointSliceUpdate(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - initializeCache(tc.endpointChangeTracker.endpointSliceCache, tc.startingSlices) + initializeCache(tc.endpointsChangeTracker.endpointSliceCache, tc.startingSlices) - got := tc.endpointChangeTracker.EndpointSliceUpdate(tc.paramEndpointSlice, tc.paramRemoveSlice) + got := tc.endpointsChangeTracker.EndpointSliceUpdate(tc.paramEndpointSlice, tc.paramRemoveSlice) if !reflect.DeepEqual(got, tc.expectedReturnVal) { t.Errorf("EndpointSliceUpdate return value got: %v, want %v", got, tc.expectedReturnVal) } - pendingChanges := tc.endpointChangeTracker.PendingChanges() + pendingChanges := tc.endpointsChangeTracker.PendingChanges() if !pendingChanges.Equal(tc.expectedChangedEndpoints) { t.Errorf("expected changed endpoints %q, got %q", tc.expectedChangedEndpoints.UnsortedList(), pendingChanges.UnsortedList()) } - changes := tc.endpointChangeTracker.checkoutChanges() + changes := tc.endpointsChangeTracker.checkoutChanges() if tc.expectedCurrentChange == nil { if len(changes) != 0 { t.Errorf("Expected %s to have no changes", tc.namespacedName) @@ -1637,47 +1633,63 @@ func TestCheckoutChanges(t *testing.T) { svcPortName1 := ServicePortName{types.NamespacedName{Namespace: "ns1", Name: "svc1"}, "port-1", v1.ProtocolTCP} testCases := map[string]struct { - endpointChangeTracker *EndpointChangeTracker - expectedChanges []*endpointsChange - items map[types.NamespacedName]*endpointsChange - appliedSlices []*discovery.EndpointSlice - pendingSlices []*discovery.EndpointSlice + endpointsChangeTracker *EndpointsChangeTracker + expectedChanges []*endpointsChange + items map[types.NamespacedName]*endpointsChange + appliedSlices []*discovery.EndpointSlice + pendingSlices []*discovery.EndpointSlice }{ "empty slices": { - endpointChangeTracker: NewEndpointChangeTracker("", nil, v1.IPv4Protocol, nil, nil), - expectedChanges: []*endpointsChange{}, - appliedSlices: []*discovery.EndpointSlice{}, - pendingSlices: []*discovery.EndpointSlice{}, + endpointsChangeTracker: NewEndpointsChangeTracker("", nil, v1.IPv4Protocol, nil, nil), + expectedChanges: []*endpointsChange{}, + appliedSlices: []*discovery.EndpointSlice{}, + pendingSlices: []*discovery.EndpointSlice{}, }, "adding initial slice": { - endpointChangeTracker: NewEndpointChangeTracker("", nil, v1.IPv4Protocol, nil, nil), + endpointsChangeTracker: NewEndpointsChangeTracker("", nil, v1.IPv4Protocol, nil, nil), expectedChanges: []*endpointsChange{{ previous: EndpointsMap{}, current: EndpointsMap{ - svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", "host1", true, true, false), newTestEp("10.0.1.2:80", "host1", false, true, true), newTestEp("10.0.1.3:80", "host1", false, false, false)}, + svcPortName0: []Endpoint{ + &BaseEndpointInfo{endpoint: "10.0.1.1:80", ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", ready: false, serving: true, terminating: true}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", ready: false, serving: false, terminating: false}, + }, }, }}, appliedSlices: []*discovery.EndpointSlice{}, pendingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 3, 2, []string{"host1"}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 3, 2, []string{"host1"}, []*int32{ptr.To[int32](80)}), }, }, "removing port in update": { - endpointChangeTracker: NewEndpointChangeTracker("", nil, 
v1.IPv4Protocol, nil, nil), + endpointsChangeTracker: NewEndpointsChangeTracker("", nil, v1.IPv4Protocol, nil, nil), expectedChanges: []*endpointsChange{{ previous: EndpointsMap{ - svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", "host1", true, true, false), newTestEp("10.0.1.2:80", "host1", true, true, false), newTestEp("10.0.1.3:80", "host1", false, false, false)}, - svcPortName1: []Endpoint{newTestEp("10.0.1.1:443", "host1", true, true, false), newTestEp("10.0.1.2:443", "host1", true, true, false), newTestEp("10.0.1.3:443", "host1", false, false, false)}, + svcPortName0: []Endpoint{ + &BaseEndpointInfo{endpoint: "10.0.1.1:80", ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", ready: false, serving: false, terminating: false}, + }, + svcPortName1: []Endpoint{ + &BaseEndpointInfo{endpoint: "10.0.1.1:443", ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:443", ready: false, serving: false, terminating: false}, + }, }, current: EndpointsMap{ - svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", "host1", true, true, false), newTestEp("10.0.1.2:80", "host1", true, true, false), newTestEp("10.0.1.3:80", "host1", false, false, false)}, + svcPortName0: []Endpoint{ + &BaseEndpointInfo{endpoint: "10.0.1.1:80", ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", ready: false, serving: false, terminating: false}, + }, }, }}, appliedSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 3, 999, []string{"host1"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 3, 999, []string{"host1"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), }, pendingSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 3, 999, []string{"host1"}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 3, 999, []string{"host1"}, []*int32{ptr.To[int32](80)}), }, }, } @@ -1685,13 +1697,13 @@ func TestCheckoutChanges(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { for _, slice := range tc.appliedSlices { - tc.endpointChangeTracker.EndpointSliceUpdate(slice, false) + tc.endpointsChangeTracker.EndpointSliceUpdate(slice, false) } - tc.endpointChangeTracker.checkoutChanges() + tc.endpointsChangeTracker.checkoutChanges() for _, slice := range tc.pendingSlices { - tc.endpointChangeTracker.EndpointSliceUpdate(slice, false) + tc.endpointsChangeTracker.EndpointSliceUpdate(slice, false) } - changes := tc.endpointChangeTracker.checkoutChanges() + changes := tc.endpointsChangeTracker.checkoutChanges() if len(tc.expectedChanges) != len(changes) { t.Fatalf("Expected %d changes, got %d", len(tc.expectedChanges), len(changes)) @@ -1720,7 +1732,7 @@ func compareEndpointsMapsStr(t *testing.T, newMap EndpointsMap, expected map[Ser t.Fatalf("expected %d results, got %d: %v", len(expected), len(newMap), newMap) } endpointEqual := func(a, b *BaseEndpointInfo) bool { - return a.Endpoint == b.Endpoint && a.IsLocal == b.IsLocal && a.Ready == b.Ready && a.Serving == b.Serving && a.Terminating == b.Terminating + return a.endpoint == b.endpoint && a.isLocal == b.isLocal && a.ready == b.ready && 
a.serving == b.serving && a.terminating == b.terminating } for x := range expected { if len(newMap[x]) != len(expected[x]) { @@ -1730,27 +1742,19 @@ func compareEndpointsMapsStr(t *testing.T, newMap EndpointsMap, expected map[Ser for i := range expected[x] { newEp, ok := newMap[x][i].(*BaseEndpointInfo) if !ok { - t.Fatalf("Failed to cast endpointsInfo") + t.Fatalf("Failed to cast endpointInfo") } if !endpointEqual(newEp, expected[x][i]) { t.Fatalf("expected new[%v][%d] to be %v, got %v"+ "(IsLocal expected %v, got %v) (Ready expected %v, got %v) (Serving expected %v, got %v) (Terminating expected %v got %v)", - x, i, expected[x][i], newEp, expected[x][i].IsLocal, newEp.IsLocal, expected[x][i].Ready, newEp.Ready, - expected[x][i].Serving, newEp.Serving, expected[x][i].Terminating, newEp.Terminating) + x, i, expected[x][i], newEp, expected[x][i].isLocal, newEp.isLocal, expected[x][i].ready, newEp.ready, + expected[x][i].serving, newEp.serving, expected[x][i].terminating, newEp.terminating) } } } } } -func newTestEp(ep, host string, ready, serving, terminating bool) *BaseEndpointInfo { - endpointInfo := &BaseEndpointInfo{Endpoint: ep, Ready: ready, Serving: serving, Terminating: terminating} - if host != "" { - endpointInfo.NodeName = host - } - return endpointInfo -} - func initializeCache(endpointSliceCache *EndpointSliceCache, endpointSlices []*discovery.EndpointSlice) { for _, endpointSlice := range endpointSlices { endpointSliceCache.updatePending(endpointSlice, false) diff --git a/pkg/proxy/endpointslicecache.go b/pkg/proxy/endpointslicecache.go index ffbab9913c4b6..87e4305e60c42 100644 --- a/pkg/proxy/endpointslicecache.go +++ b/pkg/proxy/endpointslicecache.go @@ -294,19 +294,9 @@ func (cache *EndpointSliceCache) addEndpoints(svcPortName *ServicePortName, port continue } - isLocal := false - nodeName := "" - if endpoint.NodeName != nil { - isLocal = cache.isLocal(*endpoint.NodeName) - nodeName = *endpoint.NodeName - } - - zone := "" - if endpoint.Zone != nil { - zone = *endpoint.Zone - } + isLocal := endpoint.NodeName != nil && cache.isLocal(*endpoint.NodeName) - endpointInfo := newBaseEndpointInfo(endpoint.Addresses[0], nodeName, zone, portNum, isLocal, + endpointInfo := newBaseEndpointInfo(endpoint.Addresses[0], portNum, isLocal, endpoint.Ready, endpoint.Serving, endpoint.Terminating, endpoint.ZoneHints) // This logic ensures we're deduplicating potential overlapping endpoints diff --git a/pkg/proxy/endpointslicecache_test.go b/pkg/proxy/endpointslicecache_test.go index 414d2ef60b885..30129c6b0755f 100644 --- a/pkg/proxy/endpointslicecache_test.go +++ b/pkg/proxy/endpointslicecache_test.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestEndpointsMapFromESC(t *testing.T) { @@ -40,35 +40,35 @@ func TestEndpointsMapFromESC(t *testing.T) { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, hostname: "host1", endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80), pointer.Int32(443)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80), ptr.To[int32](443)}), }, expectedMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: true, Serving: true, 
Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: false, ready: true, serving: true, terminating: false}, }, makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:443", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:443", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:443", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:443", isLocal: false, ready: true, serving: true, terminating: false}, }, }, }, "2 slices, same port": { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{}, []*int32{pointer.Int32(80)}), - generateEndpointSlice("svc1", "ns1", 2, 3, 999, 999, []string{}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{}, []*int32{ptr.To[int32](80)}), + generateEndpointSlice("svc1", "ns1", 2, 3, 999, 999, []string{}, []*int32{ptr.To[int32](80)}), }, expectedMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.2.3:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.2:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.2.3:80", isLocal: false, ready: true, serving: true, terminating: false}, }, }, }, @@ -77,15 +77,15 @@ func TestEndpointsMapFromESC(t *testing.T) { "2 overlapping slices, same port": { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 
999, []string{}, []*int32{pointer.Int32(80)}), - generateEndpointSlice("svc1", "ns1", 1, 4, 999, 999, []string{}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{}, []*int32{ptr.To[int32](80)}), + generateEndpointSlice("svc1", "ns1", 1, 4, 999, 999, []string{}, []*int32{ptr.To[int32](80)}), }, expectedMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.4:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.4:80", isLocal: false, ready: true, serving: true, terminating: false}, }, }, }, @@ -96,21 +96,21 @@ func TestEndpointsMapFromESC(t *testing.T) { "2 slices, overlapping endpoints, some endpoints unready in 1 or both": { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 10, 3, 999, []string{}, []*int32{pointer.Int32(80)}), - generateEndpointSlice("svc1", "ns1", 1, 10, 6, 999, []string{}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 10, 3, 999, []string{}, []*int32{ptr.To[int32](80)}), + generateEndpointSlice("svc1", "ns1", 1, 10, 6, 999, []string{}, []*int32{ptr.To[int32](80)}), }, expectedMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.10:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.4:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.5:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.6:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.7:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.8:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.9:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.10:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: false, ready: true, serving: true, terminating: false}, + 
&BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.4:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.5:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.6:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.7:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.8:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.9:80", isLocal: false, ready: true, serving: true, terminating: false}, }, }, }, @@ -118,57 +118,57 @@ func TestEndpointsMapFromESC(t *testing.T) { "2 slices, overlapping endpoints, some endpoints unready and some endpoints terminating": { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 10, 3, 5, []string{}, []*int32{pointer.Int32(80)}), - generateEndpointSlice("svc1", "ns1", 1, 10, 6, 5, []string{}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 10, 3, 5, []string{}, []*int32{ptr.To[int32](80)}), + generateEndpointSlice("svc1", "ns1", 1, 10, 6, 5, []string{}, []*int32{ptr.To[int32](80)}), }, expectedMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.10:80", IsLocal: false, Ready: false, Serving: true, Terminating: true}, - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.4:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.5:80", IsLocal: false, Ready: false, Serving: true, Terminating: true}, - &BaseEndpointInfo{Endpoint: "10.0.1.6:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.7:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.8:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.9:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.10:80", isLocal: false, ready: false, serving: true, terminating: true}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.4:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.5:80", isLocal: false, ready: false, serving: true, terminating: true}, + &BaseEndpointInfo{endpoint: "10.0.1.6:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.7:80", isLocal: false, ready: true, serving: true, terminating: false}, + 
&BaseEndpointInfo{endpoint: "10.0.1.8:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.9:80", isLocal: false, ready: true, serving: true, terminating: false}, }, }, }, "2 slices, overlapping endpoints, all unready": { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 10, 1, 999, []string{}, []*int32{pointer.Int32(80)}), - generateEndpointSlice("svc1", "ns1", 1, 10, 1, 999, []string{}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 10, 1, 999, []string{}, []*int32{ptr.To[int32](80)}), + generateEndpointSlice("svc1", "ns1", 1, 10, 1, 999, []string{}, []*int32{ptr.To[int32](80)}), }, expectedMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.10:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.4:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.5:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.6:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.7:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.8:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.9:80", IsLocal: false, Ready: false, Serving: false, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.10:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.4:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.5:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.6:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.7:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.8:80", isLocal: false, ready: false, serving: false, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.9:80", isLocal: false, ready: false, serving: false, terminating: false}, }, }, }, "3 slices with different services and namespaces": { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{}, []*int32{pointer.Int32(80)}), - generateEndpointSlice("svc2", "ns1", 2, 3, 999, 999, []string{}, []*int32{pointer.Int32(80)}), - generateEndpointSlice("svc1", "ns2", 3, 3, 
999, 999, []string{}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{}, []*int32{ptr.To[int32](80)}), + generateEndpointSlice("svc2", "ns1", 2, 3, 999, 999, []string{}, []*int32{ptr.To[int32](80)}), + generateEndpointSlice("svc1", "ns2", 3, 3, 999, 999, []string{}, []*int32{ptr.To[int32](80)}), }, expectedMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.3:80", isLocal: false, ready: true, serving: true, terminating: false}, }, }, }, @@ -188,15 +188,15 @@ func TestEndpointsMapFromESC(t *testing.T) { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, hostname: "host1", endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSliceWithOffset("svc1", "ns1", 1, 1, 2, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80)}), - generateEndpointSliceWithOffset("svc1", "ns1", 2, 1, 2, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(8080)}), + generateEndpointSliceWithOffset("svc1", "ns1", 1, 1, 2, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80)}), + generateEndpointSliceWithOffset("svc1", "ns1", 2, 1, 2, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](8080)}), }, expectedMap: map[ServicePortName][]*BaseEndpointInfo{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { - &BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.1:8080", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true, Ready: true, Serving: true, Terminating: false}, - &BaseEndpointInfo{Endpoint: "10.0.1.2:8080", IsLocal: true, Ready: true, Serving: true, Terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:80", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.1:8080", isLocal: false, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:80", isLocal: true, ready: true, serving: true, terminating: false}, + &BaseEndpointInfo{endpoint: "10.0.1.2:8080", isLocal: true, ready: true, serving: true, terminating: false}, }, }, }, @@ -228,33 +228,30 @@ func TestEndpointInfoByServicePort(t *testing.T) { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, hostname: "host1", endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80)}), + generateEndpointSlice("svc1", "ns1", 1, 3, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80)}), }, expectedMap: spToEndpointMap{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { "10.0.1.1:80": &BaseEndpointInfo{ - Endpoint: "10.0.1.1:80", - IsLocal: false, - NodeName: "host2", - Ready: true, - Serving: true, - Terminating: false, + endpoint: 
"10.0.1.1:80", + isLocal: false, + ready: true, + serving: true, + terminating: false, }, "10.0.1.2:80": &BaseEndpointInfo{ - Endpoint: "10.0.1.2:80", - IsLocal: true, - NodeName: "host1", - Ready: true, - Serving: true, - Terminating: false, + endpoint: "10.0.1.2:80", + isLocal: true, + ready: true, + serving: true, + terminating: false, }, "10.0.1.3:80": &BaseEndpointInfo{ - Endpoint: "10.0.1.3:80", - IsLocal: false, - NodeName: "host2", - Ready: true, - Serving: true, - Terminating: false, + endpoint: "10.0.1.3:80", + isLocal: false, + ready: true, + serving: true, + terminating: false, }, }, }, @@ -263,42 +260,38 @@ func TestEndpointInfoByServicePort(t *testing.T) { namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"}, hostname: "host1", endpointSlices: []*discovery.EndpointSlice{ - generateEndpointSliceWithOffset("svc1", "ns1", 1, 1, 2, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(80)}), - generateEndpointSliceWithOffset("svc1", "ns1", 2, 1, 2, 999, 999, []string{"host1", "host2"}, []*int32{pointer.Int32(8080)}), + generateEndpointSliceWithOffset("svc1", "ns1", 1, 1, 2, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](80)}), + generateEndpointSliceWithOffset("svc1", "ns1", 2, 1, 2, 999, 999, []string{"host1", "host2"}, []*int32{ptr.To[int32](8080)}), }, expectedMap: spToEndpointMap{ makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): { "10.0.1.1:80": &BaseEndpointInfo{ - Endpoint: "10.0.1.1:80", - IsLocal: false, - NodeName: "host2", - Ready: true, - Serving: true, - Terminating: false, + endpoint: "10.0.1.1:80", + isLocal: false, + ready: true, + serving: true, + terminating: false, }, "10.0.1.2:80": &BaseEndpointInfo{ - Endpoint: "10.0.1.2:80", - IsLocal: true, - NodeName: "host1", - Ready: true, - Serving: true, - Terminating: false, + endpoint: "10.0.1.2:80", + isLocal: true, + ready: true, + serving: true, + terminating: false, }, "10.0.1.1:8080": &BaseEndpointInfo{ - Endpoint: "10.0.1.1:8080", - IsLocal: false, - NodeName: "host2", - Ready: true, - Serving: true, - Terminating: false, + endpoint: "10.0.1.1:8080", + isLocal: false, + ready: true, + serving: true, + terminating: false, }, "10.0.1.2:8080": &BaseEndpointInfo{ - Endpoint: "10.0.1.2:8080", - IsLocal: true, - NodeName: "host1", - Ready: true, - Serving: true, - Terminating: false, + endpoint: "10.0.1.2:8080", + isLocal: true, + ready: true, + serving: true, + terminating: false, }, }, }, @@ -324,9 +317,8 @@ func TestEndpointInfoByServicePort(t *testing.T) { func TestEsInfoChanged(t *testing.T) { p80 := int32(80) p443 := int32(443) - tcpProto := v1.ProtocolTCP - port80 := discovery.EndpointPort{Port: &p80, Name: pointer.String("http"), Protocol: &tcpProto} - port443 := discovery.EndpointPort{Port: &p443, Name: pointer.String("https"), Protocol: &tcpProto} + port80 := discovery.EndpointPort{Port: &p80, Name: ptr.To("http"), Protocol: ptr.To(v1.ProtocolTCP)} + port443 := discovery.EndpointPort{Port: &p443, Name: ptr.To("https"), Protocol: ptr.To(v1.ProtocolTCP)} endpoint1 := discovery.Endpoint{Addresses: []string{"10.0.1.0"}} endpoint2 := discovery.Endpoint{Addresses: []string{"10.0.1.1"}} @@ -461,8 +453,6 @@ func TestEsInfoChanged(t *testing.T) { } func generateEndpointSliceWithOffset(serviceName, namespace string, sliceNum, offset, numEndpoints, unreadyMod int, terminatingMod int, hosts []string, portNums []*int32) *discovery.EndpointSlice { - tcpProtocol := v1.ProtocolTCP - endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: 
fmt.Sprintf("%s-%d", serviceName, sliceNum), @@ -476,9 +466,9 @@ func generateEndpointSliceWithOffset(serviceName, namespace string, sliceNum, of for i, portNum := range portNums { endpointSlice.Ports = append(endpointSlice.Ports, discovery.EndpointPort{ - Name: pointer.String(fmt.Sprintf("port-%d", i)), + Name: ptr.To(fmt.Sprintf("port-%d", i)), Port: portNum, - Protocol: &tcpProtocol, + Protocol: ptr.To(v1.ProtocolTCP), }) } @@ -486,9 +476,9 @@ func generateEndpointSliceWithOffset(serviceName, namespace string, sliceNum, of readyCondition := i%unreadyMod != 0 terminatingCondition := i%terminatingMod == 0 - ready := pointer.Bool(readyCondition && !terminatingCondition) - serving := pointer.Bool(readyCondition) - terminating := pointer.Bool(terminatingCondition) + ready := ptr.To(readyCondition && !terminatingCondition) + serving := ptr.To(readyCondition) + terminating := ptr.To(terminatingCondition) endpoint := discovery.Endpoint{ Addresses: []string{fmt.Sprintf("10.0.%d.%d", offset, i)}, diff --git a/pkg/proxy/healthcheck/healthcheck_test.go b/pkg/proxy/healthcheck/healthcheck_test.go index d77a561c8701f..c1c451ef8c299 100644 --- a/pkg/proxy/healthcheck/healthcheck_test.go +++ b/pkg/proxy/healthcheck/healthcheck_test.go @@ -470,7 +470,7 @@ func TestHealthzServer(t *testing.T) { httpFactory := newFakeHTTPServerFactory() fakeClock := testingclock.NewFakeClock(time.Now()) - hs := newProxierHealthServer(listener, httpFactory, fakeClock, "127.0.0.1:10256", 10*time.Second, nil, nil) + hs := newProxierHealthServer(listener, httpFactory, fakeClock, "127.0.0.1:10256", 10*time.Second) server := hs.httpFactory.New(hs.addr, healthzHandler{hs: hs}) hsTest := &serverTest{ @@ -480,26 +480,7 @@ func TestHealthzServer(t *testing.T) { tracking503: 0, } - // Should return 200 "OK" by default. - testHTTPHandler(hsTest, http.StatusOK, t) - - // Should return 200 "OK" after first update - hs.Updated() - testHTTPHandler(hsTest, http.StatusOK, t) - - // Should continue to return 200 "OK" as long as no further updates are queued - fakeClock.Step(25 * time.Second) - testHTTPHandler(hsTest, http.StatusOK, t) - - // Should return 503 "ServiceUnavailable" if exceed max update-processing time - hs.QueuedUpdate() - fakeClock.Step(25 * time.Second) - testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) - - // Should return 200 "OK" after processing update - hs.Updated() - fakeClock.Step(5 * time.Second) - testHTTPHandler(hsTest, http.StatusOK, t) + testProxierHealthUpdater(hs, hsTest, fakeClock, t) // Should return 200 "OK" if we've synced a node, tainted in any other way hs.SyncNode(makeNode(tweakTainted("other"))) @@ -524,7 +505,7 @@ func TestLivezServer(t *testing.T) { httpFactory := newFakeHTTPServerFactory() fakeClock := testingclock.NewFakeClock(time.Now()) - hs := newProxierHealthServer(listener, httpFactory, fakeClock, "127.0.0.1:10256", 10*time.Second, nil, nil) + hs := newProxierHealthServer(listener, httpFactory, fakeClock, "127.0.0.1:10256", 10*time.Second) server := hs.httpFactory.New(hs.addr, livezHandler{hs: hs}) hsTest := &serverTest{ @@ -534,26 +515,7 @@ func TestLivezServer(t *testing.T) { tracking503: 0, } - // Should return 200 "OK" by default. 
- testHTTPHandler(hsTest, http.StatusOK, t) - - // Should return 200 "OK" after first update - hs.Updated() - testHTTPHandler(hsTest, http.StatusOK, t) - - // Should continue to return 200 "OK" as long as no further updates are queued - fakeClock.Step(25 * time.Second) - testHTTPHandler(hsTest, http.StatusOK, t) - - // Should return 503 "ServiceUnavailable" if exceed max update-processing time - hs.QueuedUpdate() - fakeClock.Step(25 * time.Second) - testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) - - // Should return 200 "OK" after processing update - hs.Updated() - fakeClock.Step(5 * time.Second) - testHTTPHandler(hsTest, http.StatusOK, t) + testProxierHealthUpdater(hs, hsTest, fakeClock, t) // Should return 200 "OK" irrespective of node syncs hs.SyncNode(makeNode(tweakTainted("other"))) @@ -579,6 +541,77 @@ var ( livezURL url = "/livez" ) +func testProxierHealthUpdater(hs *ProxierHealthServer, hsTest *serverTest, fakeClock *testingclock.FakeClock, t *testing.T) { + // Should return 200 "OK" by default. + testHTTPHandler(hsTest, http.StatusOK, t) + + // Should return 200 "OK" after first update for both IPv4 and IPv6 proxiers. + hs.Updated(v1.IPv4Protocol) + hs.Updated(v1.IPv6Protocol) + testHTTPHandler(hsTest, http.StatusOK, t) + + // Should continue to return 200 "OK" as long as no further updates are queued for any proxier. + fakeClock.Step(25 * time.Second) + testHTTPHandler(hsTest, http.StatusOK, t) + + // Should return 503 "ServiceUnavailable" if the IPv4 proxier exceeds the max update-processing time. + hs.QueuedUpdate(v1.IPv4Protocol) + fakeClock.Step(25 * time.Second) + testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) + + // Should return 200 "OK" after processing update for both IPv4 and IPv6 proxiers. + hs.Updated(v1.IPv4Protocol) + hs.Updated(v1.IPv6Protocol) + fakeClock.Step(5 * time.Second) + testHTTPHandler(hsTest, http.StatusOK, t) + + // Should return 503 "ServiceUnavailable" if the IPv6 proxier exceeds the max update-processing time. + hs.QueuedUpdate(v1.IPv6Protocol) + fakeClock.Step(25 * time.Second) + testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) + + // Should return 200 "OK" after processing update for both IPv4 and IPv6 proxiers. + hs.Updated(v1.IPv4Protocol) + hs.Updated(v1.IPv6Protocol) + fakeClock.Step(5 * time.Second) + testHTTPHandler(hsTest, http.StatusOK, t) + + // Should return 503 "ServiceUnavailable" if both IPv4 and IPv6 proxiers exceed the max update-processing time. + hs.QueuedUpdate(v1.IPv4Protocol) + hs.QueuedUpdate(v1.IPv6Protocol) + fakeClock.Step(25 * time.Second) + testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) + + // Should return 200 "OK" after processing update for both IPv4 and IPv6 proxiers. + hs.Updated(v1.IPv4Protocol) + hs.Updated(v1.IPv6Protocol) + fakeClock.Step(5 * time.Second) + testHTTPHandler(hsTest, http.StatusOK, t) + + // If the IPv6 proxier is late for an update but the IPv4 proxier is not, then updating the IPv4 proxier should have no effect. + hs.QueuedUpdate(v1.IPv6Protocol) + fakeClock.Step(25 * time.Second) + testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) + + hs.Updated(v1.IPv4Protocol) + testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) + + hs.Updated(v1.IPv6Protocol) + testHTTPHandler(hsTest, http.StatusOK, t) + + // If both IPv4 and IPv6 proxiers are late for an update, we shouldn't report 200 "OK" until after both of them update. 
+ hs.QueuedUpdate(v1.IPv4Protocol) + hs.QueuedUpdate(v1.IPv6Protocol) + fakeClock.Step(25 * time.Second) + testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) + + hs.Updated(v1.IPv4Protocol) + testHTTPHandler(hsTest, http.StatusServiceUnavailable, t) + + hs.Updated(v1.IPv6Protocol) + testHTTPHandler(hsTest, http.StatusOK, t) +} + func testHTTPHandler(hsTest *serverTest, status int, t *testing.T) { handler := hsTest.server.(*fakeHTTPServer).handler req, err := http.NewRequest("GET", string(hsTest.url), nil) diff --git a/pkg/proxy/healthcheck/proxier_health.go b/pkg/proxy/healthcheck/proxier_health.go index 2ffdcc2b9d2db..7b009fba1350c 100644 --- a/pkg/proxy/healthcheck/proxier_health.go +++ b/pkg/proxy/healthcheck/proxier_health.go @@ -19,13 +19,11 @@ package healthcheck import ( "fmt" "net/http" - "sync/atomic" + "sync" "time" v1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/events" "k8s.io/klog/v2" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy/metrics" "k8s.io/utils/clock" ) @@ -36,129 +34,135 @@ const ( ToBeDeletedTaint = "ToBeDeletedByClusterAutoscaler" ) -// ProxierHealthUpdater allows callers to update healthz timestamp only. -type ProxierHealthUpdater interface { - // QueuedUpdate should be called when the proxier receives a Service or Endpoints - // event containing information that requires updating service rules. - QueuedUpdate() - - // Updated should be called when the proxier has successfully updated the service - // rules to reflect the current state. - Updated() - - // Run starts the healthz HTTP server and blocks until it exits. - Run() error - - // Sync the node and determine if its eligible or not. Eligible is - // defined as being: not tainted by ToBeDeletedTaint and not deleted. - SyncNode(node *v1.Node) - - proxierHealthChecker -} - -var _ ProxierHealthUpdater = &proxierHealthServer{} -var zeroTime = time.Time{} - -// proxierHealthServer returns 200 "OK" by default. It verifies that the delay between -// QueuedUpdate() calls and Updated() calls never exceeds healthTimeout. -type proxierHealthServer struct { +// ProxierHealthServer allows callers to: +// 1. run an HTTP server with /healthz and /livez endpoint handlers. +// 2. update healthz timestamps before and after synchronizing the dataplane. +// 3. sync node status, for reporting an unhealthy /healthz response +// if the node is marked for deletion by the autoscaler. +// 4. get proxy health by verifying whether the delay between QueuedUpdate() +// calls and Updated() calls exceeds healthTimeout. +type ProxierHealthServer struct { listener listener httpFactory httpServerFactory clock clock.Clock addr string healthTimeout time.Duration - recorder events.EventRecorder - nodeRef *v1.ObjectReference - lastUpdated atomic.Value - oldestPendingQueued atomic.Value - nodeEligible atomic.Bool + lock sync.RWMutex + lastUpdatedMap map[v1.IPFamily]time.Time + oldestPendingQueuedMap map[v1.IPFamily]time.Time + nodeEligible bool } // NewProxierHealthServer returns a proxier health http server. 
-func NewProxierHealthServer(addr string, healthTimeout time.Duration, recorder events.EventRecorder, nodeRef *v1.ObjectReference) ProxierHealthUpdater { - return newProxierHealthServer(stdNetListener{}, stdHTTPServerFactory{}, clock.RealClock{}, addr, healthTimeout, recorder, nodeRef) +func NewProxierHealthServer(addr string, healthTimeout time.Duration) *ProxierHealthServer { + return newProxierHealthServer(stdNetListener{}, stdHTTPServerFactory{}, clock.RealClock{}, addr, healthTimeout) } -func newProxierHealthServer(listener listener, httpServerFactory httpServerFactory, c clock.Clock, addr string, healthTimeout time.Duration, recorder events.EventRecorder, nodeRef *v1.ObjectReference) *proxierHealthServer { - hs := &proxierHealthServer{ +func newProxierHealthServer(listener listener, httpServerFactory httpServerFactory, c clock.Clock, addr string, healthTimeout time.Duration) *ProxierHealthServer { + return &ProxierHealthServer{ listener: listener, httpFactory: httpServerFactory, clock: c, addr: addr, healthTimeout: healthTimeout, - recorder: recorder, - nodeRef: nodeRef, + + lastUpdatedMap: make(map[v1.IPFamily]time.Time), + oldestPendingQueuedMap: make(map[v1.IPFamily]time.Time), + // The node is eligible (and thus the proxy healthy) while it's starting up + // and until we've processed the first node event that indicates the + // contrary. + nodeEligible: true, } - // The node is eligible (and thus the proxy healthy) while it's starting up - // and until we've processed the first node event that indicates the - // contrary. - hs.nodeEligible.Store(true) - return hs } -// Updated indicates that kube-proxy has successfully updated its backend, so it should -// be considered healthy now. -func (hs *proxierHealthServer) Updated() { - hs.oldestPendingQueued.Store(zeroTime) - hs.lastUpdated.Store(hs.clock.Now()) +// Updated should be called when the proxier of the given IP family has successfully updated +// the service rules to reflect the current state and should be considered healthy now. +func (hs *ProxierHealthServer) Updated(ipFamily v1.IPFamily) { + hs.lock.Lock() + defer hs.lock.Unlock() + delete(hs.oldestPendingQueuedMap, ipFamily) + hs.lastUpdatedMap[ipFamily] = hs.clock.Now() } -// QueuedUpdate indicates that the proxy has received changes from the apiserver but -// has not yet pushed them to its backend. If the proxy does not call Updated within the +// QueuedUpdate should be called when the proxier receives a Service or Endpoints event +// from API Server containing information that requires updating service rules. It +// indicates that the proxier for the given IP family has received changes but has not +// yet pushed them to its backend. If the proxier does not call Updated within the // healthTimeout time then it will be considered unhealthy. -func (hs *proxierHealthServer) QueuedUpdate() { - // Set oldestPendingQueued only if it's currently zero - hs.oldestPendingQueued.CompareAndSwap(zeroTime, hs.clock.Now()) +func (hs *ProxierHealthServer) QueuedUpdate(ipFamily v1.IPFamily) { + hs.lock.Lock() + defer hs.lock.Unlock() + // Set oldestPendingQueuedMap[ipFamily] only if it's currently unset + if _, set := hs.oldestPendingQueuedMap[ipFamily]; !set { + hs.oldestPendingQueuedMap[ipFamily] = hs.clock.Now() + } } // IsHealthy returns only the proxier's health state, following the same // definition the HTTP server defines, but ignoring the state of the Node. 
-func (hs *proxierHealthServer) IsHealthy() bool { - isHealthy, _, _ := hs.isHealthy() +func (hs *ProxierHealthServer) IsHealthy() bool { + isHealthy, _ := hs.isHealthy() return isHealthy } -func (hs *proxierHealthServer) isHealthy() (bool, time.Time, time.Time) { - var oldestPendingQueued, lastUpdated time.Time - if val := hs.oldestPendingQueued.Load(); val != nil { - oldestPendingQueued = val.(time.Time) - } - if val := hs.lastUpdated.Load(); val != nil { - lastUpdated = val.(time.Time) - } +func (hs *ProxierHealthServer) isHealthy() (bool, time.Time) { + hs.lock.RLock() + defer hs.lock.RUnlock() + + var lastUpdated time.Time currentTime := hs.clock.Now() - healthy := false - switch { - case oldestPendingQueued.IsZero(): - // The proxy is healthy while it's starting up - // or the proxy is fully synced. - healthy = true - case currentTime.Sub(oldestPendingQueued) < hs.healthTimeout: - // There's an unprocessed update queued, but it's not late yet - healthy = true + for ipFamily, proxierLastUpdated := range hs.lastUpdatedMap { + + if proxierLastUpdated.After(lastUpdated) { + lastUpdated = proxierLastUpdated + } + + if _, set := hs.oldestPendingQueuedMap[ipFamily]; !set { + // the proxier is healthy while it's starting up + // or the proxier is fully synced. + continue + } + + if currentTime.Sub(hs.oldestPendingQueuedMap[ipFamily]) < hs.healthTimeout { + // there's an unprocessed update queued for this proxier, but it's not late yet. + continue + } + return false, proxierLastUpdated } - return healthy, lastUpdated, currentTime + return true, lastUpdated } -func (hs *proxierHealthServer) SyncNode(node *v1.Node) { +// SyncNode syncs the node and determines if it is eligible or not. Eligible is +// defined as being: not tainted by ToBeDeletedTaint and not deleted. +func (hs *ProxierHealthServer) SyncNode(node *v1.Node) { + hs.lock.Lock() + defer hs.lock.Unlock() + if !node.DeletionTimestamp.IsZero() { - hs.nodeEligible.Store(false) + hs.nodeEligible = false return } for _, taint := range node.Spec.Taints { if taint.Key == ToBeDeletedTaint { - hs.nodeEligible.Store(false) + hs.nodeEligible = false return } } - hs.nodeEligible.Store(true) + hs.nodeEligible = true +} + +// NodeEligible returns nodeEligible field of ProxierHealthServer. +func (hs *ProxierHealthServer) NodeEligible() bool { + hs.lock.RLock() + defer hs.lock.RUnlock() + return hs.nodeEligible } // Run starts the healthz HTTP server and blocks until it exits. 
-func (hs *proxierHealthServer) Run() error { +func (hs *ProxierHealthServer) Run() error { serveMux := http.NewServeMux() serveMux.Handle("/healthz", healthzHandler{hs: hs}) serveMux.Handle("/livez", livezHandler{hs: hs}) @@ -166,12 +170,7 @@ func (hs *proxierHealthServer) Run() error { listener, err := hs.listener.Listen(hs.addr) if err != nil { - msg := fmt.Sprintf("failed to start proxier healthz on %s: %v", hs.addr, err) - // TODO(thockin): move eventing back to caller - if hs.recorder != nil { - hs.recorder.Eventf(hs.nodeRef, nil, api.EventTypeWarning, "FailedToStartProxierHealthcheck", "StartKubeProxy", msg) - } - return fmt.Errorf("%v", msg) + return fmt.Errorf("failed to start proxier healthz on %s: %v", hs.addr, err) } klog.V(3).InfoS("Starting healthz HTTP server", "address", hs.addr) @@ -183,12 +182,14 @@ func (hs *proxierHealthServer) Run() error { } type healthzHandler struct { - hs *proxierHealthServer + hs *ProxierHealthServer } func (h healthzHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { - nodeEligible := h.hs.nodeEligible.Load() - healthy, lastUpdated, currentTime := h.hs.isHealthy() + nodeEligible := h.hs.NodeEligible() + healthy, lastUpdated := h.hs.isHealthy() + currentTime := h.hs.clock.Now() + healthy = healthy && nodeEligible resp.Header().Set("Content-Type", "application/json") resp.Header().Set("X-Content-Type-Options", "nosniff") @@ -209,11 +210,12 @@ func (h healthzHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { } type livezHandler struct { - hs *proxierHealthServer + hs *ProxierHealthServer } func (h livezHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { - healthy, lastUpdated, currentTime := h.hs.isHealthy() + healthy, lastUpdated := h.hs.isHealthy() + currentTime := h.hs.clock.Now() resp.Header().Set("Content-Type", "application/json") resp.Header().Set("X-Content-Type-Options", "nosniff") if !healthy { diff --git a/pkg/proxy/iptables/number_generated_rules_test.go b/pkg/proxy/iptables/number_generated_rules_test.go index 4d75bdee8782f..e160d5407f155 100644 --- a/pkg/proxy/iptables/number_generated_rules_test.go +++ b/pkg/proxy/iptables/number_generated_rules_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing" netutils "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // kube-proxy generates iptables rules to forward traffic from Services to Endpoints @@ -361,8 +361,6 @@ func generateServiceEndpoints(nServices, nEndpoints int, epsFunc func(eps *disco baseEp := netutils.BigForIP(netutils.ParseIPSloppy("172.16.0.1")) epPort := 8080 - tcpProtocol := v1.ProtocolTCP - eps := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "ep", @@ -371,9 +369,9 @@ func generateServiceEndpoints(nServices, nEndpoints int, epsFunc func(eps *disco AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{}, Ports: []discovery.EndpointPort{{ - Name: pointer.String(fmt.Sprintf("%d", epPort)), - Port: pointer.Int32(int32(epPort)), - Protocol: &tcpProtocol, + Name: ptr.To(fmt.Sprintf("%d", epPort)), + Port: ptr.To(int32(epPort)), + Protocol: ptr.To(v1.ProtocolTCP), }}, } diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index e8442ff9632e4..9ecc3ac5090eb 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -91,7 +91,6 @@ const ( ) const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet" -const sysctlBridgeCallIPTables = 
"net/bridge/bridge-nf-call-iptables" const sysctlNFConntrackTCPBeLiberal = "net/netfilter/nf_conntrack_tcp_be_liberal" // internal struct for string service information @@ -123,28 +122,31 @@ func newServiceInfo(port *v1.ServicePort, service *v1.Service, bsvcPortInfo *pro } // internal struct for endpoints information -type endpointsInfo struct { +type endpointInfo struct { *proxy.BaseEndpointInfo ChainName utiliptables.Chain } -// returns a new proxy.Endpoint which abstracts a endpointsInfo +// returns a new proxy.Endpoint which abstracts a endpointInfo func newEndpointInfo(baseInfo *proxy.BaseEndpointInfo, svcPortName *proxy.ServicePortName) proxy.Endpoint { - return &endpointsInfo{ + return &endpointInfo{ BaseEndpointInfo: baseInfo, - ChainName: servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(svcPortName.Protocol)), baseInfo.Endpoint), + ChainName: servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(svcPortName.Protocol)), baseInfo.String()), } } // Proxier is an iptables based proxy for connections between a localhost:lport // and services that provide the actual backends. type Proxier struct { + // ipFamily defines the IP family which this proxier is tracking. + ipFamily v1.IPFamily + // endpointsChanges and serviceChanges contains all changes to endpoints and // services that happened since iptables was synced. For a single object, // changes are accumulated, i.e. previous is state from before all of them, // current is state after applying all of those. - endpointsChanges *proxy.EndpointChangeTracker + endpointsChanges *proxy.EndpointsChangeTracker serviceChanges *proxy.ServiceChangeTracker mu sync.Mutex // protects the following fields @@ -173,7 +175,7 @@ type Proxier struct { recorder events.EventRecorder serviceHealthServer healthcheck.ServiceHealthServer - healthzServer healthcheck.ProxierHealthUpdater + healthzServer *healthcheck.ProxierHealthServer // Since converting probabilities (floats) to strings is expensive // and we are using only probabilities in the format of 1/n, we are @@ -229,8 +231,9 @@ func NewProxier(ipFamily v1.IPFamily, hostname string, nodeIP net.IP, recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, + healthzServer *healthcheck.ProxierHealthServer, nodePortAddressStrings []string, + initOnly bool, ) (*Proxier, error) { nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings) @@ -254,11 +257,10 @@ func NewProxier(ipFamily v1.IPFamily, conntrackTCPLiberal = true klog.InfoS("nf_conntrack_tcp_be_liberal set, not installing DROP rules for INVALID packets") } - // Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers - // are connected to a Linux bridge (but not SDN bridges). Until most - // plugins handle this, log when config is missing - if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 { - klog.InfoS("Missing br-netfilter module or unset sysctl br-nf-call-iptables, proxy may not work as intended") + + if initOnly { + klog.InfoS("System initialized and --init-only specified") + return nil, nil } // Generate the masquerade mark to use for SNAT rules. 
@@ -269,10 +271,11 @@ func NewProxier(ipFamily v1.IPFamily, serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) proxier := &Proxier{ + ipFamily: ipFamily, svcPortMap: make(proxy.ServicePortMap), serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil), endpointsMap: make(proxy.EndpointsMap), - endpointsChanges: proxy.NewEndpointChangeTracker(hostname, newEndpointInfo, ipFamily, recorder, nil), + endpointsChanges: proxy.NewEndpointsChangeTracker(hostname, newEndpointInfo, ipFamily, recorder, nil), needFullSync: true, syncPeriod: syncPeriod, iptables: ipt, @@ -331,23 +334,27 @@ func NewDualStackProxier( hostname string, nodeIPs map[v1.IPFamily]net.IP, recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, + healthzServer *healthcheck.ProxierHealthServer, nodePortAddresses []string, + initOnly bool, ) (proxy.Provider, error) { // Create an ipv4 instance of the single-stack proxier ipv4Proxier, err := NewProxier(v1.IPv4Protocol, ipt[0], sysctl, exec, syncPeriod, minSyncPeriod, masqueradeAll, localhostNodePorts, masqueradeBit, localDetectors[0], hostname, - nodeIPs[v1.IPv4Protocol], recorder, healthzServer, nodePortAddresses) + nodeIPs[v1.IPv4Protocol], recorder, healthzServer, nodePortAddresses, initOnly) if err != nil { return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) } ipv6Proxier, err := NewProxier(v1.IPv6Protocol, ipt[1], sysctl, exec, syncPeriod, minSyncPeriod, masqueradeAll, false, masqueradeBit, localDetectors[1], hostname, - nodeIPs[v1.IPv6Protocol], recorder, healthzServer, nodePortAddresses) + nodeIPs[v1.IPv6Protocol], recorder, healthzServer, nodePortAddresses, initOnly) if err != nil { return nil, fmt.Errorf("unable to create ipv6 proxier: %v", err) } + if initOnly { + return nil, nil + } return metaproxier.NewMetaProxier(ipv4Proxier, ipv6Proxier), nil } @@ -493,7 +500,7 @@ func (proxier *Proxier) probability(n int) string { // Sync is called to synchronize the proxier state to iptables as soon as possible. func (proxier *Proxier) Sync() { if proxier.healthzServer != nil { - proxier.healthzServer.QueuedUpdate() + proxier.healthzServer.QueuedUpdate(proxier.ipFamily) } metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() proxier.syncRunner.Run() @@ -503,7 +510,7 @@ func (proxier *Proxier) Sync() { func (proxier *Proxier) SyncLoop() { // Update healthz timestamp at beginning in case Sync() never succeeds. if proxier.healthzServer != nil { - proxier.healthzServer.Updated() + proxier.healthzServer.Updated(proxier.ipFamily) } // synthesize "last change queued" time as the informers are syncing. @@ -959,7 +966,7 @@ func (proxier *Proxier) syncProxyRules() { // Note the endpoint chains that will be used for _, ep := range allLocallyReachableEndpoints { - if epInfo, ok := ep.(*endpointsInfo); ok { + if epInfo, ok := ep.(*endpointInfo); ok { activeNATChains[epInfo.ChainName] = true } } @@ -1352,9 +1359,9 @@ func (proxier *Proxier) syncProxyRules() { // Generate the per-endpoint chains. for _, ep := range allLocallyReachableEndpoints { - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if !ok { - klog.ErrorS(nil, "Failed to cast endpointsInfo", "endpointsInfo", ep) + klog.ErrorS(nil, "Failed to cast endpointInfo", "endpointInfo", ep) continue } @@ -1376,7 +1383,7 @@ func (proxier *Proxier) syncProxyRules() { args = append(args, "-m", "recent", "--name", string(endpointChain), "--set") } // DNAT to final destination. 
- args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", epInfo.Endpoint) + args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", epInfo.String()) natRules.Write(args) } } @@ -1541,7 +1548,7 @@ func (proxier *Proxier) syncProxyRules() { metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal").Set(float64(serviceNoLocalEndpointsTotalInternal)) metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external").Set(float64(serviceNoLocalEndpointsTotalExternal)) if proxier.healthzServer != nil { - proxier.healthzServer.Updated() + proxier.healthzServer.Updated(proxier.ipFamily) } metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() @@ -1563,11 +1570,11 @@ func (proxier *Proxier) writeServiceToEndpointRules(natRules proxyutil.LineBuffe // First write session affinity rules, if applicable. if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP { for _, ep := range endpoints { - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if !ok { continue } - comment := fmt.Sprintf(`"%s -> %s"`, svcPortNameString, epInfo.Endpoint) + comment := fmt.Sprintf(`"%s -> %s"`, svcPortNameString, epInfo.String()) args = append(args[:0], "-A", string(svcChain), @@ -1585,11 +1592,11 @@ func (proxier *Proxier) writeServiceToEndpointRules(natRules proxyutil.LineBuffe // Now write loadbalancing rules. numEndpoints := len(endpoints) for i, ep := range endpoints { - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if !ok { continue } - comment := fmt.Sprintf(`"%s -> %s"`, svcPortNameString, epInfo.Endpoint) + comment := fmt.Sprintf(`"%s -> %s"`, svcPortNameString, epInfo.String()) args = append(args[:0], "-A", string(svcChain)) args = proxier.appendServiceCommentLocked(args, comment) diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index ca00461124b82..ccc52091d2c95 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -57,15 +57,9 @@ import ( "k8s.io/utils/exec" fakeexec "k8s.io/utils/exec/testing" netutils "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) -// (Note that we don't use UDP ports in most of the tests here, because if you create UDP -// services you have to deal with setting up the FakeExec correctly for the conntrack -// cleanup calls.) -var tcpProtocol = v1.ProtocolTCP -var sctpProtocol = v1.ProtocolSCTP - func TestDeleteEndpointConnections(t *testing.T) { const ( UDP = v1.ProtocolUDP @@ -219,9 +213,9 @@ func TestDeleteEndpointConnections(t *testing.T) { Addresses: []string{endpointIP}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tc.protocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(tc.protocol), }} }) @@ -288,9 +282,15 @@ func TestDeleteEndpointConnections(t *testing.T) { const testHostname = "test-hostname" const testNodeIP = "192.168.0.2" +const testNodeIPAlt = "192.168.1.2" +const testExternalIP = "192.168.99.11" +const testNodeIPv6 = "2001:db8::1" +const testNodeIPv6Alt = "2001:db8:1::2" const testExternalClient = "203.0.113.2" const testExternalClientBlocked = "203.0.113.130" +var testNodeIPs = []string{testNodeIP, testNodeIPAlt, testExternalIP, testNodeIPv6, testNodeIPv6Alt} + func NewFakeProxier(ipt utiliptables.Interface) *Proxier { // TODO: Call NewProxier after refactoring out the goroutine // invocation into a Run() method. 
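Editorial aside, not part of the diff: the recurring pointer.Int32/pointer.String to ptr.To substitutions in these test files use the generic helper from k8s.io/utils/ptr; a minimal sketch (the values are arbitrary).

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	port := ptr.To[int32](80) // was pointer.Int32(80)
	name := ptr.To("p80")     // was pointer.String("p80"); the type parameter is inferred
	fmt.Println(*port, *name) // 80 p80
}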
@@ -298,7 +298,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier { podCIDR := "10.0.0.0/8" if ipt.IsIPv6() { ipfamily = v1.IPv6Protocol - podCIDR = "fd00::/64" + podCIDR = "fd00:10::/64" } detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR(podCIDR) @@ -312,9 +312,10 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier { itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0} addrs1 := []net.Addr{ &net.IPNet{IP: netutils.ParseIPSloppy(testNodeIP), Mask: net.CIDRMask(24, 32)}, - // (This IP never actually gets used; it's only here to test that it gets - // filtered out correctly in the IPv4 nodeport tests.) - &net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(64, 128)}, + &net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPAlt), Mask: net.CIDRMask(24, 32)}, + &net.IPNet{IP: netutils.ParseIPSloppy(testExternalIP), Mask: net.CIDRMask(24, 32)}, + &net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6), Mask: net.CIDRMask(64, 128)}, + &net.IPNet{IP: netutils.ParseIPSloppy(testNodeIPv6Alt), Mask: net.CIDRMask(64, 128)}, } networkInterfacer.AddInterfaceAddr(&itf1, addrs1) @@ -323,7 +324,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier { svcPortMap: make(proxy.ServicePortMap), serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil), endpointsMap: make(proxy.EndpointsMap), - endpointsChanges: proxy.NewEndpointChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil), + endpointsChanges: proxy.NewEndpointsChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil), needFullSync: true, iptables: ipt, masqueradeMark: "0x4000", @@ -1312,31 +1313,29 @@ func assertIPTablesRulesEqual(t *testing.T, line int, checkConsistency bool, exp } } -// assertIPTablesRulesNotEqual asserts that the generated rules in result DON'T match the -// rules in expected, ignoring irrelevant ordering differences. -func assertIPTablesRulesNotEqual(t *testing.T, line int, expected, result string) { +// assertIPTablesChainEqual asserts that the indicated chain in the indicated table in +// result contains exactly the rules in expected (in that order). 
+func assertIPTablesChainEqual(t *testing.T, line int, table utiliptables.Table, chain utiliptables.Chain, expected, result string) { expected = strings.TrimLeft(expected, " \t\n") - result, err := sortIPTablesRules(strings.TrimLeft(result, " \t\n")) + dump, err := iptablestest.ParseIPTablesDump(strings.TrimLeft(result, " \t\n")) if err != nil { t.Fatalf("%s", err) } + result = "" + if ch, _ := dump.GetChain(table, chain); ch != nil { + for _, rule := range ch.Rules { + result += rule.Raw + "\n" + } + } + lineStr := "" if line != 0 { lineStr = fmt.Sprintf(" (from line %d)", line) } - if cmp.Equal(expected, result) { - t.Errorf("rules do not differ%s:\nfull result:\n```\n%s```", lineStr, result) - } - - err = checkIPTablesRuleJumps(expected) - if err != nil { - t.Fatalf("%s", err) - } - err = checkIPTablesRuleJumps(result) - if err != nil { - t.Fatalf("%s", err) + if diff := cmp.Diff(expected, result); diff != "" { + t.Errorf("rules do not match%s:\ndiff:\n%s\nfull result:\n```\n%s```", lineStr, diff, result) } } @@ -1369,9 +1368,9 @@ func addressMatches(t *testing.T, address *iptablestest.IPTablesValue, ipStr str // iptablesTracer holds data used while virtually tracing a packet through a set of // iptables rules type iptablesTracer struct { - ipt *iptablestest.FakeIPTables - nodeIP string - t *testing.T + ipt *iptablestest.FakeIPTables + localIPs sets.Set[string] + t *testing.T // matches accumulates the list of rules that were matched, for debugging purposes. matches []string @@ -1385,21 +1384,23 @@ type iptablesTracer struct { markMasq bool } -// newIPTablesTracer creates an iptablesTracer. nodeIP is the IP to treat as the local -// node IP (for determining whether rules with "--src-type LOCAL" or "--dst-type LOCAL" +// newIPTablesTracer creates an iptablesTracer. nodeIPs are the IPs to treat as local +// node IPs (for determining whether rules with "--src-type LOCAL" or "--dst-type LOCAL" // match). -func newIPTablesTracer(t *testing.T, ipt *iptablestest.FakeIPTables, nodeIP string) *iptablesTracer { +func newIPTablesTracer(t *testing.T, ipt *iptablestest.FakeIPTables, nodeIPs []string) *iptablesTracer { + localIPs := sets.New("127.0.0.1", "::1") + localIPs.Insert(nodeIPs...) + return &iptablesTracer{ - ipt: ipt, - nodeIP: nodeIP, - t: t, + ipt: ipt, + localIPs: localIPs, + t: t, } } // ruleMatches checks if the given iptables rule matches (at least probabilistically) a -// packet with the given sourceIP, destIP, and destPort. (Note that protocol is currently -// ignored.) -func (tracer *iptablesTracer) ruleMatches(rule *iptablestest.Rule, sourceIP, destIP, destPort string) bool { +// packet with the given sourceIP, destIP, and destPort. +func (tracer *iptablesTracer) ruleMatches(rule *iptablestest.Rule, sourceIP, protocol, destIP, destPort string) bool { // The sub-rules within an iptables rule are ANDed together, so the rule only // matches if all of them match. So go through the subrules, and if any of them // DON'T match, then fail. 
@@ -1409,7 +1410,7 @@ func (tracer *iptablesTracer) ruleMatches(rule *iptablestest.Rule, sourceIP, des } if rule.SourceType != nil { addrtype := "not-matched" - if sourceIP == tracer.nodeIP || sourceIP == "127.0.0.1" { + if tracer.localIPs.Has(sourceIP) { addrtype = "LOCAL" } if !rule.SourceType.Matches(addrtype) { @@ -1417,12 +1418,16 @@ } + if rule.Protocol != nil && !rule.Protocol.Matches(protocol) { + return false + } + if rule.DestinationAddress != nil && !addressMatches(tracer.t, rule.DestinationAddress, destIP) { return false } if rule.DestinationType != nil { addrtype := "not-matched" - if destIP == tracer.nodeIP || destIP == "127.0.0.1" { + if tracer.localIPs.Has(destIP) { addrtype = "LOCAL" } if !rule.DestinationType.Matches(addrtype) { @@ -1444,7 +1449,7 @@ // runChain runs the given packet through the rules in the given table and chain, updating // tracer's internal state accordingly. It returns true if it hits a terminal action. -func (tracer *iptablesTracer) runChain(table utiliptables.Table, chain utiliptables.Chain, sourceIP, destIP, destPort string) bool { +func (tracer *iptablesTracer) runChain(table utiliptables.Table, chain utiliptables.Chain, sourceIP, protocol, destIP, destPort string) bool { c, _ := tracer.ipt.Dump.GetChain(table, chain) if c == nil { return false @@ -1455,7 +1460,7 @@ continue } - if !tracer.ruleMatches(rule, sourceIP, destIP, destPort) { + if !tracer.ruleMatches(rule, sourceIP, protocol, destIP, destPort) { continue } // record the matched rule for debugging purposes @@ -1478,7 +1483,7 @@ default: // We got a "-j KUBE-SOMETHING", so process that chain - terminated := tracer.runChain(table, utiliptables.Chain(rule.Jump.Value), sourceIP, destIP, destPort) + terminated := tracer.runChain(table, utiliptables.Chain(rule.Jump.Value), sourceIP, protocol, destIP, destPort) // If the subchain hit a terminal rule AND the rule that sent us // to that chain was non-probabilistic, then this chain terminates @@ -1494,18 +1499,19 @@ return false } -// tracePacket determines what would happen to a packet with the given sourceIP, destIP, -// and destPort, given the indicated iptables ruleData. nodeIP is the local node IP (for -// rules matching "LOCAL"). +// tracePacket determines what would happen to a packet with the given sourceIP, protocol, +// destIP, and destPort, given the indicated iptables ruleData. nodeIPs are the local node +// IPs (for rules matching "LOCAL"). (The protocol value should be lowercase as in iptables +// rules, not uppercase as in corev1.) // // The return values are: an array of matched rules (for debugging), the final packet // destinations (a comma-separated list of IPs, or one of the special targets "ACCEPT", // "DROP", or "REJECT"), and whether the packet would be masqueraded. 
-func tracePacket(t *testing.T, ipt *iptablestest.FakeIPTables, sourceIP, destIP, destPort, nodeIP string) ([]string, string, bool) { - tracer := newIPTablesTracer(t, ipt, nodeIP) +func tracePacket(t *testing.T, ipt *iptablestest.FakeIPTables, sourceIP, protocol, destIP, destPort string, nodeIPs []string) ([]string, string, bool) { + tracer := newIPTablesTracer(t, ipt, nodeIPs) // nat:PREROUTING goes first - tracer.runChain(utiliptables.TableNAT, utiliptables.ChainPrerouting, sourceIP, destIP, destPort) + tracer.runChain(utiliptables.TableNAT, utiliptables.ChainPrerouting, sourceIP, protocol, destIP, destPort) // After the PREROUTING rules run, pending DNATs are processed (which would affect // the destination IP that later rules match against). @@ -1517,10 +1523,10 @@ func tracePacket(t *testing.T, ipt *iptablestest.FakeIPTables, sourceIP, destIP, // inbound, outbound, or intra-host packet, which we don't know. So we just run // the interesting tables manually. (Theoretically this could cause conflicts in // the future in which case we'd have to do something more complicated.) - tracer.runChain(utiliptables.TableFilter, kubeServicesChain, sourceIP, destIP, destPort) - tracer.runChain(utiliptables.TableFilter, kubeExternalServicesChain, sourceIP, destIP, destPort) - tracer.runChain(utiliptables.TableFilter, kubeNodePortsChain, sourceIP, destIP, destPort) - tracer.runChain(utiliptables.TableFilter, kubeProxyFirewallChain, sourceIP, destIP, destPort) + tracer.runChain(utiliptables.TableFilter, kubeServicesChain, sourceIP, protocol, destIP, destPort) + tracer.runChain(utiliptables.TableFilter, kubeExternalServicesChain, sourceIP, protocol, destIP, destPort) + tracer.runChain(utiliptables.TableFilter, kubeNodePortsChain, sourceIP, protocol, destIP, destPort) + tracer.runChain(utiliptables.TableFilter, kubeProxyFirewallChain, sourceIP, protocol, destIP, destPort) // Finally, the nat:POSTROUTING rules run, but the only interesting thing that // happens there is that the masquerade mark gets turned into actual masquerading. 
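Editorial aside, not part of the diff: a hypothetical helper in this same test package showing a direct call to the refactored tracePacket, with the protocol passed explicitly in lowercase iptables form and the full node IP list treated as LOCAL; the destination IP and port are made-up values.

func exampleDirectTrace(t *testing.T, ipt *iptablestest.FakeIPTables) {
	matches, output, masq := tracePacket(t, ipt,
		"203.0.113.2", // sourceIP (testExternalClient)
		"tcp",         // protocol, lowercase as in iptables rules
		"172.30.0.41", // destIP, a hypothetical ClusterIP
		"80",          // destPort
		testNodeIPs,   // every IP the tracer should treat as LOCAL
	)
	t.Logf("matched rules:\n%s\noutput=%q masq=%v", strings.Join(matches, "\n"), output, masq)
}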
@@ -1531,20 +1537,25 @@ func tracePacket(t *testing.T, ipt *iptablestest.FakeIPTables, sourceIP, destIP, type packetFlowTest struct { name string sourceIP string + protocol v1.Protocol destIP string destPort int output string masq bool } -func runPacketFlowTests(t *testing.T, line int, ipt *iptablestest.FakeIPTables, nodeIP string, testCases []packetFlowTest) { +func runPacketFlowTests(t *testing.T, line int, ipt *iptablestest.FakeIPTables, nodeIPs []string, testCases []packetFlowTest) { lineStr := "" if line != 0 { lineStr = fmt.Sprintf(" (from line %d)", line) } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - matches, output, masq := tracePacket(t, ipt, tc.sourceIP, tc.destIP, fmt.Sprintf("%d", tc.destPort), nodeIP) + protocol := strings.ToLower(string(tc.protocol)) + if protocol == "" { + protocol = "tcp" + } + matches, output, masq := tracePacket(t, ipt, tc.sourceIP, protocol, tc.destIP, fmt.Sprintf("%d", tc.destPort), nodeIPs) var errors []string if output != tc.output { errors = append(errors, fmt.Sprintf("wrong output: expected %q got %q", tc.output, output)) @@ -1553,8 +1564,8 @@ func runPacketFlowTests(t *testing.T, line int, ipt *iptablestest.FakeIPTables, errors = append(errors, fmt.Sprintf("wrong masq: expected %v got %v", tc.masq, masq)) } if errors != nil { - t.Errorf("Test %q of a packet from %s to %s:%d%s got result:\n%s\n\nBy matching:\n%s\n\n", - tc.name, tc.sourceIP, tc.destIP, tc.destPort, lineStr, strings.Join(errors, "\n"), strings.Join(matches, "\n")) + t.Errorf("Test %q of a %s packet from %s to %s:%d%s got result:\n%s\n\nBy matching:\n%s\n\n", + tc.name, protocol, tc.sourceIP, tc.destIP, tc.destPort, lineStr, strings.Join(errors, "\n"), strings.Join(matches, "\n")) } }) } @@ -1679,7 +1690,7 @@ func TestTracePackets(t *testing.T) { t.Fatalf("Restore of test data failed: %v", err) } - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ { name: "no match", sourceIP: "10.0.0.2", @@ -1733,11 +1744,9 @@ func TestTracePackets(t *testing.T) { }) } -// TestOverallIPTablesRulesWithMultipleServices creates 4 types of services: ClusterIP, -// LoadBalancer, ExternalIP and NodePort and verifies if the NAT table rules created -// are exactly the same as what is expected. This test provides an overall view of how -// the NAT table rules look like with the different jumps. -func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { +// TestOverallIPTablesRules creates a variety of services and verifies that the generated +// rules are exactly as expected. +func TestOverallIPTablesRules(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) metrics.RegisterMetrics() @@ -1792,7 +1801,8 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { TargetPort: intstr.FromInt32(80), }} }), - // create LoadBalancer service with Cluster traffic policy and source ranges + // create LoadBalancer service with Cluster traffic policy, source ranges, + // and session affinity makeTestService("ns5", "svc5", func(svc *v1.Service) { svc.Spec.Type = "LoadBalancer" svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster @@ -1810,6 +1820,13 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { // Extra whitespace to ensure that invalid value will not result // in a crash, for backward compatibility. 
svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"} + + svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP + svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ + ClientIP: &v1.ClientIPConfig{ + TimeoutSeconds: ptr.To[int32](10800), + }, + } }), // create ClusterIP service with no endpoints makeTestService("ns6", "svc6", func(svc *v1.Service) { @@ -1831,9 +1848,9 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), // create Local LoadBalancer endpoints. Note that since we aren't setting @@ -1844,9 +1861,9 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { Addresses: []string{"10.180.0.2"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), // create NodePort service endpoints @@ -1856,9 +1873,9 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { Addresses: []string{"10.180.0.3"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), // create ExternalIP service endpoints @@ -1868,12 +1885,12 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { Addresses: []string{"10.180.0.4"}, }, { Addresses: []string{"10.180.0.5"}, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), // create Cluster LoadBalancer endpoints @@ -1883,9 +1900,9 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { Addresses: []string{"10.180.0.3"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -1963,7 +1980,7 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { -A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ -A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80 -A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ - -A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80 + -A KUBE-SEP-I77PXRDZVX7PMWMN -m comment --comment ns5/svc5:p80 -m recent --name KUBE-SEP-I77PXRDZVX7PMWMN --set -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80 -A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ -A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80 -A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ @@ -1978,6 +1995,7 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { -A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp 
-d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ -A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ -A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 cluster IP" -m tcp -p tcp -d 172.30.0.45 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ + -A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -m recent --name KUBE-SEP-I77PXRDZVX7PMWMN --rcheck --seconds 10800 --reap -j KUBE-SEP-I77PXRDZVX7PMWMN -A KUBE-SVC-NUKIZ6OKUXPJNT4C -m comment --comment "ns5/svc5:p80 -> 10.180.0.3:80" -j KUBE-SEP-I77PXRDZVX7PMWMN -A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ -A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.180.0.3:80" -j KUBE-SEP-OYPFS5VJICHGATKP @@ -1996,11 +2014,16 @@ func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) { } } -func TestClusterIPReject(t *testing.T) { +// TestNoEndpointsReject tests that a service with no endpoints rejects connections to +// its ClusterIP, ExternalIPs, NodePort, and LoadBalancer IP. +func TestNoEndpointsReject(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) svcIP := "172.30.0.41" svcPort := 80 + svcNodePort := 3001 + svcExternalIPs := "192.168.99.11" + svcLBIP := "1.2.3.4" svcPortName := proxy.ServicePortName{ NamespacedName: makeNSN("ns1", "svc1"), Port: "p80", @@ -2008,132 +2031,172 @@ func TestClusterIPReject(t *testing.T) { makeServiceMap(fp, makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { + svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ClusterIP = svcIP + svc.Spec.ExternalIPs = []string{svcExternalIPs} svc.Spec.Ports = []v1.ServicePort{{ Name: svcPortName.Port, - Port: int32(svcPort), Protocol: v1.ProtocolTCP, + Port: int32(svcPort), + NodePort: int32(svcNodePort), + }} + svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{ + IP: svcLBIP, }} }), ) fp.syncProxyRules() - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `) - - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ { - name: "cluster IP rejected", + name: "pod to cluster IP with no endpoints", sourceIP: "10.0.0.2", - destIP: "172.30.0.41", - destPort: 80, + destIP: svcIP, + destPort: svcPort, + output: "REJECT", + }, + { + name: "external to external IP with no endpoints", + sourceIP: testExternalClient, + destIP: svcExternalIPs, + destPort: svcPort, + output: "REJECT", + }, + { + name: "pod to NodePort with no endpoints", + sourceIP: "10.0.0.2", + destIP: testNodeIP, + destPort: svcNodePort, + output: "REJECT", + }, + { + name: "external to NodePort with no endpoints", + sourceIP: testExternalClient, + destIP: testNodeIP, + destPort: svcNodePort, + output: "REJECT", + }, + { + name: "pod to LoadBalancer IP with no endpoints", + sourceIP: "10.0.0.2", + destIP: svcLBIP, + destPort: svcPort, + output: "REJECT", + }, + { + name: "external to LoadBalancer IP with no endpoints", + sourceIP: testExternalClient, + destIP: svcLBIP, + destPort: svcPort, output: "REJECT", }, }) } -func TestClusterIPEndpointsMore(t *testing.T) { +// TestClusterIPGeneral tests various basic features of a ClusterIP service +func TestClusterIPGeneral(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) - svcIP := "172.30.0.41" - svcPort := 80 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolSCTP, - } makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.ClusterIP = svcIP + makeTestService("ns1", "svc1", func(svc *v1.Service) { + svc.Spec.ClusterIP = "172.30.0.41" svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolSCTP, + Name: "http", + Port: 80, + Protocol: v1.ProtocolTCP, }} }), + makeTestService("ns2", "svc2", func(svc *v1.Service) { + svc.Spec.ClusterIP = "172.30.0.42" + svc.Spec.Ports = []v1.ServicePort{ + { + Name: "http", + Port: 80, + Protocol: v1.ProtocolTCP, + }, + { + Name: "https", + Port: 443, + Protocol: v1.ProtocolTCP, + TargetPort: intstr.FromInt32(8443), + }, + { + // Of course this should really be UDP, but if we + // create a service with UDP ports, the Proxier will + // try to do conntrack cleanup and we'd have to set + // the FakeExec up to be able to deal with that... + Name: "dns-sctp", + Port: 53, + Protocol: v1.ProtocolSCTP, + }, + { + Name: "dns-tcp", + Port: 53, + Protocol: v1.ProtocolTCP, + // We use TargetPort on TCP but not SCTP to help + // disambiguate the output. 
+ TargetPort: intstr.FromInt32(5353), + }, + } + }), ) - epIP := "10.180.0.1" populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { + makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP}, + Addresses: []string{"10.180.0.1"}, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &sctpProtocol, + Name: ptr.To("http"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), + makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) { + eps.AddressType = discovery.AddressTypeIPv4 + eps.Endpoints = []discovery.Endpoint{ + { + Addresses: []string{"10.180.0.1"}, + NodeName: ptr.To(testHostname), + }, + { + Addresses: []string{"10.180.2.1"}, + NodeName: ptr.To("host2"), + }, + } + eps.Ports = []discovery.EndpointPort{ + { + Name: ptr.To("http"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), + }, + { + Name: ptr.To("https"), + Port: ptr.To[int32](8443), + Protocol: ptr.To(v1.ProtocolTCP), + }, + { + Name: ptr.To("dns-sctp"), + Port: ptr.To[int32](53), + Protocol: ptr.To(v1.ProtocolSCTP), + }, + { + Name: ptr.To("dns-tcp"), + Port: ptr.To[int32](5353), + Protocol: ptr.To(v1.ProtocolTCP), + }, + } + }), ) fp.syncProxyRules() - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-RFW33Y6OHVBQ4W3M - [0:0] - :KUBE-SVC-GFCIFIA5VTFSTMSM - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m sctp -p sctp -d 172.30.0.41 --dport 80 -j KUBE-SVC-GFCIFIA5VTFSTMSM - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-RFW33Y6OHVBQ4W3M -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-RFW33Y6OHVBQ4W3M -m comment --comment ns1/svc1:p80 -m sctp -p sctp -j DNAT --to-destination 10.180.0.1:80 - -A KUBE-SVC-GFCIFIA5VTFSTMSM -m comment --comment "ns1/svc1:p80 cluster IP" -m sctp -p sctp -d 172.30.0.41 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-GFCIFIA5VTFSTMSM -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-RFW33Y6OHVBQ4W3M - COMMIT - `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ { - name: "cluster IP accepted", + name: "simple clusterIP", sourceIP: "10.180.0.2", destIP: "172.30.0.41", destPort: 80, @@ -2148,6 +2211,55 @@ func TestClusterIPEndpointsMore(t *testing.T) { output: "10.180.0.1:80", masq: true, }, + { + name: "clusterIP with multiple endpoints", + sourceIP: "10.180.0.2", + destIP: "172.30.0.42", + destPort: 80, + output: "10.180.0.1:80, 10.180.2.1:80", + masq: false, + }, + { + name: "clusterIP with TargetPort", + sourceIP: "10.180.0.2", + destIP: "172.30.0.42", + destPort: 443, + output: "10.180.0.1:8443, 10.180.2.1:8443", + masq: false, + }, + { + name: "clusterIP with TCP and SCTP on same port (TCP)", + sourceIP: "10.180.0.2", + protocol: v1.ProtocolTCP, + destIP: "172.30.0.42", + destPort: 53, + output: "10.180.0.1:5353, 10.180.2.1:5353", + masq: false, + }, + { + name: "clusterIP with TCP and SCTP on same port (SCTP)", + sourceIP: "10.180.0.2", + protocol: v1.ProtocolSCTP, + destIP: "172.30.0.42", + destPort: 53, + output: "10.180.0.1:53, 10.180.2.1:53", + masq: false, + }, + { + name: "TCP-only port does not match UDP traffic", + sourceIP: "10.180.0.2", + protocol: v1.ProtocolUDP, + destIP: "172.30.0.42", + destPort: 80, + output: "", + }, + { + name: "svc1 does not accept svc2's ports", + sourceIP: "10.180.0.2", + destIP: "172.30.0.41", + destPort: 443, + output: "", + }, }) } @@ -2196,65 +2308,16 @@ func TestLoadBalancer(t *testing.T) { Addresses: []string{epIP}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) fp.syncProxyRules() - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - -A KUBE-PROXY-FIREWALL -m comment --comment "ns1/svc1:p80 traffic not accepted by KUBE-FW-XPGD46QRK7WJZT7O" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP - -A KUBE-PROXY-FIREWALL -m comment --comment "ns1/svc1:p80 traffic not accepted by KUBE-FW-XPGD46QRK7WJZT7O" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j DROP - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-FW-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 192.168.0.0/24 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 1.2.3.4 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 5.6.7.8 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "other traffic to ns1/svc1:p80 will be dropped by KUBE-PROXY-FIREWALL" - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ - COMMIT - `) - - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ { name: "pod to cluster IP", sourceIP: "10.0.0.2", @@ -2363,120 +2426,257 @@ func TestLoadBalancer(t *testing.T) { }) } -func TestNodePort(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - svcIP := "172.30.0.41" - svcPort := 80 - svcNodePort := 3001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolTCP, - } +// TestNodePorts tests NodePort services under various combinations of the +// --nodeport-addresses and --localhost-nodeports flags. +func TestNodePorts(t *testing.T) { + testCases := []struct { + name string - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - }), - ) + family v1.IPFamily + localhostNodePorts bool + nodePortAddresses []string - epIP := "10.180.0.1" - populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv4 - eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP}, - }} - eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, - }} - }), - ) + // allowAltNodeIP is true if we expect NodePort traffic on the alternate + // node IP to be accepted + allowAltNodeIP bool - fp.syncProxyRules() + // expectFirewall is true if we expect KUBE-FIREWALL to be filled in with + // an anti-martian-packet rule + expectFirewall bool + }{ + { + name: "ipv4, localhost-nodeports enabled", - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ - COMMIT - `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) + family: v1.IPv4Protocol, + localhostNodePorts: true, + nodePortAddresses: nil, - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ + allowAltNodeIP: true, + expectFirewall: true, + }, { - name: "pod to cluster IP", - sourceIP: "10.0.0.2", - destIP: svcIP, - destPort: svcPort, - output: fmt.Sprintf("%s:%d", epIP, svcPort), - masq: false, + name: "ipv4, localhost-nodeports disabled", + + family: v1.IPv4Protocol, + localhostNodePorts: false, + nodePortAddresses: nil, + + allowAltNodeIP: true, + expectFirewall: false, }, { - name: "external to nodePort", - sourceIP: testExternalClient, - destIP: testNodeIP, - destPort: svcNodePort, - output: fmt.Sprintf("%s:%d", epIP, svcPort), - masq: true, + name: "ipv4, localhost-nodeports disabled, localhost in nodeport-addresses", + + family: v1.IPv4Protocol, + localhostNodePorts: false, + nodePortAddresses: []string{"192.168.0.0/24", "127.0.0.1/32"}, + + allowAltNodeIP: false, + expectFirewall: false, }, { - name: "node to nodePort", - sourceIP: testNodeIP, - destIP: testNodeIP, - destPort: svcNodePort, - output: fmt.Sprintf("%s:%d", epIP, svcPort), - masq: true, + name: "ipv4, localhost-nodeports enabled, multiple nodeport-addresses", + + family: v1.IPv4Protocol, + localhostNodePorts: false, + nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"}, + + allowAltNodeIP: true, + expectFirewall: false, }, { - name: "localhost to nodePort gets masqueraded", - sourceIP: "127.0.0.1", - destIP: "127.0.0.1", - destPort: svcNodePort, - output: 
fmt.Sprintf("%s:%d", epIP, svcPort), - masq: true, + name: "ipv6, localhost-nodeports enabled", + + family: v1.IPv6Protocol, + localhostNodePorts: true, + nodePortAddresses: nil, + + allowAltNodeIP: true, + expectFirewall: false, }, - }) + { + name: "ipv6, localhost-nodeports disabled", + + family: v1.IPv6Protocol, + localhostNodePorts: false, + nodePortAddresses: nil, + + allowAltNodeIP: true, + expectFirewall: false, + }, + { + name: "ipv6, localhost-nodeports disabled, multiple nodeport-addresses", + + family: v1.IPv6Protocol, + localhostNodePorts: false, + nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"}, + + allowAltNodeIP: false, + expectFirewall: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var ipt *iptablestest.FakeIPTables + var svcIP, epIP1, epIP2 string + if tc.family == v1.IPv4Protocol { + ipt = iptablestest.NewFake() + svcIP = "172.30.0.41" + epIP1 = "10.180.0.1" + epIP2 = "10.180.2.1" + } else { + ipt = iptablestest.NewIPv6Fake() + svcIP = "fd00:172:30::41" + epIP1 = "fd00:10:180::1" + epIP2 = "fd00:10:180::2:1" + } + fp := NewFakeProxier(ipt) + fp.localhostNodePorts = tc.localhostNodePorts + if tc.nodePortAddresses != nil { + fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses) + } + + makeServiceMap(fp, + makeTestService("ns1", "svc1", func(svc *v1.Service) { + svc.Spec.Type = v1.ServiceTypeNodePort + svc.Spec.ClusterIP = svcIP + svc.Spec.Ports = []v1.ServicePort{{ + Name: "p80", + Port: 80, + Protocol: v1.ProtocolTCP, + NodePort: 3001, + }} + }), + ) + + populateEndpointSlices(fp, + makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) { + if tc.family == v1.IPv4Protocol { + eps.AddressType = discovery.AddressTypeIPv4 + } else { + eps.AddressType = discovery.AddressTypeIPv6 + } + eps.Endpoints = []discovery.Endpoint{{ + Addresses: []string{epIP1}, + NodeName: nil, + }, { + Addresses: []string{epIP2}, + NodeName: ptr.To(testHostname), + }} + eps.Ports = []discovery.EndpointPort{{ + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), + }} + }), + ) + + fp.syncProxyRules() + + var podIP, externalClientIP, nodeIP, altNodeIP, localhostIP string + if tc.family == v1.IPv4Protocol { + podIP = "10.0.0.2" + externalClientIP = testExternalClient + nodeIP = testNodeIP + altNodeIP = testNodeIPAlt + localhostIP = "127.0.0.1" + } else { + podIP = "fd00:10::2" + externalClientIP = "2600:5200::1" + nodeIP = testNodeIPv6 + altNodeIP = testNodeIPv6Alt + localhostIP = "::1" + } + output := net.JoinHostPort(epIP1, "80") + ", " + net.JoinHostPort(epIP2, "80") + + // Basic tests are the same for all cases + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "pod to cluster IP", + sourceIP: podIP, + destIP: svcIP, + destPort: 80, + output: output, + masq: false, + }, + { + name: "external to nodePort", + sourceIP: externalClientIP, + destIP: nodeIP, + destPort: 3001, + output: output, + masq: true, + }, + { + name: "node to nodePort", + sourceIP: nodeIP, + destIP: nodeIP, + destPort: 3001, + output: output, + masq: true, + }, + }) + + // localhost to NodePort is only allowed in IPv4, and only if not disabled + if tc.family == v1.IPv4Protocol && tc.localhostNodePorts { + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "localhost to nodePort gets masqueraded", + sourceIP: localhostIP, + destIP: localhostIP, + destPort: 3001, + output: output, + masq: true, + }, + }) + } else 
{ + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "localhost to nodePort is ignored", + sourceIP: localhostIP, + destIP: localhostIP, + destPort: 3001, + output: "", + }, + }) + } + + // NodePort on altNodeIP should be allowed, unless + // nodePortAddressess excludes altNodeIP + if tc.allowAltNodeIP { + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "external to nodePort on secondary IP", + sourceIP: externalClientIP, + destIP: altNodeIP, + destPort: 3001, + output: output, + masq: true, + }, + }) + } else { + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "secondary nodeIP ignores NodePorts", + sourceIP: externalClientIP, + destIP: altNodeIP, + destPort: 3001, + output: "", + }, + }) + } + + // We have to check the firewall rule manually rather than via + // runPacketFlowTests(), because the packet tracer doesn't + // implement conntrack states. + var expected string + if tc.expectFirewall { + expected = "-A KUBE-FIREWALL -m comment --comment \"block incoming localnet connections\" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP\n" + } + assertIPTablesChainEqual(t, getLine(), utiliptables.TableFilter, kubeletFirewallChain, expected, fp.iptablesData.String()) + }) + } } func TestHealthCheckNodePort(t *testing.T) { @@ -2509,38 +2709,7 @@ func TestHealthCheckNodePort(t *testing.T) { makeServiceMap(fp, svc) fp.syncProxyRules() - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j REJECT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 127.0.0.1 -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `) - - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ { name: "firewall accepts HealthCheckNodePort", sourceIP: "1.2.3.4", @@ -2554,7 +2723,7 @@ func TestHealthCheckNodePort(t *testing.T) { fp.OnServiceDelete(svc) fp.syncProxyRules() - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ { name: "HealthCheckNodePort no longer has any rule", sourceIP: "1.2.3.4", @@ -2566,95 +2735,64 @@ func TestHealthCheckNodePort(t *testing.T) { } func TestDropInvalidRule(t *testing.T) { - for _, testcase := range []bool{false, true} { - t.Run(fmt.Sprintf("tcpLiberal %t", testcase), func(t *testing.T) { + for _, tcpLiberal := range []bool{false, true} { + t.Run(fmt.Sprintf("tcpLiberal %t", tcpLiberal), func(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) - fp.conntrackTCPLiberal = testcase + fp.conntrackTCPLiberal = tcpLiberal fp.syncProxyRules() - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP`) - if !testcase { - expected += "\n-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP" + var expected string + if !tcpLiberal { + expected = "-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP" } - expected += dedent.Dedent(` - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `) + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) + assertIPTablesChainEqual(t, getLine(), utiliptables.TableFilter, kubeForwardChain, expected, fp.iptablesData.String()) }) } } func TestMasqueradeRule(t *testing.T) { - for _, testcase := range []bool{false, true} { - ipt := iptablestest.NewFake().SetHasRandomFully(testcase) - fp := NewFakeProxier(ipt) - fp.syncProxyRules() + for _, randomFully := range []bool{false, true} { + t.Run(fmt.Sprintf("randomFully %t", randomFully), func(t *testing.T) { + ipt := iptablestest.NewFake().SetHasRandomFully(randomFully) + fp := NewFakeProxier(ipt) + fp.syncProxyRules() - expectedFmt := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE%s - COMMIT - `) - var expected string - if testcase { - expected = fmt.Sprintf(expectedFmt, " --random-fully") - } else { - expected = fmt.Sprintf(expectedFmt, "") - } - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) + expectedFmt := dedent.Dedent(` + -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE%s + `) + var expected string + if randomFully { + expected = fmt.Sprintf(expectedFmt, " --random-fully") + } else { + expected = fmt.Sprintf(expectedFmt, "") + } + assertIPTablesChainEqual(t, getLine(), utiliptables.TableNAT, kubePostroutingChain, expected, fp.iptablesData.String()) + }) } } -func TestExternalIPsReject(t *testing.T) { +// TestExternalTrafficPolicyLocal tests that traffic to externally-facing IPs does not get +// masqueraded when using Local traffic policy. For traffic from external sources, that +// means it can also only be routed to local endpoints, but for traffic from internal +// sources, it gets routed to all endpoints. 
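As a hedged illustration (not the test body itself), the behaviour summarized in the comment above can be written as packetFlowTest expectations, assuming, as in the earlier local-traffic-policy tests in this file, that 10.180.2.1 is the endpoint on testHostname and 10.180.0.1 the remote one; the variable name is illustrative:

// Illustrative only: expectations implied by the comment above for a
// LoadBalancer service (cluster IP 172.30.0.41, LB IP 1.2.3.4) with
// ExternalTrafficPolicy: Local and one local plus one remote endpoint.
var externalPolicyLocalExpectations = []packetFlowTest{
	{
		name:     "external to LB IP hits only the local endpoint, unmasqueraded",
		sourceIP: testExternalClient,
		destIP:   "1.2.3.4",
		destPort: 80,
		output:   "10.180.2.1:80",
		masq:     false,
	},
	{
		name:     "pod to cluster IP still hits every endpoint",
		sourceIP: "10.0.0.2",
		destIP:   "172.30.0.41",
		destPort: 80,
		output:   "10.180.0.1:80, 10.180.2.1:80",
		masq:     false,
	},
}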
+func TestExternalTrafficPolicyLocal(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) + svcIP := "172.30.0.41" svcPort := 80 + svcNodePort := 3001 + svcHealthCheckNodePort := 30000 svcExternalIPs := "192.168.99.11" + svcLBIP := "1.2.3.4" svcPortName := proxy.ServicePortName{ NamespacedName: makeNSN("ns1", "svc1"), Port: "p80", @@ -2662,1189 +2800,137 @@ func TestExternalIPsReject(t *testing.T) { makeServiceMap(fp, makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "ClusterIP" - svc.Spec.ClusterIP = svcIP - svc.Spec.ExternalIPs = []string{svcExternalIPs} - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt32(int32(svcPort)), - }} - }), - ) - - fp.syncProxyRules() - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j REJECT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ - { - name: "cluster IP with no endpoints", - sourceIP: "10.0.0.2", - destIP: svcIP, - destPort: svcPort, - output: "REJECT", - }, - { - name: "external IP with no endpoints", - sourceIP: testExternalClient, - destIP: svcExternalIPs, - destPort: svcPort, - output: "REJECT", - }, - }) -} - -func TestOnlyLocalExternalIPs(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - svcIP := "172.30.0.41" - svcPort := 80 - svcExternalIPs := "192.168.99.11" - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - svc.Spec.ClusterIP = svcIP - svc.Spec.ExternalIPs = []string{svcExternalIPs} - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt32(int32(svcPort)), - }} - }), - ) - epIP1 := "10.180.0.1" - epIP2 := "10.180.2.1" - populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv4 - eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP1}, - }, { - Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), - }} - eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, - }} - }), - ) - - fp.syncProxyRules() - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0] - :KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "pod traffic for ns1/svc1:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80 - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - COMMIT - `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ - { - name: "cluster IP hits both endpoints", - sourceIP: "10.0.0.2", - destIP: svcIP, - destPort: svcPort, - output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), - masq: false, - }, - { - name: "external IP hits only local endpoint, unmasqueraded", - sourceIP: testExternalClient, - destIP: svcExternalIPs, - destPort: svcPort, - output: fmt.Sprintf("%s:%d", epIP2, svcPort), - masq: false, - }, - }) -} - -// TestNonLocalExternalIPs tests if we add the masquerade rule into svcChain in order to -// SNAT packets to external IPs if externalTrafficPolicy is cluster and the traffic is NOT Local. -func TestNonLocalExternalIPs(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - svcIP := "172.30.0.41" - svcPort := 80 - svcExternalIPs := "192.168.99.11" - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.ClusterIP = svcIP - svc.Spec.ExternalIPs = []string{svcExternalIPs} - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt32(int32(svcPort)), - }} - }), - ) - epIP1 := "10.180.0.1" - epIP2 := "10.180.2.1" - populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv4 - eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP1}, - NodeName: nil, - }, { - Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), - }} - eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, - }} - }), - ) - - fp.syncProxyRules() - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0] - :KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80 - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - COMMIT - `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ - { - name: "pod to cluster IP", - sourceIP: "10.0.0.2", - destIP: svcIP, - destPort: svcPort, - output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), - masq: false, - }, - { - name: "external to external IP", - sourceIP: testExternalClient, - destIP: svcExternalIPs, - destPort: svcPort, - output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), - masq: true, - }, - }) -} - -func TestNodePortReject(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - svcIP := "172.30.0.41" - svcPort := 80 - svcNodePort := 3001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - }), - ) - - fp.syncProxyRules() - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ - { - name: "pod to cluster IP", - sourceIP: "10.0.0.2", - destIP: svcIP, - destPort: svcPort, - output: "REJECT", - }, - { - name: "pod to NodePort", - sourceIP: "10.0.0.2", - destIP: testNodeIP, - destPort: svcNodePort, - output: "REJECT", - }, - { - name: "external to NodePort", - sourceIP: testExternalClient, - destIP: testNodeIP, - destPort: svcNodePort, - output: "REJECT", - }, - }) -} - -func TestLoadBalancerReject(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - svcIP := "172.30.0.41" - svcPort := 80 - svcNodePort := 3001 - svcHealthCheckNodePort := 30000 - svcLBIP := "1.2.3.4" - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolTCP, - } - svcSessionAffinityTimeout := int32(10800) - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "LoadBalancer" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort) - svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{ - IP: svcLBIP, - }} - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP - svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ - ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout}, - } - }), - ) - - fp.syncProxyRules() - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ - { - name: "pod to cluster IP", - sourceIP: "10.0.0.2", - destIP: svcIP, - destPort: svcPort, - output: "REJECT", - }, - { - name: "pod to LoadBalancer IP", - sourceIP: "10.0.0.2", - destIP: svcLBIP, - destPort: svcPort, - output: "REJECT", - }, - { - name: "external to LoadBalancer IP", - sourceIP: testExternalClient, - destIP: svcLBIP, - destPort: svcPort, - output: "REJECT", - }, - }) -} - -func TestOnlyLocalLoadBalancing(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - svcIP := "172.30.0.41" - svcPort := 80 - svcNodePort := 3001 - svcHealthCheckNodePort := 30000 - svcLBIP := "1.2.3.4" - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolTCP, - } - svcSessionAffinityTimeout := int32(10800) - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "LoadBalancer" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort) - svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{ - IP: svcLBIP, - }} - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP - svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ - ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout}, - } - }), - ) - - epIP1 := "10.180.0.1" - epIP2 := "10.180.2.1" - populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv4 - eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP1}, - }, { - Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), - }} - eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, - }} - }), - ) - - fp.syncProxyRules() - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0] - :KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "pod traffic for ns1/svc1:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-SXIVWICOYRO3J4NJ --set -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80 - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --set -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m recent --name KUBE-SEP-SXIVWICOYRO3J4NJ --rcheck --seconds 10800 --reap -j KUBE-SEP-SXIVWICOYRO3J4NJ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - COMMIT - `) - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, getLine(), ipt, testNodeIP, []packetFlowTest{ - { - name: "pod to cluster IP hits both endpoints", - sourceIP: "10.0.0.2", - destIP: svcIP, - destPort: svcPort, - output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), - masq: false, - }, - { - name: "external to LB IP hits only local endpoint, unmasqueraded", - sourceIP: testExternalClient, - destIP: svcLBIP, - destPort: svcPort, - output: fmt.Sprintf("%s:%d", epIP2, svcPort), - masq: false, - }, - { - name: "external to NodePort hits only local endpoint, unmasqueraded", - sourceIP: testExternalClient, - destIP: testNodeIP, - destPort: svcNodePort, - output: fmt.Sprintf("%s:%d", epIP2, svcPort), - masq: false, - }, - }) -} - -func TestEnableLocalhostNodePortsIPv4(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - fp.localDetector = proxyutiliptables.NewNoOpLocalDetector() - fp.localhostNodePorts = true - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-6KG6DFHVBKBK53RU - [0:0] - :KUBE-SEP-KDGX2M2ONE25PSWH - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 30001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.69.0.10 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-6KG6DFHVBKBK53RU -m comment --comment ns1/svc1:p80 -s 10.244.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-6KG6DFHVBKBK53RU -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.244.0.1:80 - -A KUBE-SEP-KDGX2M2ONE25PSWH -m comment --comment ns1/svc1:p80 -s 10.244.2.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-KDGX2M2ONE25PSWH -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.244.2.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-6KG6DFHVBKBK53RU - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.2.1:80" -j KUBE-SEP-KDGX2M2ONE25PSWH - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.2.1:80" -j KUBE-SEP-KDGX2M2ONE25PSWH - COMMIT - `) - svcIP := "10.69.0.10" - svcPort := 80 - svcNodePort := 30001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolTCP, - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - }), - ) - - epIP1 := "10.244.0.1" - epIP2 := "10.244.2.1" - populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv4 - eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP1}, - NodeName: nil, - }, { - Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), - }} - eps.Ports = 
[]discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, - }} - }), - ) - - fp.syncProxyRules() - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) -} - -func TestDisableLocalhostNodePortsIPv4(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - fp.localDetector = proxyutiliptables.NewNoOpLocalDetector() - fp.localhostNodePorts = false - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-6KG6DFHVBKBK53RU - [0:0] - :KUBE-SEP-KDGX2M2ONE25PSWH - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 30001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.69.0.10 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL ! -d 127.0.0.0/8 -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-6KG6DFHVBKBK53RU -m comment --comment ns1/svc1:p80 -s 10.244.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-6KG6DFHVBKBK53RU -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.244.0.1:80 - -A KUBE-SEP-KDGX2M2ONE25PSWH -m comment --comment ns1/svc1:p80 -s 10.244.2.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-KDGX2M2ONE25PSWH -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.244.2.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-6KG6DFHVBKBK53RU - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.2.1:80" -j KUBE-SEP-KDGX2M2ONE25PSWH - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.2.1:80" -j KUBE-SEP-KDGX2M2ONE25PSWH - COMMIT - `) - svcIP := "10.69.0.10" - svcPort := 80 - svcNodePort := 30001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolTCP, - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - }), - ) - - epIP1 := "10.244.0.1" - epIP2 := "10.244.2.1" - populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv4 - eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP1}, - NodeName: nil, - }, { - Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), - }} - eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, - }} - }), - ) - - fp.syncProxyRules() - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) -} - -func TestDisableLocalhostNodePortsIPv4WithNodeAddress(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - fp.localDetector = proxyutiliptables.NewNoOpLocalDetector() - fp.localhostNodePorts = false - fp.networkInterfacer.InterfaceAddrs() - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}) - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-6KG6DFHVBKBK53RU - [0:0] - :KUBE-SEP-KDGX2M2ONE25PSWH - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 30001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A 
KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.69.0.10 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-6KG6DFHVBKBK53RU -m comment --comment ns1/svc1:p80 -s 10.244.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-6KG6DFHVBKBK53RU -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.244.0.1:80 - -A KUBE-SEP-KDGX2M2ONE25PSWH -m comment --comment ns1/svc1:p80 -s 10.244.2.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-KDGX2M2ONE25PSWH -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.244.2.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-6KG6DFHVBKBK53RU - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.2.1:80" -j KUBE-SEP-KDGX2M2ONE25PSWH - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.244.2.1:80" -j KUBE-SEP-KDGX2M2ONE25PSWH - COMMIT - `) - svcIP := "10.69.0.10" - svcPort := 80 - svcNodePort := 30001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolTCP, - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - }), - ) - - epIP1 := "10.244.0.1" - epIP2 := "10.244.2.1" - populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv4 - eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP1}, - NodeName: nil, - }, { - Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), - }} - eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, - }} - }), - ) - - fp.syncProxyRules() - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) -} - -func TestEnableLocalhostNodePortsIPv6(t *testing.T) { - ipt := iptablestest.NewIPv6Fake() - fp := NewFakeProxier(ipt) - fp.localDetector = proxyutiliptables.NewNoOpLocalDetector() - fp.localhostNodePorts = true - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate 
RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-LIGRYQQLSZN4UWQ5 - [0:0] - :KUBE-SEP-XJJ5QXWGJG344QDZ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 30001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d fd00:ab34::20 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL ! -d ::1/128 -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-LIGRYQQLSZN4UWQ5 -m comment --comment ns1/svc1:p80 -s ff06::c1 -j KUBE-MARK-MASQ - -A KUBE-SEP-LIGRYQQLSZN4UWQ5 -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination [ff06::c1]:80 - -A KUBE-SEP-XJJ5QXWGJG344QDZ -m comment --comment ns1/svc1:p80 -s ff06::c2 -j KUBE-MARK-MASQ - -A KUBE-SEP-XJJ5QXWGJG344QDZ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination [ff06::c2]:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> [ff06::c1]:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-LIGRYQQLSZN4UWQ5 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> [ff06::c2]:80" -j KUBE-SEP-XJJ5QXWGJG344QDZ - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> [ff06::c2]:80" -j KUBE-SEP-XJJ5QXWGJG344QDZ - COMMIT - `) - svcIP := "fd00:ab34::20" - svcPort := 80 - svcNodePort := 30001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolTCP, - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal - }), - ) - - epIP1 := "ff06::c1" - epIP2 := "ff06::c2" - populateEndpointSlices(fp, - makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv6 - eps.Endpoints = []discovery.Endpoint{{ - Addresses: []string{epIP1}, - NodeName: nil, - }, { - Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), - }} - eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, - }} - }), - ) - - fp.syncProxyRules() - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) -} - -func 
TestDisableLocalhostNodePortsIPv6(t *testing.T) { - ipt := iptablestest.NewIPv6Fake() - fp := NewFakeProxier(ipt) - fp.localDetector = proxyutiliptables.NewNoOpLocalDetector() - fp.localhostNodePorts = false - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-LIGRYQQLSZN4UWQ5 - [0:0] - :KUBE-SEP-XJJ5QXWGJG344QDZ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 30001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d fd00:ab34::20 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL ! -d ::1/128 -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-LIGRYQQLSZN4UWQ5 -m comment --comment ns1/svc1:p80 -s ff06::c1 -j KUBE-MARK-MASQ - -A KUBE-SEP-LIGRYQQLSZN4UWQ5 -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination [ff06::c1]:80 - -A KUBE-SEP-XJJ5QXWGJG344QDZ -m comment --comment ns1/svc1:p80 -s ff06::c2 -j KUBE-MARK-MASQ - -A KUBE-SEP-XJJ5QXWGJG344QDZ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination [ff06::c2]:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> [ff06::c1]:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-LIGRYQQLSZN4UWQ5 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> [ff06::c2]:80" -j KUBE-SEP-XJJ5QXWGJG344QDZ - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> [ff06::c2]:80" -j KUBE-SEP-XJJ5QXWGJG344QDZ - COMMIT - `) - svcIP := "fd00:ab34::20" - svcPort := 80 - svcNodePort := 30001 - svcPortName := proxy.ServicePortName{ - NamespacedName: makeNSN("ns1", "svc1"), - Port: "p80", - Protocol: v1.ProtocolTCP, - } - - makeServiceMap(fp, - makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" - svc.Spec.ClusterIP = svcIP - svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), - }} + svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal + svc.Spec.ClusterIP = svcIP + svc.Spec.ExternalIPs = []string{svcExternalIPs} + svc.Spec.Ports = []v1.ServicePort{{ + Name: svcPortName.Port, + Port: int32(svcPort), + Protocol: v1.ProtocolTCP, + NodePort: int32(svcNodePort), + TargetPort: intstr.FromInt32(int32(svcPort)), + }} + svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort) + svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{ + IP: svcLBIP, + }} }), ) - epIP1 := "ff06::c1" - epIP2 := "ff06::c2" + epIP1 := "10.180.0.1" + epIP2 := "10.180.2.1" populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { - eps.AddressType = discovery.AddressTypeIPv6 + eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{epIP1}, - NodeName: nil, }, { Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) fp.syncProxyRules() - assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String()) -} - -func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) { - ipt := iptablestest.NewFake() - fp := NewFakeProxier(ipt) - fp.localDetector = proxyutiliptables.NewNoOpLocalDetector() - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"192.168.0.0/24", "2001:db8::/64"}) - fp.localhostNodePorts = false - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment 
"kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0] - :KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 192.168.0.2 -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80 - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - COMMIT - `) - onlyLocalNodePorts(t, fp, ipt, expected, getLine()) + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ + { + name: "pod to cluster IP hits both endpoints, unmasqueraded", + sourceIP: "10.0.0.2", + destIP: svcIP, + destPort: svcPort, + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: false, + }, + { + name: "pod to external IP hits both endpoints, unmasqueraded", + sourceIP: "10.0.0.2", + destIP: svcExternalIPs, + destPort: svcPort, + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: false, + }, + { + name: "external to external IP hits only local endpoint, unmasqueraded", + sourceIP: testExternalClient, + destIP: svcExternalIPs, + destPort: svcPort, + output: fmt.Sprintf("%s:%d", epIP2, svcPort), + masq: false, + }, + { + name: "pod to LB IP hits only both endpoints, unmasqueraded", + sourceIP: "10.0.0.2", + destIP: svcLBIP, + destPort: svcPort, + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: false, + }, + { + name: "external to LB IP hits only local endpoint, unmasqueraded", + sourceIP: testExternalClient, + destIP: svcLBIP, + destPort: 
svcPort, + output: fmt.Sprintf("%s:%d", epIP2, svcPort), + masq: false, + }, + { + name: "pod to NodePort hits both endpoints, unmasqueraded", + sourceIP: "10.0.0.2", + destIP: testNodeIP, + destPort: svcNodePort, + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: false, + }, + { + name: "external to NodePort hits only local endpoint, unmasqueraded", + sourceIP: testExternalClient, + destIP: testNodeIP, + destPort: svcNodePort, + output: fmt.Sprintf("%s:%d", epIP2, svcPort), + masq: false, + }, + }) } -func TestOnlyLocalNodePorts(t *testing.T) { +// TestExternalTrafficPolicyCluster tests that traffic to an externally-facing IP gets +// masqueraded when using Cluster traffic policy. +func TestExternalTrafficPolicyCluster(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"192.168.0.0/24", "2001:db8::/64"}) - fp.localhostNodePorts = false - - expected := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-XPGD46QRK7WJZT7O - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0] - :KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0] - :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0] - :KUBE-SVL-XPGD46QRK7WJZT7O - [0:0] - -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 192.168.0.2 -j KUBE-NODEPORTS - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "pod traffic for ns1/svc1:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O - -A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80 - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80 - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ - -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - -A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ - COMMIT - `) - onlyLocalNodePorts(t, fp, ipt, expected, getLine()) -} -func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables, expected string, line int) { svcIP := "172.30.0.41" svcPort := 80 svcNodePort := 3001 + svcExternalIPs := "192.168.99.11" + svcLBIP := "1.2.3.4" svcPortName := proxy.ServicePortName{ NamespacedName: makeNSN("ns1", "svc1"), Port: "p80", - Protocol: v1.ProtocolTCP, } makeServiceMap(fp, makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { - svc.Spec.Type = "NodePort" + svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ClusterIP = svcIP + svc.Spec.ExternalIPs = []string{svcExternalIPs} svc.Spec.Ports = []v1.ServicePort{{ - Name: svcPortName.Port, - Port: int32(svcPort), - Protocol: v1.ProtocolTCP, - NodePort: int32(svcNodePort), + Name: svcPortName.Port, + Port: int32(svcPort), + Protocol: v1.ProtocolTCP, + NodePort: int32(svcNodePort), + TargetPort: intstr.FromInt32(int32(svcPort)), }} - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal + svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{ + IP: svcLBIP, + }} + svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster }), ) @@ -3858,23 +2944,21 @@ func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTable NodeName: nil, }, { Addresses: []string{epIP2}, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) fp.syncProxyRules() - assertIPTablesRulesEqual(t, line, true, expected, fp.iptablesData.String()) - - runPacketFlowTests(t, line, ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, getLine(), ipt, testNodeIPs, []packetFlowTest{ { - name: "pod to cluster IP hit both endpoints", + name: "pod to cluster IP hits both endpoints, unmasqueraded", sourceIP: "10.0.0.2", destIP: svcIP, destPort: svcPort, @@ -3882,48 +2966,54 @@ func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTable masq: false, }, { - name: "external to NodePort hits only local endpoint", + name: "pod to external IP hits both endpoints, masqueraded", + sourceIP: "10.0.0.2", + destIP: svcExternalIPs, + destPort: svcPort, + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: true, + }, + { + name: "external to external IP hits both endpoints, masqueraded", + sourceIP: testExternalClient, + destIP: svcExternalIPs, + destPort: svcPort, + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: true, + }, + { + name: "pod to LB IP hits both endpoints, masqueraded", + sourceIP: "10.0.0.2", + destIP: svcLBIP, + destPort: svcPort, + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: true, + }, + { + name: "external to LB IP hits both endpoints, masqueraded", sourceIP: testExternalClient, + destIP: svcLBIP, + destPort: svcPort, + output: 
fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: true, + }, + { + name: "pod to NodePort hits both endpoints, masqueraded", + sourceIP: "10.0.0.2", destIP: testNodeIP, destPort: svcNodePort, - output: fmt.Sprintf("%s:%d", epIP2, svcPort), - masq: false, + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: true, }, { - name: "pod to localhost doesn't work because localhost is not in nodePortAddresses", - sourceIP: "10.0.0.2", - destIP: "127.0.0.1", + name: "external to NodePort hits both endpoints, masqueraded", + sourceIP: testExternalClient, + destIP: testNodeIP, destPort: svcNodePort, - output: "", + output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), + masq: true, }, }) - - if fp.localDetector.IsImplemented() { - // pod-to-NodePort is treated as internal traffic, so we see both endpoints - runPacketFlowTests(t, line, ipt, testNodeIP, []packetFlowTest{ - { - name: "pod to NodePort hits both endpoints", - sourceIP: "10.0.0.2", - destIP: testNodeIP, - destPort: svcNodePort, - output: fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort), - masq: false, - }, - }) - } else { - // pod-to-NodePort is (incorrectly) treated as external traffic - // when there is no LocalTrafficDetector. - runPacketFlowTests(t, line, ipt, testNodeIP, []packetFlowTest{ - { - name: "pod to NodePort hits only local endpoint", - sourceIP: "10.0.0.2", - destIP: testNodeIP, - destPort: svcNodePort, - output: fmt.Sprintf("%s:%d", epIP2, svcPort), - masq: false, - }, - }) - } } func TestComputeProbability(t *testing.T) { @@ -4276,7 +3366,12 @@ func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) { proxier.servicesSynced = true } -func compareEndpointsMapsExceptChainName(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*endpointsInfo) { +type endpointExpectation struct { + endpoint string + isLocal bool +} + +func checkEndpointExpectations(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]endpointExpectation) { if len(newMap) != len(expected) { t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap) } @@ -4285,13 +3380,9 @@ func compareEndpointsMapsExceptChainName(t *testing.T, tci int, newMap proxy.End t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x])) } else { for i := range expected[x] { - newEp, ok := newMap[x][i].(*endpointsInfo) - if !ok { - t.Errorf("Failed to cast endpointsInfo") - continue - } - if newEp.Endpoint != expected[x][i].Endpoint || - newEp.IsLocal != expected[x][i].IsLocal { + newEp := newMap[x][i] + if newEp.String() != expected[x][i].endpoint || + newEp.IsLocal() != expected[x][i].isLocal { t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp) } } @@ -4300,9 +3391,6 @@ func compareEndpointsMapsExceptChainName(t *testing.T, tci int, newMap proxy.End } func TestUpdateEndpointsMap(t *testing.T) { - var nodeName = testHostname - udpProtocol := v1.ProtocolUDP - emptyEndpointSlices := []*discovery.EndpointSlice{ makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}), } @@ -4312,9 +3400,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } subset2 := func(eps 
*discovery.EndpointSlice) { @@ -4323,9 +3411,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.2"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } namedPortLocal := []*discovery.EndpointSlice{ @@ -4334,12 +3422,12 @@ func TestUpdateEndpointsMap(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.1.1.1"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} }), } @@ -4354,9 +3442,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11-2"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11-2"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} }), } @@ -4368,9 +3456,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(22), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} }), } @@ -4382,16 +3470,16 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.1"}, }, { Addresses: []string{"10.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} }), } @@ -4403,12 +3491,12 @@ func TestUpdateEndpointsMap(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsWithLocal := []*discovery.EndpointSlice{ @@ -4419,16 +3507,16 @@ func TestUpdateEndpointsMap(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.1.1.1"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } subset3 := func(eps *discovery.EndpointSlice) { @@ -4437,9 +3525,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.3"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p13"), - Port: pointer.Int32(13), - Protocol: &udpProtocol, + Name: ptr.To("p13"), + Port: 
ptr.To[int32](13), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{ @@ -4452,16 +3540,16 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.1"}, }, { Addresses: []string{"10.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) { @@ -4470,16 +3558,16 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.3"}, }, { Addresses: []string{"10.1.1.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p13"), - Port: pointer.Int32(13), - Protocol: &udpProtocol, + Name: ptr.To("p13"), + Port: ptr.To[int32](13), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p14"), - Port: pointer.Int32(14), - Protocol: &udpProtocol, + Name: ptr.To("p14"), + Port: ptr.To[int32](14), + Protocol: ptr.To(v1.ProtocolUDP), }} } subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) { @@ -4488,16 +3576,16 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.2.2.1"}, }, { Addresses: []string{"10.2.2.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p21"), - Port: pointer.Int32(21), - Protocol: &udpProtocol, + Name: ptr.To("p21"), + Port: ptr.To[int32](21), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p22"), - Port: pointer.Int32(22), - Protocol: &udpProtocol, + Name: ptr.To("p22"), + Port: ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsIPsPorts := []*discovery.EndpointSlice{ @@ -4509,54 +3597,54 @@ func TestUpdateEndpointsMap(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.2.2.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.2.2.22"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p22"), - Port: pointer.Int32(22), - Protocol: &udpProtocol, + Name: ptr.To("p22"), + Port: ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset2 := func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.2.2.3"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p23"), - Port: pointer.Int32(23), - Protocol: &udpProtocol, + Name: ptr.To("p23"), + Port: ptr.To[int32](23), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset3 := func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.4.4.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.4.4.5"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p44"), - Port: pointer.Int32(44), - Protocol: &udpProtocol, + Name: ptr.To("p44"), + Port: 
ptr.To[int32](44), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset4 := func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.4.4.6"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p45"), - Port: pointer.Int32(45), - Protocol: &udpProtocol, + Name: ptr.To("p45"), + Port: ptr.To[int32](45), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset5 := func(eps *discovery.EndpointSlice) { @@ -4567,9 +3655,9 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.11"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset6 := func(eps *discovery.EndpointSlice) { @@ -4578,13 +3666,13 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.1.1.2"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p122"), - Port: pointer.Int32(122), - Protocol: &udpProtocol, + Name: ptr.To("p122"), + Port: ptr.To[int32](122), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset7 := func(eps *discovery.EndpointSlice) { @@ -4593,21 +3681,21 @@ func TestUpdateEndpointsMap(t *testing.T) { Addresses: []string{"10.3.3.3"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p33"), - Port: pointer.Int32(33), - Protocol: &udpProtocol, + Name: ptr.To("p33"), + Port: ptr.To[int32](33), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset8 := func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.4.4.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p44"), - Port: pointer.Int32(44), - Protocol: &udpProtocol, + Name: ptr.To("p44"), + Port: ptr.To[int32](44), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexBefore := []*discovery.EndpointSlice{ @@ -4636,16 +3724,16 @@ func TestUpdateEndpointsMap(t *testing.T) { name string previousEndpoints []*discovery.EndpointSlice currentEndpoints []*discovery.EndpointSlice - oldEndpoints map[proxy.ServicePortName][]*endpointsInfo - expectedResult map[proxy.ServicePortName][]*endpointsInfo + oldEndpoints map[proxy.ServicePortName][]endpointExpectation + expectedResult map[proxy.ServicePortName][]endpointExpectation expectedDeletedUDPEndpoints []proxy.ServiceEndpoint expectedNewlyActiveUDPServices map[proxy.ServicePortName]bool expectedLocalEndpoints map[types.NamespacedName]int }{{ // Case[0]: nothing name: "nothing", - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{}, + expectedResult: map[proxy.ServicePortName][]endpointExpectation{}, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{}, expectedLocalEndpoints: map[types.NamespacedName]int{}, @@ -4654,14 +3742,14 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "no change, named port, local", previousEndpoints: namedPortLocal, currentEndpoints: namedPortLocal, - oldEndpoints: 
map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -4674,20 +3762,20 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "no change, multiple subsets", previousEndpoints: multipleSubsets, currentEndpoints: multipleSubsets, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.2:12", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.2:12", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -4698,26 +3786,26 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "no change, multiple subsets, multiple ports, local", previousEndpoints: multipleSubsetsMultiplePortsLocal, currentEndpoints: multipleSubsetsMultiplePortsLocal, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:12", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.3:13", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: 
&proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:12", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.3:13", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -4730,56 +3818,56 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "no change, multiple endpoints, subsets, IPs, and ports", previousEndpoints: multipleSubsetsIPsPorts, currentEndpoints: multipleSubsetsIPsPorts, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, + {endpoint: "10.1.1.2:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:12", isLocal: false}, + {endpoint: "10.1.1.2:12", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.3:13", isLocal: false}, + {endpoint: "10.1.1.4:13", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.3:14", isLocal: false}, + {endpoint: "10.1.1.4:14", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.2.2.1:21", isLocal: false}, + {endpoint: "10.2.2.2:21", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.2.2.1:22", isLocal: false}, + {endpoint: 
"10.2.2.2:22", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, + {endpoint: "10.1.1.2:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:12", isLocal: false}, + {endpoint: "10.1.1.2:12", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.3:13", isLocal: false}, + {endpoint: "10.1.1.4:13", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.3:14", isLocal: false}, + {endpoint: "10.1.1.4:14", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.2.2.1:21", isLocal: false}, + {endpoint: "10.2.2.2:21", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.2.2.1:22", isLocal: false}, + {endpoint: "10.2.2.2:22", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -4793,10 +3881,10 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "add an Endpoints", previousEndpoints: []*discovery.EndpointSlice{nil}, currentEndpoints: namedPortLocal, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{}, + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -4811,12 +3899,12 @@ func 
TestUpdateEndpointsMap(t *testing.T) { name: "remove an Endpoints", previousEndpoints: namedPortLocal, currentEndpoints: []*discovery.EndpointSlice{nil}, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{}, + expectedResult: map[proxy.ServicePortName][]endpointExpectation{}, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ Endpoint: "10.1.1.1:11", ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP), @@ -4828,19 +3916,19 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "add an IP and port", previousEndpoints: namedPort, currentEndpoints: namedPortsLocalNoLocal, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, + {endpoint: "10.1.1.2:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:12", isLocal: false}, + {endpoint: "10.1.1.2:12", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -4855,19 +3943,19 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "remove an IP and port", previousEndpoints: namedPortsLocalNoLocal, currentEndpoints: namedPort, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, + {endpoint: "10.1.1.2:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:12", isLocal: false}, + {endpoint: "10.1.1.2:12", isLocal: true}, }, }, - expectedResult: 
map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -4887,17 +3975,17 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "add a subset", previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil}, currentEndpoints: multipleSubsetsWithLocal, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.2:12", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -4912,17 +4000,17 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "remove a subset", previousEndpoints: multipleSubsets, currentEndpoints: []*discovery.EndpointSlice{namedPort[0], nil}, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.2:12", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -4936,14 +4024,14 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "rename a port", previousEndpoints: namedPort, currentEndpoints: namedPortRenamed, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: 
map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -4959,14 +4047,14 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "renumber a port", previousEndpoints: namedPort, currentEndpoints: namedPortRenumbered, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:22", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -4980,41 +4068,41 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "complex add and remove", previousEndpoints: complexBefore, currentEndpoints: complexAfter, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.2.2.22:22", isLocal: true}, + {endpoint: "10.2.2.2:22", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.3:23", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.2.2.3:23", isLocal: true}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.5:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.4.4.4:44", isLocal: true}, + {endpoint: "10.4.4.5:44", isLocal: true}, }, makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.4.4.6:45", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.11:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, - {BaseEndpointInfo: 
&proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.11:11", isLocal: false}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.2:12", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:122", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.2:122", isLocal: false}, }, makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.3.3.3:33", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.3.3.3:33", isLocal: false}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.4.4.4:44", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -5046,10 +4134,10 @@ func TestUpdateEndpointsMap(t *testing.T) { name: "change from 0 endpoint address to 1 unnamed port", previousEndpoints: emptyEndpointSlices, currentEndpoints: namedPort, - oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{}, - expectedResult: map[proxy.ServicePortName][]*endpointsInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{}, + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}}, + {endpoint: "10.1.1.1:11", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -5064,7 +4152,7 @@ func TestUpdateEndpointsMap(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) - fp.hostname = nodeName + fp.hostname = testHostname // First check that after adding all previous versions of endpoints, // the fp.oldEndpoints is as we expect. @@ -5074,7 +4162,7 @@ func TestUpdateEndpointsMap(t *testing.T) { } } fp.endpointsMap.Update(fp.endpointsChanges) - compareEndpointsMapsExceptChainName(t, tci, fp.endpointsMap, tc.oldEndpoints) + checkEndpointExpectations(t, tci, fp.endpointsMap, tc.oldEndpoints) // Now let's call appropriate handlers to get to state we want to be. 
if len(tc.previousEndpoints) != len(tc.currentEndpoints) { @@ -5094,7 +4182,7 @@ func TestUpdateEndpointsMap(t *testing.T) { } result := fp.endpointsMap.Update(fp.endpointsChanges) newMap := fp.endpointsMap - compareEndpointsMapsExceptChainName(t, tci, newMap, tc.expectedResult) + checkEndpointExpectations(t, tci, newMap, tc.expectedResult) if len(result.DeletedUDPEndpoints) != len(tc.expectedDeletedUDPEndpoints) { t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedDeletedUDPEndpoints), len(result.DeletedUDPEndpoints), result.DeletedUDPEndpoints) } @@ -5158,27 +4246,27 @@ func TestHealthCheckNodePortWhenTerminating(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{{ Addresses: []string{"10.0.1.1"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(testHostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(testHostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.3"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(testHostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(testHostname), }, { // not ready endpoints should be ignored Addresses: []string{"10.0.1.4"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(false)}, - NodeName: pointer.String(testHostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(false)}, + NodeName: ptr.To(testHostname), }}, } @@ -5197,43 +4285,43 @@ func TestHealthCheckNodePortWhenTerminating(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{{ Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // not ready endpoints should be ignored Addresses: 
[]string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }}, } @@ -5307,20 +4395,19 @@ func TestProxierDeleteNodePortStaleUDP(t *testing.T) { } epIP := "10.180.0.1" - udpProtocol := v1.ProtocolUDP populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{epIP}, Conditions: discovery.EndpointConditions{ - Serving: pointer.Bool(false), + Serving: ptr.To(false), }, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &udpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolUDP), }} }), ) @@ -5337,13 +4424,13 @@ func TestProxierDeleteNodePortStaleUDP(t *testing.T) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{epIP}, Conditions: discovery.EndpointConditions{ - Serving: pointer.Bool(true), + Serving: ptr.To(true), }, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &udpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolUDP), }} }), ) @@ -5431,9 +4518,9 @@ func TestProxierMetricsIptablesTotalRules(t *testing.T) { Addresses: []string{"10.0.0.5"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -5460,76 +4547,28 @@ func TestProxierMetricsIptablesTotalRules(t *testing.T) { // This test ensures that the iptables proxier supports translating Endpoints to // iptables output when internalTrafficPolicy is specified -func TestInternalTrafficPolicyE2E(t *testing.T) { +func TestInternalTrafficPolicy(t *testing.T) { type endpoint struct { ip string hostname string } - cluster := v1.ServiceInternalTrafficPolicyCluster - local := v1.ServiceInternalTrafficPolicyLocal - - clusterExpectedIPTables := dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0] - :KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0] - :KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80 - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80 - -A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ - -A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -j KUBE-SEP-XGJFVO3L2O5SRFNT - COMMIT - `) - testCases := []struct { - name string - line int - internalTrafficPolicy *v1.ServiceInternalTrafficPolicy - endpoints []endpoint - expectEndpointRule bool - expectedIPTablesWithSlice string - flowTests []packetFlowTest + name string + line int + internalTrafficPolicy *v1.ServiceInternalTrafficPolicy + endpoints []endpoint + flowTests []packetFlowTest }{ { name: "internalTrafficPolicy is cluster", line: getLine(), - internalTrafficPolicy: &cluster, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster), endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, {"10.0.1.3", "host2"}, }, - expectEndpointRule: true, - expectedIPTablesWithSlice: clusterExpectedIPTables, flowTests: []packetFlowTest{ { name: "pod to ClusterIP hits all endpoints", @@ -5542,47 +4581,14 @@ func TestInternalTrafficPolicyE2E(t *testing.T) { }, }, { - name: "internalTrafficPolicy is local and there are local endpoints", - line: getLine(), - internalTrafficPolicy: &local, - endpoints: []endpoint{ - {"10.0.1.1", testHostname}, - {"10.0.1.2", "host1"}, - {"10.0.1.3", "host2"}, - }, - expectEndpointRule: true, - expectedIPTablesWithSlice: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0] - :KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVL-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80 - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -j KUBE-SEP-3JOIVZTXZZRGORX4 - COMMIT - `), + name: "internalTrafficPolicy is local and there is one local endpoint", + line: getLine(), + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), + endpoints: []endpoint{ + {"10.0.1.1", testHostname}, + {"10.0.1.2", "host1"}, + {"10.0.1.3", "host2"}, + }, flowTests: []packetFlowTest{ { name: "pod to ClusterIP hits only local endpoint", @@ -5594,42 +4600,35 @@ func TestInternalTrafficPolicyE2E(t *testing.T) { }, }, }, + { + name: "internalTrafficPolicy is local and there are multiple local endpoints", + line: getLine(), + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), + endpoints: []endpoint{ + {"10.0.1.1", testHostname}, + {"10.0.1.2", testHostname}, + {"10.0.1.3", "host2"}, + }, + flowTests: []packetFlowTest{ + { + name: "pod to ClusterIP hits all local endpoints", + sourceIP: "10.0.0.2", + destIP: "172.30.1.1", + destPort: 80, + output: "10.0.1.1:80, 10.0.1.2:80", + masq: false, + }, + }, + }, { name: "internalTrafficPolicy is local and there are no local endpoints", line: getLine(), - internalTrafficPolicy: &local, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", "host1"}, {"10.0.1.3", "host2"}, }, - expectEndpointRule: false, - expectedIPTablesWithSlice: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 has no local endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j DROP - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `), flowTests: []packetFlowTest{ { name: "no endpoints", @@ -5673,33 +4672,27 @@ func TestInternalTrafficPolicyE2E(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, } for _, ep := range tc.endpoints { endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{ Addresses: []string{ep.ip}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(ep.hostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(ep.hostname), }) } fp.OnEndpointSliceAdd(endpointSlice) fp.syncProxyRules() - assertIPTablesRulesEqual(t, tc.line, true, tc.expectedIPTablesWithSlice, fp.iptablesData.String()) - runPacketFlowTests(t, tc.line, ipt, testNodeIP, tc.flowTests) + runPacketFlowTests(t, tc.line, ipt, testNodeIPs, tc.flowTests) fp.OnEndpointSliceDelete(endpointSlice) fp.syncProxyRules() - if tc.expectEndpointRule { - fp.OnEndpointSliceDelete(endpointSlice) - fp.syncProxyRules() - assertIPTablesRulesNotEqual(t, tc.line, tc.expectedIPTablesWithSlice, fp.iptablesData.String()) - } - runPacketFlowTests(t, tc.line, ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, tc.line, ipt, testNodeIPs, []packetFlowTest{ { name: "endpoints deleted", sourceIP: "10.0.0.2", @@ -5715,14 +4708,12 @@ func TestInternalTrafficPolicyE2E(t *testing.T) { // TestTerminatingEndpointsTrafficPolicyLocal tests that when there are local ready and // ready + terminating endpoints, only the ready endpoints are used. 
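The hunks above also migrate the endpoint and port builders from the typed helpers in k8s.io/utils/pointer (pointer.String, pointer.Int32, pointer.Bool) to the single generic ptr.To from k8s.io/utils/ptr, which also removes the local tcpProtocol/udpProtocol variables that existed only to be addressable. A minimal, self-contained sketch of the pattern (the EndpointPort literal is illustrative, not taken from the patch):

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		discovery "k8s.io/api/discovery/v1"
		"k8s.io/utils/ptr"
	)

	func main() {
		// Before: Name: pointer.String(""), Port: pointer.Int32(80), Protocol: &tcpProtocol
		// After: one generic helper covers every pointer-typed field.
		port := discovery.EndpointPort{
			Name:     ptr.To(""),             // *string
			Port:     ptr.To[int32](80),      // *int32; explicit type parameter since 80 is untyped
			Protocol: ptr.To(v1.ProtocolTCP), // *v1.Protocol; no addressable local variable needed
		}
		fmt.Println(*port.Name, *port.Port, *port.Protocol)
	}
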
func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { - timeout := v1.DefaultClientIPServiceAffinitySeconds service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}, Spec: v1.ServiceSpec{ ClusterIP: "172.30.1.1", Type: v1.ServiceTypeLoadBalancer, ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, - Selector: map[string]string{"foo": "bar"}, Ports: []v1.ServicePort{ { Name: "", @@ -5732,12 +4723,6 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { }, }, HealthCheckNodePort: 30000, - SessionAffinity: v1.ServiceAffinityClientIP, - SessionAffinityConfig: &v1.SessionAffinityConfig{ - ClientIP: &v1.ClientIPConfig{ - TimeoutSeconds: &timeout, - }, - }, }, Status: v1.ServiceStatus{ LoadBalancer: v1.LoadBalancerStatus{ @@ -5749,12 +4734,10 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { } testcases := []struct { - name string - line int - endpointslice *discovery.EndpointSlice - expectedIPTables string - noUsableEndpoints bool - flowTests []packetFlowTest + name string + line int + endpointslice *discovery.EndpointSlice + flowTests []packetFlowTest }{ { name: "ready endpoints exist", @@ -5766,117 +4749,62 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: "svc1"}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ { Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should be ignored for external since there are ready non-terminating endpoints Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should be ignored for external since there are ready non-terminating endpoints Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should be ignored for external since it's not local Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String("host-1"), + NodeName: ptr.To("host-1"), }, }, }, - expectedIPTables: 
dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVL-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80 - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -j KUBE-SEP-IO5XOSKPAXIFQXAJ - COMMIT - `), flowTests: []packetFlowTest{ { name: "pod to clusterIP", @@ -5906,9 +4834,9 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: "svc1"}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ @@ -5916,95 +4844,44 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { // this endpoint should be used since there are only ready terminating endpoints Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should be used since there are only ready terminating endpoints Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should not be used since it is both terminating and not ready. 
Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should be ignored for external since it's not local Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String("host-1"), + NodeName: ptr.To("host-1"), }, }, }, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0] - :KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVL-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80 - -A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ - -A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --rcheck --seconds 10800 --reap -j KUBE-SEP-XGJFVO3L2O5SRFNT - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -j KUBE-SEP-XGJFVO3L2O5SRFNT - COMMIT - `), flowTests: []packetFlowTest{ { name: "pod to clusterIP", @@ -6034,9 +4911,9 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: "svc1"}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ @@ -6045,54 +4922,14 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { // but it will prevent a REJECT rule from being created Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String("host-1"), + NodeName: ptr.To("host-1"), }, }, }, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - COMMIT - `), flowTests: []packetFlowTest{ { name: "pod to clusterIP", @@ -6120,9 +4957,9 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: "svc1"}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ @@ -6130,53 +4967,24 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { // Local but not ready or serving Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // Remote and not ready or serving Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String("host-1"), + NodeName: ptr.To("host-1"), }, }, }, - noUsableEndpoints: true, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j REJECT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `), flowTests: []packetFlowTest{ { name: "pod to clusterIP, no usable endpoints", @@ -6207,18 +5015,11 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { fp.OnEndpointSliceAdd(testcase.endpointslice) fp.syncProxyRules() - assertIPTablesRulesEqual(t, testcase.line, true, testcase.expectedIPTables, fp.iptablesData.String()) - runPacketFlowTests(t, testcase.line, ipt, testNodeIP, testcase.flowTests) + runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, testcase.flowTests) fp.OnEndpointSliceDelete(testcase.endpointslice) fp.syncProxyRules() - if testcase.noUsableEndpoints { - // Deleting the EndpointSlice should have had no effect - assertIPTablesRulesEqual(t, testcase.line, true, testcase.expectedIPTables, fp.iptablesData.String()) - } else { - assertIPTablesRulesNotEqual(t, testcase.line, testcase.expectedIPTables, fp.iptablesData.String()) - } - runPacketFlowTests(t, testcase.line, ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, []packetFlowTest{ { name: "pod to clusterIP after endpoints deleted", sourceIP: "10.0.0.2", @@ -6241,14 +5042,12 @@ func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { // TestTerminatingEndpointsTrafficPolicyCluster tests that when there are cluster-wide // ready and ready + terminating endpoints, only the ready endpoints are used. func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { - timeout := v1.DefaultClientIPServiceAffinitySeconds service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}, Spec: v1.ServiceSpec{ ClusterIP: "172.30.1.1", Type: v1.ServiceTypeLoadBalancer, ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyCluster, - Selector: map[string]string{"foo": "bar"}, Ports: []v1.ServicePort{ { Name: "", @@ -6258,12 +5057,6 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { }, }, HealthCheckNodePort: 30000, - SessionAffinity: v1.ServiceAffinityClientIP, - SessionAffinityConfig: &v1.SessionAffinityConfig{ - ClientIP: &v1.ClientIPConfig{ - TimeoutSeconds: &timeout, - }, - }, }, Status: v1.ServiceStatus{ LoadBalancer: v1.LoadBalancerStatus{ @@ -6275,12 +5068,10 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { } testcases := []struct { - name string - line int - endpointslice *discovery.EndpointSlice - expectedIPTables string - noUsableEndpoints bool - flowTests []packetFlowTest + name string + line int + endpointslice *discovery.EndpointSlice + flowTests []packetFlowTest }{ { name: "ready endpoints exist", @@ -6292,108 +5083,61 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: "svc1"}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ { Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: 
discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should be ignored since there are ready non-terminating endpoints Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, { // this endpoint should be ignored since it is not "serving" Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, { Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, }, }, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade traffic for ns1/svc1 external destinations" -j KUBE-MARK-MASQ - -A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80 - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - COMMIT - `), flowTests: []packetFlowTest{ { name: "pod to clusterIP", @@ -6423,9 +5167,9 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: "svc1"}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ @@ -6433,91 +5177,44 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { // this endpoint should be used since there are only ready terminating endpoints Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should be used since there are only ready terminating endpoints Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // this endpoint should not be used since it is both terminating and not 
ready. Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, { // this endpoint should be used since there are only ready terminating endpoints Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, }, }, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0] - :KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade traffic for ns1/svc1 external destinations" -j KUBE-MARK-MASQ - -A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80 - -A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ - -A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --rcheck --seconds 10800 --reap -j KUBE-SEP-XGJFVO3L2O5SRFNT - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-XGJFVO3L2O5SRFNT - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - COMMIT - `), flowTests: []packetFlowTest{ { name: "pod to clusterIP", @@ -6547,60 +5244,23 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: "svc1"}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ { Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String("host-1"), + NodeName: ptr.To("host-1"), }, }, }, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade traffic for ns1/svc1 external destinations" -j KUBE-MARK-MASQ - -A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - COMMIT - `), flowTests: []packetFlowTest{ { name: "pod to clusterIP", @@ -6630,9 +5290,9 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: "svc1"}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ @@ -6640,52 +5300,24 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { // Local, not ready or serving Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // Remote, not ready or serving Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String("host-1"), + NodeName: ptr.To("host-1"), }, }, }, - noUsableEndpoints: true, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j REJECT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `), flowTests: []packetFlowTest{ { name: "pod to clusterIP", @@ -6717,18 +5349,11 @@ func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) { fp.OnEndpointSliceAdd(testcase.endpointslice) fp.syncProxyRules() - assertIPTablesRulesEqual(t, testcase.line, true, testcase.expectedIPTables, fp.iptablesData.String()) - runPacketFlowTests(t, testcase.line, ipt, testNodeIP, testcase.flowTests) + runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, testcase.flowTests) fp.OnEndpointSliceDelete(testcase.endpointslice) fp.syncProxyRules() - if testcase.noUsableEndpoints { - // Deleting the EndpointSlice should have had no effect - assertIPTablesRulesEqual(t, testcase.line, true, testcase.expectedIPTables, fp.iptablesData.String()) - } else { - assertIPTablesRulesNotEqual(t, testcase.line, testcase.expectedIPTables, fp.iptablesData.String()) - } - runPacketFlowTests(t, testcase.line, ipt, testNodeIP, []packetFlowTest{ + runPacketFlowTests(t, testcase.line, ipt, testNodeIPs, []packetFlowTest{ { name: "pod to clusterIP after endpoints deleted", sourceIP: "10.0.0.2", @@ -6752,8 +5377,6 @@ func TestInternalExternalMasquerade(t *testing.T) { // (Put the test setup code in an internal function so we can have it here at the // top, before the test cases that will be run against it.) setupTest := func(fp *Proxier) { - local := v1.ServiceInternalTrafficPolicyLocal - makeServiceMap(fp, makeTestService("ns1", "svc1", func(svc *v1.Service) { svc.Spec.Type = "LoadBalancer" @@ -6794,7 +5417,7 @@ func TestInternalExternalMasquerade(t *testing.T) { NodePort: int32(3003), }} svc.Spec.HealthCheckNodePort = 30003 - svc.Spec.InternalTrafficPolicy = &local + svc.Spec.InternalTrafficPolicy = ptr.To(v1.ServiceInternalTrafficPolicyLocal) svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{ IP: "9.10.11.12", }} @@ -6807,17 +5430,17 @@ func TestInternalExternalMasquerade(t *testing.T) { eps.Endpoints = []discovery.Endpoint{ { Addresses: []string{"10.180.0.1"}, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.180.1.1"}, - NodeName: pointer.String("remote"), + NodeName: ptr.To("remote"), }, } eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) { @@ -6825,17 +5448,17 @@ func TestInternalExternalMasquerade(t *testing.T) { eps.Endpoints = []discovery.Endpoint{ { Addresses: []string{"10.180.0.2"}, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.180.1.2"}, - NodeName: pointer.String("remote"), + NodeName: ptr.To("remote"), }, } eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) { @@ -6843,17 +5466,17 @@ func TestInternalExternalMasquerade(t *testing.T) { eps.Endpoints = []discovery.Endpoint{ { Addresses: []string{"10.180.0.3"}, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.180.1.3"}, - 
NodeName: pointer.String("remote"), + NodeName: ptr.To("remote"), }, } eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -7206,31 +5829,31 @@ func TestInternalExternalMasquerade(t *testing.T) { // ClusterIP is assumed to be from a pod, and thus to not // require masquerading. "node to ClusterIP": { - masq: pointer.Bool(false), + masq: ptr.To(false), }, "node to ClusterIP with eTP:Local": { - masq: pointer.Bool(false), + masq: ptr.To(false), }, "node to ClusterIP with iTP:Local": { - masq: pointer.Bool(false), + masq: ptr.To(false), }, "external to ClusterIP": { - masq: pointer.Bool(false), + masq: ptr.To(false), }, "external to ClusterIP with eTP:Local": { - masq: pointer.Bool(false), + masq: ptr.To(false), }, "external to ClusterIP with iTP:Local": { - masq: pointer.Bool(false), + masq: ptr.To(false), }, // And there's no eTP:Local short-circuit for pod traffic, // so pods get only the local endpoints. "pod to NodePort with eTP:Local": { - output: pointer.String("10.180.0.2:80"), + output: ptr.To("10.180.0.2:80"), }, "pod to LB with eTP:Local": { - output: pointer.String("10.180.0.2:80"), + output: ptr.To("10.180.0.2:80"), }, }, }, @@ -7243,13 +5866,13 @@ func TestInternalExternalMasquerade(t *testing.T) { // All "to ClusterIP" traffic gets masqueraded when using // --masquerade-all. "pod to ClusterIP": { - masq: pointer.Bool(true), + masq: ptr.To(true), }, "pod to ClusterIP with eTP:Local": { - masq: pointer.Bool(true), + masq: ptr.To(true), }, "pod to ClusterIP with iTP:Local": { - masq: pointer.Bool(true), + masq: ptr.To(true), }, }, }, @@ -7261,21 +5884,21 @@ func TestInternalExternalMasquerade(t *testing.T) { overrides: map[string]packetFlowTestOverride{ // As in "masqueradeAll" "pod to ClusterIP": { - masq: pointer.Bool(true), + masq: ptr.To(true), }, "pod to ClusterIP with eTP:Local": { - masq: pointer.Bool(true), + masq: ptr.To(true), }, "pod to ClusterIP with iTP:Local": { - masq: pointer.Bool(true), + masq: ptr.To(true), }, // As in "no LocalTrafficDetector" "pod to NodePort with eTP:Local": { - output: pointer.String("10.180.0.2:80"), + output: ptr.To("10.180.0.2:80"), }, "pod to LB with eTP:Local": { - output: pointer.String("10.180.0.2:80"), + output: ptr.To("10.180.0.2:80"), }, }, }, @@ -7315,7 +5938,7 @@ func TestInternalExternalMasquerade(t *testing.T) { if overridesApplied != len(tc.overrides) { t.Errorf("%d overrides did not match any test case name!", len(tc.overrides)-overridesApplied) } - runPacketFlowTests(t, tc.line, ipt, testNodeIP, tcFlowTests) + runPacketFlowTests(t, tc.line, ipt, testNodeIPs, tcFlowTests) }) } } @@ -7381,9 +6004,9 @@ func TestSyncProxyRulesLargeClusterMode(t *testing.T) { eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.0.%d.%d", i%256, i/256)} } eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) { @@ -7393,9 +6016,9 @@ func TestSyncProxyRulesLargeClusterMode(t *testing.T) { eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.1.%d.%d", i%256, i/256)} } eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p8080"), - Port: pointer.Int32(8080), - Protocol: &tcpProtocol, + Name: ptr.To("p8080"), + Port: ptr.To[int32](8080), + 
Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -7422,9 +6045,9 @@ func TestSyncProxyRulesLargeClusterMode(t *testing.T) { Addresses: []string{"203.0.113.12"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p8081"), - Port: pointer.Int32(8081), - Protocol: &tcpProtocol, + Name: ptr.To("p8081"), + Port: ptr.To[int32](8081), + Protocol: ptr.To(v1.ProtocolTCP), }} })) fp.syncProxyRules() @@ -7474,9 +6097,9 @@ func TestSyncProxyRulesLargeClusterMode(t *testing.T) { Addresses: []string{"10.4.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p8082"), - Port: pointer.Int32(8082), - Protocol: &tcpProtocol, + Name: ptr.To("p8082"), + Port: ptr.To[int32](8082), + Protocol: ptr.To(v1.ProtocolTCP), }} })) fp.syncProxyRules() @@ -7560,9 +6183,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) { Addresses: []string{"10.0.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) { @@ -7571,9 +6194,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) { Addresses: []string{"10.0.2.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p8080"), - Port: pointer.Int32(8080), - Protocol: &tcpProtocol, + Name: ptr.To("p8080"), + Port: ptr.To[int32](8080), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -7655,9 +6278,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) { Addresses: []string{"10.0.3.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -7830,9 +6453,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) { Addresses: []string{"10.0.4.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -8169,9 +6792,6 @@ func TestNoEndpointsMetric(t *testing.T) { hostname string } - internalTrafficPolicyLocal := v1.ServiceInternalTrafficPolicyLocal - externalTrafficPolicyLocal := v1.ServiceExternalTrafficPolicyLocal - metrics.RegisterMetrics() testCases := []struct { name string @@ -8183,7 +6803,7 @@ func TestNoEndpointsMetric(t *testing.T) { }{ { name: "internalTrafficPolicy is set and there are local endpoints", - internalTrafficPolicy: &internalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, @@ -8192,7 +6812,7 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "externalTrafficPolicy is set and there are local endpoints", - externalTrafficPolicy: externalTrafficPolicyLocal, + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, @@ -8201,8 +6821,8 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "both policies are set and there are local endpoints", - internalTrafficPolicy: &internalTrafficPolicyLocal, - externalTrafficPolicy: externalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, @@ -8211,7 
+6831,7 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "internalTrafficPolicy is set and there are no local endpoints", - internalTrafficPolicy: &internalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", "host1"}, @@ -8221,7 +6841,7 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "externalTrafficPolicy is set and there are no local endpoints", - externalTrafficPolicy: externalTrafficPolicyLocal, + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", "host1"}, @@ -8231,8 +6851,8 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "both policies are set and there are no local endpoints", - internalTrafficPolicy: &internalTrafficPolicyLocal, - externalTrafficPolicy: externalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", "host1"}, @@ -8243,8 +6863,8 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "both policies are set and there are no endpoints at all", - internalTrafficPolicy: &internalTrafficPolicyLocal, - externalTrafficPolicy: externalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{}, expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 0, expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 0, @@ -8286,17 +6906,17 @@ func TestNoEndpointsMetric(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, } for _, ep := range tc.endpoints { endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{ Addresses: []string{ep.ip}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(ep.hostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(ep.hostname), }) } @@ -8324,9 +6944,6 @@ func TestNoEndpointsMetric(t *testing.T) { } func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { - ipModeProxy := v1.LoadBalancerIPModeProxy - ipModeVIP := v1.LoadBalancerIPModeVIP - testCases := []struct { name string ipModeEnabled bool @@ -8341,7 +6958,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { ipModeEnabled: false, svcIP: "10.20.30.41", svcLBIP: "1.2.3.4", - ipMode: &ipModeProxy, + ipMode: ptr.To(v1.LoadBalancerIPModeProxy), expectedRule: true, }, { @@ -8349,7 +6966,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { ipModeEnabled: false, svcIP: "10.20.30.42", svcLBIP: "1.2.3.5", - ipMode: &ipModeVIP, + ipMode: ptr.To(v1.LoadBalancerIPModeVIP), expectedRule: true, }, { @@ -8366,7 +6983,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { ipModeEnabled: true, svcIP: "10.20.30.41", svcLBIP: "1.2.3.4", - ipMode: &ipModeProxy, + ipMode: ptr.To(v1.LoadBalancerIPModeProxy), expectedRule: false, }, { @@ -8374,7 +6991,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { ipModeEnabled: true, svcIP: "10.20.30.42", svcLBIP: "1.2.3.5", - ipMode: &ipModeVIP, + ipMode: ptr.To(v1.LoadBalancerIPModeVIP), expectedRule: 
true, }, { @@ -8417,7 +7034,6 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { }), ) - tcpProtocol := v1.ProtocolTCP populateEndpointSlices(fp, makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 @@ -8425,9 +7041,9 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 19c354e9ac13b..bf68149199ba2 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -84,6 +84,10 @@ const ( // https://github.com/kubernetes/kubernetes/issues/72236 kubeIPVSFilterChain utiliptables.Chain = "KUBE-IPVS-FILTER" + // kubeIPVSOutFilterChain filters access to load balancer services from node. + // https://github.com/kubernetes/kubernetes/issues/119656 + kubeIPVSOutFilterChain utiliptables.Chain = "KUBE-IPVS-OUT-FILTER" + // defaultScheduler is the default ipvs scheduler algorithm - round robin. defaultScheduler = "rr" @@ -113,6 +117,7 @@ var iptablesJumpChain = []struct { {utiliptables.TableFilter, utiliptables.ChainInput, kubeProxyFirewallChain, "kube-proxy firewall rules"}, {utiliptables.TableFilter, utiliptables.ChainForward, kubeProxyFirewallChain, "kube-proxy firewall rules"}, {utiliptables.TableFilter, utiliptables.ChainInput, kubeIPVSFilterChain, "kubernetes ipvs access filter"}, + {utiliptables.TableFilter, utiliptables.ChainOutput, kubeIPVSOutFilterChain, "kubernetes ipvs access filter"}, } var iptablesChains = []struct { @@ -129,6 +134,7 @@ var iptablesChains = []struct { {utiliptables.TableFilter, kubeProxyFirewallChain}, {utiliptables.TableFilter, kubeSourceRangesFirewallChain}, {utiliptables.TableFilter, kubeIPVSFilterChain}, + {utiliptables.TableFilter, kubeIPVSOutFilterChain}, } var iptablesCleanupChains = []struct { @@ -144,6 +150,7 @@ var iptablesCleanupChains = []struct { {utiliptables.TableFilter, kubeProxyFirewallChain}, {utiliptables.TableFilter, kubeSourceRangesFirewallChain}, {utiliptables.TableFilter, kubeIPVSFilterChain}, + {utiliptables.TableFilter, kubeIPVSOutFilterChain}, } // ipsetInfo is all ipset we needed in ipvs proxier @@ -201,7 +208,6 @@ var ipsetWithIptablesChain = []struct { // In IPVS proxy mode, the following flags need to be set const ( - sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables" sysctlVSConnTrack = "net/ipv4/vs/conntrack" sysctlConnReuse = "net/ipv4/vs/conn_reuse_mode" sysctlExpireNoDestConn = "net/ipv4/vs/expire_nodest_conn" @@ -220,7 +226,7 @@ type Proxier struct { // services that happened since last syncProxyRules call. For a single object, // changes are accumulated, i.e. previous is state from before all of them, // current is state after applying all of those. 
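For orientation on the KUBE-IPVS-OUT-FILTER additions above: the new chain is created in the filter table and jumped to from OUTPUT, so traffic the node itself originates toward a load balancer VIP can be filtered before IPVS accepts it. The sketch below only renders roughly what such rules look like once written; the VIP, port, and protocol are made-up example values, and the flag layout mirrors the filterRules.Write call added further down in writeIptablesRules rather than actual proxier output.

// Illustrative sketch (not part of the patch): prints roughly what the
// OUTPUT hook and a per-LB DROP rule look like. Values are hypothetical.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	const chain = "KUBE-IPVS-OUT-FILTER"
	// Jump from the filter table's OUTPUT chain into the new chain,
	// as registered in iptablesJumpChain above.
	jump := strings.Join([]string{
		"-A", "OUTPUT",
		"-m", "comment", "--comment", `"kubernetes ipvs access filter"`,
		"-j", chain,
	}, " ")
	// Per-entry DROP rule for a load balancer VIP the node must not reach.
	// For IPVS the source is also the VIP, hence the "-s" match.
	vip, port, proto := "1.2.3.4", 80, "tcp"
	drop := strings.Join([]string{
		"-A", chain,
		"-s", vip,
		"-m", "ipvs", "--vaddr", vip, "--vproto", proto, "--vport", strconv.Itoa(port),
		"-j", "DROP",
	}, " ")
	fmt.Println(jump)
	fmt.Println(drop)
}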
- endpointsChanges *proxy.EndpointChangeTracker + endpointsChanges *proxy.EndpointsChangeTracker serviceChanges *proxy.ServiceChangeTracker mu sync.Mutex // protects the following fields @@ -261,7 +267,7 @@ type Proxier struct { recorder events.EventRecorder serviceHealthServer healthcheck.ServiceHealthServer - healthzServer healthcheck.ProxierHealthUpdater + healthzServer *healthcheck.ProxierHealthServer ipvsScheduler string // The following buffers are used to reuse memory and avoid allocations @@ -296,6 +302,11 @@ type Proxier struct { // A Set is used here since we end up calculating endpoint topology multiple times for the same Service // if it has multiple ports but each Service should only be counted once. serviceNoLocalEndpointsExternal sets.Set[string] + // lbNoNodeAccessIPPortProtocolEntries represents the set of loadBalancers IP + Port + Protocol that should not be accessible from K8s nodes + // We cannot directly restrict LB access from node using LoadBalancerSourceRanges, we need to install + // additional iptables rules. + // (ref: https://github.com/kubernetes/kubernetes/issues/119656) + lbNoNodeAccessIPPortProtocolEntries []*utilipset.Entry } // Proxier implements proxy.Provider @@ -325,18 +336,12 @@ func NewProxier(ipFamily v1.IPFamily, hostname string, nodeIP net.IP, recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, + healthzServer *healthcheck.ProxierHealthServer, scheduler string, nodePortAddressStrings []string, kernelHandler KernelHandler, + initOnly bool, ) (*Proxier, error) { - // Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers - // are connected to a Linux bridge (but not SDN bridges). Until most - // plugins handle this, log when config is missing - if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 { - klog.InfoS("Missing br-netfilter module or unset sysctl br-nf-call-iptables, proxy may not work as intended") - } - // Set the conntrack sysctl we need for if err := proxyutil.EnsureSysctl(sysctl, sysctlVSConnTrack, 1); err != nil { return nil, err @@ -398,6 +403,11 @@ func NewProxier(ipFamily v1.IPFamily, } } + if initOnly { + klog.InfoS("System initialized and --init-only specified") + return nil, nil + } + // Generate the masquerade mark to use for SNAT rules. 
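The lbNoNodeAccessIPPortProtocolEntries field introduced above is populated during syncProxyRules when a LoadBalancer Service specifies loadBalancerSourceRanges that do not cover the node's own IP (see the else branch added further down). A minimal, self-contained sketch of that containment decision, with hypothetical addresses and ranges standing in for the proxier's real inputs:

// Sketch only: models the source-range check that decides whether the node
// must be explicitly blocked from a load balancer VIP. Not the proxier's code.
package main

import (
	"fmt"
	"net/netip"
)

// nodeCoveredBySourceRanges reports whether nodeIP falls inside any of the
// Service's spec.loadBalancerSourceRanges CIDRs.
func nodeCoveredBySourceRanges(nodeIP netip.Addr, sourceRanges []string) bool {
	for _, cidr := range sourceRanges {
		prefix, err := netip.ParsePrefix(cidr)
		if err != nil {
			continue // malformed ranges are validated elsewhere; skip here
		}
		if prefix.Contains(nodeIP) {
			return true
		}
	}
	return false
}

func main() {
	nodeIP := netip.MustParseAddr("192.168.1.10")          // example node IP
	sourceRanges := []string{"10.0.0.0/8", "172.16.0.0/12"} // example ranges
	if !nodeCoveredBySourceRanges(nodeIP, sourceRanges) {
		// The node is not allowed in via the ipset-based allow list, so the
		// proxier queues the VIP + port + protocol for a DROP rule in
		// KUBE-IPVS-OUT-FILTER.
		fmt.Println("queue 1.2.3.4:80/TCP for a node-access DROP rule")
	}
}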
masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue) @@ -421,7 +431,7 @@ func NewProxier(ipFamily v1.IPFamily, svcPortMap: make(proxy.ServicePortMap), serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil), endpointsMap: make(proxy.EndpointsMap), - endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, ipFamily, recorder, nil), + endpointsChanges: proxy.NewEndpointsChangeTracker(hostname, nil, ipFamily, recorder, nil), initialSync: true, syncPeriod: syncPeriod, minSyncPeriod: minSyncPeriod, @@ -482,10 +492,11 @@ func NewDualStackProxier( hostname string, nodeIPs map[v1.IPFamily]net.IP, recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, + healthzServer *healthcheck.ProxierHealthServer, scheduler string, nodePortAddresses []string, kernelHandler KernelHandler, + initOnly bool, ) (proxy.Provider, error) { safeIpset := newSafeIpset(ipset) @@ -495,7 +506,7 @@ func NewDualStackProxier( exec, syncPeriod, minSyncPeriod, filterCIDRs(false, excludeCIDRs), strictARP, tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit, localDetectors[0], hostname, nodeIPs[v1.IPv4Protocol], - recorder, healthzServer, scheduler, nodePortAddresses, kernelHandler) + recorder, healthzServer, scheduler, nodePortAddresses, kernelHandler, initOnly) if err != nil { return nil, fmt.Errorf("unable to create ipv4 proxier: %v", err) } @@ -504,10 +515,13 @@ func NewDualStackProxier( exec, syncPeriod, minSyncPeriod, filterCIDRs(true, excludeCIDRs), strictARP, tcpTimeout, tcpFinTimeout, udpTimeout, masqueradeAll, masqueradeBit, localDetectors[1], hostname, nodeIPs[v1.IPv6Protocol], - recorder, healthzServer, scheduler, nodePortAddresses, kernelHandler) + recorder, healthzServer, scheduler, nodePortAddresses, kernelHandler, initOnly) if err != nil { return nil, fmt.Errorf("unable to create ipv6 proxier: %v", err) } + if initOnly { + return nil, nil + } // Return a meta-proxier that dispatch calls between the two // single-stack proxier instances @@ -756,7 +770,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset // Sync is called to synchronize the proxier state to iptables and ipvs as soon as possible. func (proxier *Proxier) Sync() { if proxier.healthzServer != nil { - proxier.healthzServer.QueuedUpdate() + proxier.healthzServer.QueuedUpdate(proxier.ipFamily) } metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() proxier.syncRunner.Run() @@ -766,7 +780,7 @@ func (proxier *Proxier) Sync() { func (proxier *Proxier) SyncLoop() { // Update healthz timestamp at beginning in case Sync() never succeeds. if proxier.healthzServer != nil { - proxier.healthzServer.Updated() + proxier.healthzServer.Updated(proxier.ipFamily) } // synthesize "last change queued" time as the informers are syncing. metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() @@ -949,6 +963,9 @@ func (proxier *Proxier) syncProxyRules() { proxier.serviceNoLocalEndpointsInternal = sets.New[string]() proxier.serviceNoLocalEndpointsExternal = sets.New[string]() + + proxier.lbNoNodeAccessIPPortProtocolEntries = make([]*utilipset.Entry, 0) + // Begin install iptables // Reset all buffers used later. 
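The Sync and SyncLoop hunks above now pass proxier.ipFamily to the health server, which is the concrete *healthcheck.ProxierHealthServer shared by both proxiers in dual-stack mode. The snippet below is only a rough illustration of why the family matters, not the actual healthcheck package: a shared tracker can report healthy only once every family has synced recently.

// Rough sketch of per-family health tracking (assumed rationale, simplified).
package main

import (
	"fmt"
	"sync"
	"time"
)

type ipFamily string

const (
	ipv4 ipFamily = "IPv4"
	ipv6 ipFamily = "IPv6"
)

type perFamilyHealth struct {
	mu          sync.Mutex
	lastUpdated map[ipFamily]time.Time
	threshold   time.Duration
}

// Updated records a successful sync for one family, mirroring the idea
// behind healthzServer.Updated(proxier.ipFamily).
func (h *perFamilyHealth) Updated(f ipFamily) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.lastUpdated[f] = time.Now()
}

// Healthy reports true only if every tracked family synced recently.
func (h *perFamilyHealth) Healthy() bool {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, t := range h.lastUpdated {
		if time.Since(t) > h.threshold {
			return false
		}
	}
	return true
}

func main() {
	h := &perFamilyHealth{lastUpdated: map[ipFamily]time.Time{ipv4: {}, ipv6: {}}, threshold: time.Minute}
	h.Updated(ipv4)
	fmt.Println(h.Healthy()) // false until the IPv6 proxier also reports an update
}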
@@ -1045,7 +1062,7 @@ func (proxier *Proxier) syncProxyRules() { klog.ErrorS(nil, "Failed to cast BaseEndpointInfo", "endpoint", e) continue } - if !ep.IsLocal { + if !ep.IsLocal() { continue } epIP := ep.IP() @@ -1246,6 +1263,10 @@ func (proxier *Proxier) syncProxyRules() { continue } proxier.ipsetList[kubeLoadBalancerSourceIPSet].activeEntries.Insert(entry.String()) + } else { + // since nodeIP is not covered in any of SourceRange we need to explicitly block the lbIP access from k8s nodes. + proxier.lbNoNodeAccessIPPortProtocolEntries = append(proxier.lbNoNodeAccessIPPortProtocolEntries, entry) + } } // ipvs call @@ -1482,7 +1503,7 @@ func (proxier *Proxier) syncProxyRules() { proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices) if proxier.healthzServer != nil { - proxier.healthzServer.Updated() + proxier.healthzServer.Updated(proxier.ipFamily) } metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() @@ -1637,6 +1658,17 @@ func (proxier *Proxier) writeIptablesRules() { "-j", "DROP", ) + // disable LB access from node + // for IPVS src and dst both would be lbIP + for _, entry := range proxier.lbNoNodeAccessIPPortProtocolEntries { + proxier.filterRules.Write( + "-A", string(kubeIPVSOutFilterChain), + "-s", entry.IP, + "-m", "ipvs", "--vaddr", entry.IP, "--vproto", entry.Protocol, "--vport", strconv.Itoa(entry.Port), + "-j", "DROP", + ) + } + // Accept all traffic with destination of ipvs virtual service, in case other iptables rules // block the traffic, that may result in ipvs rules invalid. // Those rules must be in the end of KUBE-SERVICE chain diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index 569d0179ec0fe..eebd10256ea63 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -57,7 +57,7 @@ import ( "k8s.io/utils/exec" fakeexec "k8s.io/utils/exec/testing" netutils "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const testHostname = "test-hostname" @@ -154,7 +154,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u svcPortMap: make(proxy.ServicePortMap), serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, nil, nil), endpointsMap: make(proxy.EndpointsMap), - endpointsChanges: proxy.NewEndpointChangeTracker(testHostname, nil, ipFamily, nil, nil), + endpointsChanges: proxy.NewEndpointsChangeTracker(testHostname, nil, ipFamily, nil, nil), excludeCIDRs: excludeCIDRs, iptables: ipt, ipvs: ipvs, @@ -263,7 +263,6 @@ func TestCleanupLeftovers(t *testing.T) { }), ) epIP := "10.180.0.1" - tcpProtocol := v1.ProtocolTCP populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 @@ -271,9 +270,9 @@ func TestCleanupLeftovers(t *testing.T) { Addresses: []string{epIP}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -434,9 +433,6 @@ func TestGetNodeIPs(t *testing.T) { } func TestNodePortIPv4(t *testing.T) { - tcpProtocol := v1.ProtocolTCP - udpProtocol := v1.ProtocolUDP - sctpProtocol := v1.ProtocolSCTP tests := []struct { name string services []*v1.Service @@ -468,9 +464,9 @@ func TestNodePortIPv4(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: 
pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice("ns1", "svc1", 2, func(eps *discovery.EndpointSlice) { @@ -479,9 +475,9 @@ func TestNodePortIPv4(t *testing.T) { Addresses: []string{"1002:ab8::2:10"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), }, @@ -557,9 +553,9 @@ func TestNodePortIPv4(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &udpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolUDP), }} }), }, @@ -712,9 +708,9 @@ func TestNodePortIPv4(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &sctpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolSCTP), }} }), }, @@ -860,15 +856,15 @@ func TestNodePortIPv4(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"10.180.0.1"}, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.180.1.1"}, - NodeName: pointer.String("otherHost"), + NodeName: ptr.To("otherHost"), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &sctpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolSCTP), }} }), }, @@ -988,9 +984,6 @@ func TestNodePortIPv4(t *testing.T) { } func TestNodePortIPv6(t *testing.T) { - tcpProtocol := v1.ProtocolTCP - udpProtocol := v1.ProtocolUDP - sctpProtocol := v1.ProtocolSCTP tests := []struct { name string services []*v1.Service @@ -1022,9 +1015,9 @@ func TestNodePortIPv6(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice("ns1", "svc1", 2, func(eps *discovery.EndpointSlice) { @@ -1033,9 +1026,9 @@ func TestNodePortIPv6(t *testing.T) { Addresses: []string{"1002:ab8::2:10"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), }, @@ -1113,9 +1106,9 @@ func TestNodePortIPv6(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &udpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolUDP), }} }), }, @@ -1206,9 +1199,9 @@ func TestNodePortIPv6(t *testing.T) { Addresses: []string{"2001::1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &sctpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolSCTP), }} }), }, @@ -1333,8 +1326,6 @@ func TestNodePortIPv6(t *testing.T) { } func Test_syncEndpoint_updateWeightsOnRestart(t *testing.T) { - tcpProtocol := v1.ProtocolTCP - ipt := iptablestest.NewFake() ipvs := 
ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) @@ -1354,9 +1345,9 @@ func Test_syncEndpoint_updateWeightsOnRestart(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.StringPtr("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }) @@ -1368,7 +1359,7 @@ func Test_syncEndpoint_updateWeightsOnRestart(t *testing.T) { serv := &utilipvs.VirtualServer{ Address: netutils.ParseIPSloppy("10.20.30.41"), Port: uint16(80), - Protocol: string(tcpProtocol), + Protocol: string(v1.ProtocolTCP), Scheduler: fp.ipvsScheduler, } @@ -1396,7 +1387,7 @@ func Test_syncEndpoint_updateWeightsOnRestart(t *testing.T) { Namespace: "ns1", }, Port: "80", - Protocol: tcpProtocol, + Protocol: v1.ProtocolTCP, }, true, vs) if err != nil { t.Errorf("failed to sync endpoint, err: %v", err) @@ -1415,7 +1406,6 @@ func Test_syncEndpoint_updateWeightsOnRestart(t *testing.T) { } func TestIPv4Proxier(t *testing.T) { - tcpProtocol := v1.ProtocolTCP tests := []struct { name string services []*v1.Service @@ -1449,9 +1439,9 @@ func TestIPv4Proxier(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) { @@ -1460,9 +1450,9 @@ func TestIPv4Proxier(t *testing.T) { Addresses: []string{"1009:ab8::5:6"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p8080"), - Port: pointer.Int32(8080), - Protocol: &tcpProtocol, + Name: ptr.To("p8080"), + Port: ptr.To[int32](8080), + Protocol: ptr.To(v1.ProtocolTCP), }} }), }, @@ -1553,7 +1543,6 @@ func TestIPv4Proxier(t *testing.T) { } func TestIPv6Proxier(t *testing.T) { - tcpProtocol := v1.ProtocolTCP tests := []struct { name string services []*v1.Service @@ -1587,9 +1576,9 @@ func TestIPv6Proxier(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) { @@ -1598,9 +1587,9 @@ func TestIPv6Proxier(t *testing.T) { Addresses: []string{"1009:ab8::5:6"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p8080"), - Port: pointer.Int32(8080), - Protocol: &tcpProtocol, + Name: ptr.To("p8080"), + Port: ptr.To[int32](8080), + Protocol: ptr.To(v1.ProtocolTCP), }} }), }, @@ -1804,7 +1793,6 @@ func TestExternalIPs(t *testing.T) { ) epIP := "10.180.0.1" - udpProtocol := v1.ProtocolUDP populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 @@ -1812,9 +1800,9 @@ func TestExternalIPs(t *testing.T) { Addresses: []string{epIP}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &udpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolUDP), }} }), ) @@ -1878,22 +1866,21 @@ func TestOnlyLocalExternalIPs(t *testing.T) { epIP1 := "10.180.1.1" thisHostname := testHostname otherHostname := "other-hostname" 
- tcpProtocol := v1.ProtocolTCP populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{epIP}, - NodeName: pointer.String(thisHostname), + NodeName: ptr.To(thisHostname), }, { Addresses: []string{epIP1}, - NodeName: pointer.String(otherHostname), + NodeName: ptr.To(otherHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -1957,7 +1944,6 @@ func TestLoadBalancer(t *testing.T) { ) epIP := "10.180.0.1" - udpProtocol := v1.ProtocolUDP populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 @@ -1965,9 +1951,9 @@ func TestLoadBalancer(t *testing.T) { Addresses: []string{epIP}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &udpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolUDP), }} }), ) @@ -2043,24 +2029,21 @@ func TestOnlyLocalNodePorts(t *testing.T) { epIP := "10.180.0.1" epIP1 := "10.180.1.1" - thisHostname := testHostname - otherHostname := "other-hostname" - tcpProtocol := v1.ProtocolTCP populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{epIP}, - NodeName: &thisHostname, + NodeName: ptr.To(testHostname), }, { Addresses: []string{epIP1}, - NodeName: &otherHostname, + NodeName: ptr.To("other-hostname"), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -2202,7 +2185,6 @@ func TestLoadBalancerSourceRanges(t *testing.T) { Port: "p80", } epIP := "10.180.0.1" - tcpProtocol := v1.ProtocolTCP makeServiceMap(fp, makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { @@ -2228,9 +2210,9 @@ func TestLoadBalancerSourceRanges(t *testing.T) { Addresses: []string{epIP}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -2334,16 +2316,15 @@ func TestAcceptIPVSTraffic(t *testing.T) { }), ) - udpProtocol := v1.ProtocolUDP populateEndpointSlices(fp, makeTestEndpointSlice("ns1", "p80", 1, func(eps *discovery.EndpointSlice) { eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{svcInfo.epIP}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &udpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolUDP), }} }), ) @@ -2398,9 +2379,6 @@ func TestOnlyLocalLoadBalancing(t *testing.T) { epIP := "10.180.0.1" epIP1 := "10.180.1.1" - thisHostname := testHostname - otherHostname := "other-hostname" - tcpProtocol := v1.ProtocolTCP 
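Throughout these test hunks the change is mechanical: pointer.String, pointer.Int32, and address-of temporaries such as &tcpProtocol are replaced by the generic ptr.To from k8s.io/utils/ptr, so the throwaway protocol and hostname variables can be dropped. A minimal sketch of the equivalence, assuming the usual signature func To[T any](v T) *T:

// Sketch of the pointer -> ptr migration applied in these tests. A local
// stand-in for ptr.To is defined so the example is self-contained.
package main

import "fmt"

// To mirrors k8s.io/utils/ptr.To: return a pointer to a copy of v.
func To[T any](v T) *T { return &v }

type protocol string

const protocolTCP protocol = "TCP"

func main() {
	name := To("p80")        // was: pointer.String("p80")
	port := To[int32](80)    // was: pointer.Int32(80)
	proto := To(protocolTCP) // was: tcpProtocol := v1.ProtocolTCP; &tcpProtocol
	fmt.Println(*name, *port, *proto)
}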
populateEndpointSlices(fp, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { @@ -2408,16 +2386,16 @@ func TestOnlyLocalLoadBalancing(t *testing.T) { eps.Endpoints = []discovery.Endpoint{ { // **local** endpoint address, should be added as RS Addresses: []string{epIP}, - NodeName: &thisHostname, + NodeName: ptr.To(testHostname), }, { // **remote** endpoint address, should not be added as RS Addresses: []string{epIP1}, - NodeName: &otherHostname, + NodeName: ptr.To("other-hostname"), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -2771,7 +2749,6 @@ func TestSessionAffinity(t *testing.T) { NamespacedName: makeNSN("ns1", "svc1"), Port: "p80", } - timeoutSeconds := v1.DefaultClientIPServiceAffinitySeconds makeServiceMap(fp, makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { @@ -2781,7 +2758,7 @@ func TestSessionAffinity(t *testing.T) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ ClientIP: &v1.ClientIPConfig{ - TimeoutSeconds: &timeoutSeconds, + TimeoutSeconds: ptr.To[int32](v1.DefaultClientIPServiceAffinitySeconds), }, } svc.Spec.Ports = []v1.ServicePort{{ @@ -2815,9 +2792,6 @@ func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.Serv } func Test_updateEndpointsMap(t *testing.T) { - var nodeName = testHostname - udpProtocol := v1.ProtocolUDP - emptyEndpointSlices := []*discovery.EndpointSlice{ makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}), } @@ -2827,9 +2801,9 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } subset2 := func(eps *discovery.EndpointSlice) { @@ -2838,9 +2812,9 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.2"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } namedPortLocal := []*discovery.EndpointSlice{ @@ -2849,12 +2823,12 @@ func Test_updateEndpointsMap(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.1"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} }), } @@ -2869,9 +2843,9 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11-2"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11-2"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} }), } @@ -2883,9 +2857,9 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(22), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: 
ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} }), } @@ -2897,16 +2871,16 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }, { Addresses: []string{"1.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} }), } @@ -2918,12 +2892,12 @@ func Test_updateEndpointsMap(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsWithLocal := []*discovery.EndpointSlice{ @@ -2934,16 +2908,16 @@ func Test_updateEndpointsMap(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"1.1.1.1"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } subset3 := func(eps *discovery.EndpointSlice) { @@ -2952,9 +2926,9 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.3"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p13"), - Port: pointer.Int32(13), - Protocol: &udpProtocol, + Name: ptr.To("p13"), + Port: ptr.To[int32](13), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{ @@ -2967,16 +2941,16 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.1"}, }, { Addresses: []string{"1.1.1.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }} } subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) { @@ -2985,16 +2959,16 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.3"}, }, { Addresses: []string{"1.1.1.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p13"), - Port: pointer.Int32(13), - Protocol: &udpProtocol, + Name: ptr.To("p13"), + Port: ptr.To[int32](13), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p14"), - Port: pointer.Int32(14), - Protocol: &udpProtocol, + Name: ptr.To("p14"), + Port: ptr.To[int32](14), + Protocol: ptr.To(v1.ProtocolUDP), }} } subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) { @@ -3003,16 +2977,16 
@@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"2.2.2.1"}, }, { Addresses: []string{"2.2.2.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p21"), - Port: pointer.Int32(21), - Protocol: &udpProtocol, + Name: ptr.To("p21"), + Port: ptr.To[int32](21), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p22"), - Port: pointer.Int32(22), - Protocol: &udpProtocol, + Name: ptr.To("p22"), + Port: ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} } multipleSubsetsIPsPorts := []*discovery.EndpointSlice{ @@ -3024,54 +2998,54 @@ func Test_updateEndpointsMap(t *testing.T) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"2.2.2.2"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"2.2.2.22"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p22"), - Port: pointer.Int32(22), - Protocol: &udpProtocol, + Name: ptr.To("p22"), + Port: ptr.To[int32](22), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset2 := func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"2.2.2.3"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p23"), - Port: pointer.Int32(23), - Protocol: &udpProtocol, + Name: ptr.To("p23"), + Port: ptr.To[int32](23), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset3 := func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"4.4.4.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"4.4.4.5"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p44"), - Port: pointer.Int32(44), - Protocol: &udpProtocol, + Name: ptr.To("p44"), + Port: ptr.To[int32](44), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset4 := func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"4.4.4.6"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p45"), - Port: pointer.Int32(45), - Protocol: &udpProtocol, + Name: ptr.To("p45"), + Port: ptr.To[int32](45), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset5 := func(eps *discovery.EndpointSlice) { @@ -3082,9 +3056,9 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.11"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p11"), - Port: pointer.Int32(11), - Protocol: &udpProtocol, + Name: ptr.To("p11"), + Port: ptr.To[int32](11), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset6 := func(eps *discovery.EndpointSlice) { @@ -3093,13 +3067,13 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"1.1.1.2"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p12"), - Port: pointer.Int32(12), - Protocol: &udpProtocol, + Name: ptr.To("p12"), + Port: ptr.To[int32](12), + Protocol: ptr.To(v1.ProtocolUDP), }, { - Name: pointer.String("p122"), - Port: pointer.Int32(122), - Protocol: &udpProtocol, + Name: ptr.To("p122"), + Port: ptr.To[int32](122), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset7 := func(eps 
*discovery.EndpointSlice) { @@ -3108,21 +3082,21 @@ func Test_updateEndpointsMap(t *testing.T) { Addresses: []string{"3.3.3.3"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p33"), - Port: pointer.Int32(33), - Protocol: &udpProtocol, + Name: ptr.To("p33"), + Port: ptr.To[int32](33), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexSubset8 := func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{"4.4.4.4"}, - NodeName: &nodeName, + NodeName: ptr.To(testHostname), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p44"), - Port: pointer.Int32(44), - Protocol: &udpProtocol, + Name: ptr.To("p44"), + Port: ptr.To[int32](44), + Protocol: ptr.To(v1.ProtocolUDP), }} } complexBefore := []*discovery.EndpointSlice{ @@ -3151,16 +3125,16 @@ func Test_updateEndpointsMap(t *testing.T) { name string previousEndpoints []*discovery.EndpointSlice currentEndpoints []*discovery.EndpointSlice - oldEndpoints map[proxy.ServicePortName][]*proxy.BaseEndpointInfo - expectedResult map[proxy.ServicePortName][]*proxy.BaseEndpointInfo + oldEndpoints map[proxy.ServicePortName][]endpointExpectation + expectedResult map[proxy.ServicePortName][]endpointExpectation expectedDeletedUDPEndpoints []proxy.ServiceEndpoint expectedNewlyActiveUDPServices map[proxy.ServicePortName]bool expectedReadyEndpoints map[types.NamespacedName]int }{{ // Case[0]: nothing name: "nothing", - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{}, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{}, + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{}, + expectedResult: map[proxy.ServicePortName][]endpointExpectation{}, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, expectedNewlyActiveUDPServices: map[proxy.ServicePortName]bool{}, expectedReadyEndpoints: map[types.NamespacedName]int{}, @@ -3169,14 +3143,14 @@ func Test_updateEndpointsMap(t *testing.T) { name: "no change, named port, local", previousEndpoints: namedPortLocal, currentEndpoints: namedPortLocal, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -3189,20 +3163,20 @@ func Test_updateEndpointsMap(t *testing.T) { name: "no change, multiple subsets", previousEndpoints: multipleSubsets, currentEndpoints: multipleSubsets, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: 
"1.1.1.2:12", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -3213,26 +3187,26 @@ func Test_updateEndpointsMap(t *testing.T) { name: "no change, multiple subsets, multiple ports, local", previousEndpoints: multipleSubsetsMultiplePortsLocal, currentEndpoints: multipleSubsetsMultiplePortsLocal, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:13", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:13", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -3245,56 +3219,56 @@ func Test_updateEndpointsMap(t *testing.T) { name: "no change, multiple endpoints, subsets, IPs, and ports", previousEndpoints: multipleSubsetsIPsPorts, currentEndpoints: multipleSubsetsIPsPorts, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.2:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:12", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, }, makeServicePortName("ns1", 
"ep1", "p13", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.4:13", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:13", isLocal: false}, + {endpoint: "1.1.1.4:13", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.4:14", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:14", isLocal: false}, + {endpoint: "1.1.1.4:14", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): { - {Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:21", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.1:21", isLocal: false}, + {endpoint: "2.2.2.2:21", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:22", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.1:22", isLocal: false}, + {endpoint: "2.2.2.2:22", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.2:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:12", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.4:13", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:13", isLocal: false}, + {endpoint: "1.1.1.4:13", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): { - {Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.4:14", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.3:14", isLocal: false}, + {endpoint: "1.1.1.4:14", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): { - {Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:21", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.1:21", isLocal: false}, + {endpoint: "2.2.2.2:21", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:22", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, 
+ {endpoint: "2.2.2.1:22", isLocal: false}, + {endpoint: "2.2.2.2:22", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -3308,10 +3282,10 @@ func Test_updateEndpointsMap(t *testing.T) { name: "add an Endpoints", previousEndpoints: []*discovery.EndpointSlice{nil}, currentEndpoints: namedPortLocal, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{}, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{}, + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -3326,12 +3300,12 @@ func Test_updateEndpointsMap(t *testing.T) { name: "remove an Endpoints", previousEndpoints: namedPortLocal, currentEndpoints: []*discovery.EndpointSlice{nil}, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{}, + expectedResult: map[proxy.ServicePortName][]endpointExpectation{}, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ Endpoint: "1.1.1.1:11", ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP), @@ -3343,19 +3317,19 @@ func Test_updateEndpointsMap(t *testing.T) { name: "add an IP and port", previousEndpoints: namedPort, currentEndpoints: namedPortsLocalNoLocal, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.2:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:12", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -3370,19 +3344,19 @@ func Test_updateEndpointsMap(t *testing.T) { name: "remove an IP and port", previousEndpoints: namedPortsLocalNoLocal, currentEndpoints: namedPort, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: 
true, Terminating: false}, - {Endpoint: "1.1.1.2:11", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, + {endpoint: "1.1.1.2:11", isLocal: true}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.2:12", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:12", isLocal: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -3402,17 +3376,17 @@ func Test_updateEndpointsMap(t *testing.T) { name: "add a subset", previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil}, currentEndpoints: multipleSubsetsWithLocal, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -3427,17 +3401,17 @@ func Test_updateEndpointsMap(t *testing.T) { name: "remove a subset", previousEndpoints: multipleSubsets, currentEndpoints: []*discovery.EndpointSlice{namedPort[0], nil}, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -3451,14 +3425,14 @@ func Test_updateEndpointsMap(t *testing.T) { name: "rename a port", previousEndpoints: namedPort, currentEndpoints: namedPortRenamed, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ 
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -3474,14 +3448,14 @@ func Test_updateEndpointsMap(t *testing.T) { name: "renumber a port", previousEndpoints: namedPort, currentEndpoints: namedPortRenumbered, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:22", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -3495,41 +3469,41 @@ func Test_updateEndpointsMap(t *testing.T) { name: "complex add and remove", previousEndpoints: complexBefore, currentEndpoints: complexAfter, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): { - {Endpoint: "2.2.2.22:22", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "2.2.2.2:22", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.22:22", isLocal: true}, + {endpoint: "2.2.2.2:22", isLocal: true}, }, makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): { - {Endpoint: "2.2.2.3:23", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "2.2.2.3:23", isLocal: true}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): { - {Endpoint: "4.4.4.4:44", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "4.4.4.5:44", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "4.4.4.4:44", isLocal: true}, + {endpoint: "4.4.4.5:44", isLocal: true}, }, makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): { - {Endpoint: "4.4.4.6:45", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "4.4.4.6:45", isLocal: true}, }, }, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.11:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: 
false}, + {endpoint: "1.1.1.11:11", isLocal: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:12", isLocal: false}, }, makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): { - {Endpoint: "1.1.1.2:122", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.2:122", isLocal: false}, }, makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): { - {Endpoint: "3.3.3.3:33", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "3.3.3.3:33", isLocal: false}, }, makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): { - {Endpoint: "4.4.4.4:44", NodeName: nodeName, IsLocal: true, Ready: true, Serving: true, Terminating: false}, + {endpoint: "4.4.4.4:44", isLocal: true}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{{ @@ -3561,10 +3535,10 @@ func Test_updateEndpointsMap(t *testing.T) { name: "change from 0 endpoint address to 1 named port", previousEndpoints: emptyEndpointSlices, currentEndpoints: namedPort, - oldEndpoints: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{}, - expectedResult: map[proxy.ServicePortName][]*proxy.BaseEndpointInfo{ + oldEndpoints: map[proxy.ServicePortName][]endpointExpectation{}, + expectedResult: map[proxy.ServicePortName][]endpointExpectation{ makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): { - {Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}, + {endpoint: "1.1.1.1:11", isLocal: false}, }, }, expectedDeletedUDPEndpoints: []proxy.ServiceEndpoint{}, @@ -3581,7 +3555,7 @@ func Test_updateEndpointsMap(t *testing.T) { ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) - fp.hostname = nodeName + fp.hostname = testHostname // First check that after adding all previous versions of endpoints, // the fp.oldEndpoints is as we expect. @@ -3591,7 +3565,7 @@ func Test_updateEndpointsMap(t *testing.T) { } } fp.endpointsMap.Update(fp.endpointsChanges) - compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints) + checkEndpointExpectations(t, tci, fp.endpointsMap, tc.oldEndpoints) // Now let's call appropriate handlers to get to state we want to be. 
if len(tc.previousEndpoints) != len(tc.currentEndpoints) { @@ -3611,7 +3585,7 @@ func Test_updateEndpointsMap(t *testing.T) { } result := fp.endpointsMap.Update(fp.endpointsChanges) newMap := fp.endpointsMap - compareEndpointsMaps(t, tci, newMap, tc.expectedResult) + checkEndpointExpectations(t, tci, newMap, tc.expectedResult) if len(result.DeletedUDPEndpoints) != len(tc.expectedDeletedUDPEndpoints) { t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedDeletedUDPEndpoints), len(result.DeletedUDPEndpoints), result.DeletedUDPEndpoints) } @@ -3650,7 +3624,12 @@ func Test_updateEndpointsMap(t *testing.T) { } } -func compareEndpointsMaps(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*proxy.BaseEndpointInfo) { +type endpointExpectation struct { + endpoint string + isLocal bool +} + +func checkEndpointExpectations(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]endpointExpectation) { if len(newMap) != len(expected) { t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap) } @@ -3659,12 +3638,9 @@ func compareEndpointsMaps(t *testing.T, tci int, newMap proxy.EndpointsMap, expe t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x])) } else { for i := range expected[x] { - newEp, ok := newMap[x][i].(*proxy.BaseEndpointInfo) - if !ok { - t.Errorf("Failed to cast proxy.BaseEndpointInfo") - continue - } - if !reflect.DeepEqual(*newEp, *(expected[x][i])) { + newEp := newMap[x][i] + if newEp.String() != expected[x][i].endpoint || + newEp.IsLocal() != expected[x][i].isLocal { t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp) } } @@ -4358,7 +4334,7 @@ raid10 57344 0 - Live 0xffffffffc0597000`, } // The majority of EndpointSlice specific tests are not ipvs specific and focus on -// the shared EndpointChangeTracker and EndpointSliceCache. This test ensures that the +// the shared EndpointsChangeTracker and EndpointSliceCache. This test ensures that the // ipvs proxier supports translating EndpointSlices to ipvs output. 
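Before the EndpointSlice tests that follow, note the comparison change earlier in this hunk: compareEndpointsMaps, which deep-compared *proxy.BaseEndpointInfo values with reflect.DeepEqual, becomes checkEndpointExpectations over a small endpointExpectation struct, so each table case pins only the two properties it actually asserts. A condensed, illustrative restatement of that check, using the proxy.Endpoint interface methods shown above:

// matches is a hypothetical helper condensing the loop body of
// checkEndpointExpectations. Only the "ip:port" identity and locality are
// compared; Ready/Serving/Terminating are intentionally no longer pinned here.
func matches(got proxy.Endpoint, want endpointExpectation) bool {
	return got.String() == want.endpoint && got.IsLocal() == want.isLocal
}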
func TestEndpointSliceE2E(t *testing.T) { ipt := iptablestest.NewFake() @@ -4381,7 +4357,6 @@ func TestEndpointSliceE2E(t *testing.T) { }) // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), @@ -4389,27 +4364,27 @@ func TestEndpointSliceE2E(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{{ Addresses: []string{"10.0.1.1"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(testHostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String("node2"), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To("node2"), }, { Addresses: []string{"10.0.1.3"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String("node3"), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To("node3"), }, { // not ready endpoints should be ignored Addresses: []string{"10.0.1.4"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(false)}, - NodeName: pointer.String("node3"), + Conditions: discovery.EndpointConditions{Ready: ptr.To(false)}, + NodeName: ptr.To("node3"), }}, } @@ -4521,7 +4496,6 @@ func Test_HealthCheckNodePortWhenTerminating(t *testing.T) { }, }) - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), @@ -4529,26 +4503,26 @@ func Test_HealthCheckNodePortWhenTerminating(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{{ Addresses: []string{"10.0.1.1"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(testHostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(testHostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.3"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, }, { // not ready endpoints should be ignored Addresses: []string{"10.0.1.4"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(false)}, - NodeName: pointer.String(testHostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(false)}, + NodeName: ptr.To(testHostname), }}, } @@ -4567,43 +4541,43 @@ func Test_HealthCheckNodePortWhenTerminating(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: 
[]discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{{ Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { // not ready endpoints should be ignored Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }}, } @@ -4662,6 +4636,7 @@ func TestCreateAndLinkKubeChain(t *testing.T) { :KUBE-PROXY-FIREWALL - [0:0] :KUBE-SOURCE-RANGES-FIREWALL - [0:0] :KUBE-IPVS-FILTER - [0:0] +:KUBE-IPVS-OUT-FILTER - [0:0] ` assert.Equal(t, expectedNATChains, fp.natChains.String()) assert.Equal(t, expectedFilterChains, fp.filterChains.String()) @@ -4675,9 +4650,6 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { hostname string } - cluster := v1.ServiceInternalTrafficPolicyCluster - local := v1.ServiceInternalTrafficPolicyLocal - testCases := []struct { name string internalTrafficPolicy *v1.ServiceInternalTrafficPolicy @@ -4689,7 +4661,7 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { }{ { name: "internalTrafficPolicy is cluster with non-zero local endpoints", - internalTrafficPolicy: &cluster, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster), endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, @@ -4706,7 +4678,7 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { }, { name: "internalTrafficPolicy is cluster with zero local endpoints", - internalTrafficPolicy: &cluster, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster), endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", "host1"}, @@ -4723,7 +4695,7 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { }, { name: "internalTrafficPolicy is local with non-zero local endpoints", - internalTrafficPolicy: &local, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, @@ -4738,7 +4710,7 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { }, { name: "internalTrafficPolicy is local with zero local endpoints", - internalTrafficPolicy: &local, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", 
"host1"}, @@ -4778,7 +4750,6 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { fp.OnServiceAdd(svc) // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), @@ -4786,9 +4757,9 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, } @@ -4796,8 +4767,8 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { for _, ep := range tc.endpoints { endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{ Addresses: []string{ep.ip}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(ep.hostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(ep.hostname), }) } @@ -4857,8 +4828,6 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) { // fp.endpointsSynced = true fp.endpointSlicesSynced = true - clusterInternalTrafficPolicy := v1.ServiceInternalTrafficPolicyCluster - serviceName := "svc1" // Add initial service namespaceName := "ns1" @@ -4869,7 +4838,7 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) { Selector: map[string]string{"foo": "bar"}, Type: v1.ServiceTypeNodePort, ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyCluster, - InternalTrafficPolicy: &clusterInternalTrafficPolicy, + InternalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster), ExternalIPs: []string{ "1.2.3.4", }, @@ -4885,7 +4854,6 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) { }) // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), @@ -4893,56 +4861,56 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ { Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.4"}, Conditions: 
discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, }, } @@ -5033,8 +5001,6 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) { // fp.endpointsSynced = true fp.endpointSlicesSynced = true - clusterInternalTrafficPolicy := v1.ServiceInternalTrafficPolicyCluster - serviceName := "svc1" // Add initial service namespaceName := "ns1" @@ -5045,7 +5011,7 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) { Selector: map[string]string{"foo": "bar"}, Type: v1.ServiceTypeNodePort, ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, - InternalTrafficPolicy: &clusterInternalTrafficPolicy, + InternalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster), ExternalIPs: []string{ "1.2.3.4", }, @@ -5061,7 +5027,6 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) { }) // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), @@ -5069,56 +5034,56 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ { Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: 
ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, }, } @@ -5208,8 +5173,6 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) { // fp.endpointsSynced = true fp.endpointSlicesSynced = true - clusterInternalTrafficPolicy := v1.ServiceInternalTrafficPolicyCluster - // Add initial service serviceName := "svc1" namespaceName := "ns1" @@ -5220,7 +5183,7 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) { Selector: map[string]string{"foo": "bar"}, Type: v1.ServiceTypeNodePort, ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyCluster, - InternalTrafficPolicy: &clusterInternalTrafficPolicy, + InternalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster), ExternalIPs: []string{ "1.2.3.4", }, @@ -5236,7 +5199,6 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) { }) // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), @@ -5244,56 +5206,56 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ { Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, { Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(false), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(false), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, }, } @@ -5383,8 +5345,6 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) { // fp.endpointsSynced = true fp.endpointSlicesSynced = true - clusterInternalTrafficPolicy := v1.ServiceInternalTrafficPolicyCluster - // Add initial service serviceName := "svc1" namespaceName := "ns1" @@ -5395,7 +5355,7 @@ func 
Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) { Selector: map[string]string{"foo": "bar"}, Type: v1.ServiceTypeNodePort, ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, - InternalTrafficPolicy: &clusterInternalTrafficPolicy, + InternalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyCluster), ExternalIPs: []string{ "1.2.3.4", }, @@ -5411,7 +5371,6 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) { }) // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), @@ -5419,56 +5378,56 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To(""), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{ { Addresses: []string{"10.0.1.1"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.2"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.3"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(false), + Terminating: ptr.To(true), }, - NodeName: pointer.String(testHostname), + NodeName: ptr.To(testHostname), }, { Addresses: []string{"10.0.1.4"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), + Ready: ptr.To(false), + Serving: ptr.To(true), + Terminating: ptr.To(true), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, { Addresses: []string{"10.0.1.5"}, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, - NodeName: pointer.String("another-host"), + NodeName: ptr.To("another-host"), }, }, } @@ -5640,8 +5599,6 @@ func TestNoEndpointsMetric(t *testing.T) { hostname string } - internalTrafficPolicyLocal := v1.ServiceInternalTrafficPolicyLocal - externalTrafficPolicyLocal := v1.ServiceExternalTrafficPolicyLocal metrics.RegisterMetrics() testCases := []struct { @@ -5654,7 +5611,7 @@ func TestNoEndpointsMetric(t *testing.T) { }{ { name: "internalTrafficPolicy is set and there are local endpoints", - internalTrafficPolicy: &internalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, @@ -5663,7 +5620,7 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "externalTrafficPolicy is set and there are local endpoints", - externalTrafficPolicy: externalTrafficPolicyLocal, + 
externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, @@ -5672,8 +5629,8 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "both policies are set and there are local endpoints", - internalTrafficPolicy: &internalTrafficPolicyLocal, - externalTrafficPolicy: externalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{ {"10.0.1.1", testHostname}, {"10.0.1.2", "host1"}, @@ -5682,7 +5639,7 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "internalTrafficPolicy is set and there are no local endpoints", - internalTrafficPolicy: &internalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", "host1"}, @@ -5692,7 +5649,7 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "externalTrafficPolicy is set and there are no local endpoints", - externalTrafficPolicy: externalTrafficPolicyLocal, + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", "host1"}, @@ -5702,8 +5659,8 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "Both policies are set and there are no local endpoints", - internalTrafficPolicy: &internalTrafficPolicyLocal, - externalTrafficPolicy: externalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{ {"10.0.1.1", "host0"}, {"10.0.1.2", "host1"}, @@ -5714,8 +5671,8 @@ func TestNoEndpointsMetric(t *testing.T) { }, { name: "Both policies are set and there are no endpoints at all", - internalTrafficPolicy: &internalTrafficPolicyLocal, - externalTrafficPolicy: externalTrafficPolicyLocal, + internalTrafficPolicy: ptr.To(v1.ServiceInternalTrafficPolicyLocal), + externalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, endpoints: []endpoint{}, expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 0, expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 0, @@ -5753,7 +5710,6 @@ func TestNoEndpointsMetric(t *testing.T) { fp.OnServiceAdd(svc) // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", serviceName), @@ -5761,9 +5717,9 @@ func TestNoEndpointsMetric(t *testing.T) { Labels: map[string]string{discovery.LabelServiceName: serviceName}, }, Ports: []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, } @@ -5771,8 +5727,8 @@ func TestNoEndpointsMetric(t *testing.T) { for _, ep := range tc.endpoints { endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{ Addresses: []string{ep.ip}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String(ep.hostname), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To(ep.hostname), }) } @@ -5841,9 +5797,6 @@ func TestDismissLocalhostRuleExist(t *testing.T) { } func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { - ipModeProxy := v1.LoadBalancerIPModeProxy - ipModeVIP := v1.LoadBalancerIPModeVIP - testCases := []struct { name string ipModeEnabled bool @@ -5858,7 
+5811,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { ipModeEnabled: false, svcIP: "10.20.30.41", svcLBIP: "1.2.3.4", - ipMode: &ipModeProxy, + ipMode: ptr.To(v1.LoadBalancerIPModeProxy), expectedServices: 2, }, { @@ -5866,7 +5819,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { ipModeEnabled: false, svcIP: "10.20.30.42", svcLBIP: "1.2.3.5", - ipMode: &ipModeVIP, + ipMode: ptr.To(v1.LoadBalancerIPModeVIP), expectedServices: 2, }, { @@ -5883,7 +5836,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { ipModeEnabled: true, svcIP: "10.20.30.41", svcLBIP: "1.2.3.4", - ipMode: &ipModeProxy, + ipMode: ptr.To(v1.LoadBalancerIPModeProxy), expectedServices: 1, }, { @@ -5891,7 +5844,7 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { ipModeEnabled: true, svcIP: "10.20.30.42", svcLBIP: "1.2.3.5", - ipMode: &ipModeVIP, + ipMode: ptr.To(v1.LoadBalancerIPModeVIP), expectedServices: 2, }, { @@ -5932,7 +5885,6 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { }), ) - tcpProtocol := v1.ProtocolTCP makeEndpointSliceMap(fp, makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 @@ -5940,9 +5892,9 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) { Addresses: []string{"10.180.0.1"}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String("p80"), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Name: ptr.To("p80"), + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) diff --git a/pkg/proxy/kubemark/hollow_proxy.go b/pkg/proxy/kubemark/hollow_proxy.go index 1462c30a84f76..f67b74835f29b 100644 --- a/pkg/proxy/kubemark/hollow_proxy.go +++ b/pkg/proxy/kubemark/hollow_proxy.go @@ -38,7 +38,7 @@ import ( utilnode "k8s.io/kubernetes/pkg/util/node" utilexec "k8s.io/utils/exec" netutils "k8s.io/utils/net" - utilpointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "k8s.io/klog/v2" ) @@ -109,6 +109,7 @@ func NewHollowProxyOrDie( recorder, nil, []string{}, + false, ) if err != nil { return nil, fmt.Errorf("unable to create proxier: %v", err) @@ -129,7 +130,7 @@ func NewHollowProxyOrDie( Config: &proxyconfigapi.KubeProxyConfiguration{ Mode: proxyconfigapi.ProxyMode("fake"), ConfigSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - OOMScoreAdj: utilpointer.Int32Ptr(0), + OOMScoreAdj: ptr.To[int32](0), }, Client: client, diff --git a/pkg/proxy/node.go b/pkg/proxy/node.go index 6ae87fdcac66d..49df72cd1d732 100644 --- a/pkg/proxy/node.go +++ b/pkg/proxy/node.go @@ -90,7 +90,7 @@ func (n *NodePodCIDRHandler) OnNodeSynced() {} // NodeEligibleHandler handles the life cycle of the Node's eligibility, as // determined by the health server for directing load balancer traffic. 
type NodeEligibleHandler struct { - HealthServer healthcheck.ProxierHealthUpdater + HealthServer *healthcheck.ProxierHealthServer } var _ config.NodeHandler = &NodeEligibleHandler{} diff --git a/pkg/proxy/service_test.go b/pkg/proxy/service_test.go index e8d93f2b597b9..f21b268dff37b 100644 --- a/pkg/proxy/service_test.go +++ b/pkg/proxy/service_test.go @@ -595,7 +595,7 @@ func TestServiceToServiceMap(t *testing.T) { } type FakeProxier struct { - endpointsChanges *EndpointChangeTracker + endpointsChanges *EndpointsChangeTracker serviceChanges *ServiceChangeTracker svcPortMap ServicePortMap endpointsMap EndpointsMap @@ -607,7 +607,7 @@ func newFakeProxier(ipFamily v1.IPFamily, t time.Time) *FakeProxier { svcPortMap: make(ServicePortMap), serviceChanges: NewServiceChangeTracker(nil, ipFamily, nil, nil), endpointsMap: make(EndpointsMap), - endpointsChanges: &EndpointChangeTracker{ + endpointsChanges: &EndpointsChangeTracker{ lastChangeTriggerTimes: make(map[types.NamespacedName][]time.Time), trackerStartTime: t, processEndpointsMapChange: nil, diff --git a/pkg/proxy/topology.go b/pkg/proxy/topology.go index 52f539b659fb7..9b75ab4023046 100644 --- a/pkg/proxy/topology.go +++ b/pkg/proxy/topology.go @@ -84,12 +84,12 @@ func CategorizeEndpoints(endpoints []Endpoint, svcInfo ServicePort, nodeLabels m for _, ep := range endpoints { if ep.IsReady() { hasAnyEndpoints = true - if ep.GetIsLocal() { + if ep.IsLocal() { hasLocalReadyEndpoints = true } } else if ep.IsServing() && ep.IsTerminating() { hasAnyEndpoints = true - if ep.GetIsLocal() { + if ep.IsLocal() { hasLocalServingTerminatingEndpoints = true } } @@ -97,12 +97,12 @@ func CategorizeEndpoints(endpoints []Endpoint, svcInfo ServicePort, nodeLabels m if hasLocalReadyEndpoints { localEndpoints = filterEndpoints(endpoints, func(ep Endpoint) bool { - return ep.GetIsLocal() && ep.IsReady() + return ep.IsLocal() && ep.IsReady() }) } else if hasLocalServingTerminatingEndpoints { useServingTerminatingEndpoints = true localEndpoints = filterEndpoints(endpoints, func(ep Endpoint) bool { - return ep.GetIsLocal() && ep.IsServing() && ep.IsTerminating() + return ep.IsLocal() && ep.IsServing() && ep.IsTerminating() }) } @@ -165,12 +165,12 @@ func canUseTopology(endpoints []Endpoint, svcInfo ServicePort, nodeLabels map[st if !endpoint.IsReady() { continue } - if endpoint.GetZoneHints().Len() == 0 { - klog.InfoS("Skipping topology aware endpoint filtering since one or more endpoints is missing a zone hint") + if endpoint.ZoneHints().Len() == 0 { + klog.InfoS("Skipping topology aware endpoint filtering since one or more endpoints is missing a zone hint", "endpoint", endpoint) return false } - if endpoint.GetZoneHints().Has(zone) { + if endpoint.ZoneHints().Has(zone) { hasEndpointForZone = true } } @@ -187,7 +187,7 @@ func canUseTopology(endpoints []Endpoint, svcInfo ServicePort, nodeLabels map[st // topology constraints. (It assumes that canUseTopology() returned true.) 
func availableForTopology(endpoint Endpoint, nodeLabels map[string]string) bool { zone := nodeLabels[v1.LabelTopologyZone] - return endpoint.GetZoneHints().Has(zone) + return endpoint.ZoneHints().Has(zone) } // filterEndpoints filters endpoints according to predicate diff --git a/pkg/proxy/topology_test.go b/pkg/proxy/topology_test.go index 53fc6e5199298..e3865947ea7f0 100644 --- a/pkg/proxy/topology_test.go +++ b/pkg/proxy/topology_test.go @@ -70,10 +70,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.6:80"), localEndpoints: nil, @@ -83,10 +83,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "disabled"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80", "10.1.2.5:80", "10.1.2.6:80"), localEndpoints: nil, @@ -96,10 +96,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + 
&BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80", "10.1.2.5:80", "10.1.2.6:80"), localEndpoints: nil, @@ -110,10 +110,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "aUto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.6:80"), localEndpoints: nil, @@ -123,10 +123,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80", "10.1.2.5:80", "10.1.2.6:80"), localEndpoints: nil, @@ -136,10 +136,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{externalPolicyLocal: true, nodePort: 8080, hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", 
"10.1.2.6:80"), localEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80"), @@ -150,10 +150,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true, hintsAnnotation: "auto", externalPolicyLocal: false, nodePort: 8080}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.6:80"), localEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80"), @@ -164,7 +164,7 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80"), localEndpoints: nil, @@ -174,7 +174,7 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: ""}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80"), localEndpoints: nil, @@ -184,7 +184,7 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-b"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80"), localEndpoints: nil, @@ -194,10 +194,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + 
&BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.6:80"), localEndpoints: nil, @@ -207,10 +207,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: false}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: false}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80"), localEndpoints: nil, @@ -220,10 +220,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: false}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: false}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: false}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: false}, }, clusterEndpoints: sets.New[string]("10.1.2.4:80", "10.1.2.5:80"), localEndpoints: nil, @@ -233,10 +233,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "Auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.6:80"), localEndpoints: nil, @@ -246,10 +246,10 
@@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: ""}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80", "10.1.2.5:80", "10.1.2.6:80"), localEndpoints: nil, @@ -259,10 +259,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "disabled"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80", "10.1.2.5:80", "10.1.2.6:80"), localEndpoints: nil, @@ -272,10 +272,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: nil, Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-a"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: nil, ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-a"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80", "10.1.2.5:80", "10.1.2.6:80"), localEndpoints: nil, @@ -285,10 +285,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-c"}, serviceInfo: &BaseServicePortInfo{hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.1.2.3:80", ZoneHints: 
sets.New[string]("zone-a", "zone-b", "zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.4:80", ZoneHints: sets.New[string]("zone-b", "zone-c"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.5:80", ZoneHints: sets.New[string]("zone-b", "zone-d"), Ready: true}, - &BaseEndpointInfo{Endpoint: "10.1.2.6:80", ZoneHints: sets.New[string]("zone-c"), Ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.3:80", zoneHints: sets.New[string]("zone-a", "zone-b", "zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.4:80", zoneHints: sets.New[string]("zone-b", "zone-c"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.5:80", zoneHints: sets.New[string]("zone-b", "zone-d"), ready: true}, + &BaseEndpointInfo{endpoint: "10.1.2.6:80", zoneHints: sets.New[string]("zone-c"), ready: true}, }, clusterEndpoints: sets.New[string]("10.1.2.3:80", "10.1.2.4:80", "10.1.2.6:80"), localEndpoints: nil, @@ -298,10 +298,10 @@ func TestCategorizeEndpoints(t *testing.T) { nodeLabels: map[string]string{v1.LabelTopologyZone: "zone-a"}, serviceInfo: &BaseServicePortInfo{internalPolicyLocal: false, externalPolicyLocal: true, nodePort: 8080, hintsAnnotation: "auto"}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", ZoneHints: sets.New[string]("zone-a"), Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", ZoneHints: sets.New[string]("zone-b"), Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.2:80", ZoneHints: sets.New[string]("zone-a"), Ready: true, IsLocal: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.3:80", ZoneHints: sets.New[string]("zone-b"), Ready: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", zoneHints: sets.New[string]("zone-a"), ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", zoneHints: sets.New[string]("zone-b"), ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.2:80", zoneHints: sets.New[string]("zone-a"), ready: true, isLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.3:80", zoneHints: sets.New[string]("zone-b"), ready: true, isLocal: false}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.2:80"), localEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), @@ -316,8 +316,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "internalTrafficPolicy: Local, but all endpoints are remote", serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, isLocal: false}, }, clusterEndpoints: nil, localEndpoints: sets.New[string](), @@ -326,8 +326,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "internalTrafficPolicy: Local, all endpoints are local", serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, isLocal: true}, }, clusterEndpoints: nil, localEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), @@ -335,8 +335,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "internalTrafficPolicy: Local, some endpoints are local", 
serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, isLocal: false}, }, clusterEndpoints: nil, localEndpoints: sets.New[string]("10.0.0.0:80"), @@ -344,8 +344,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "Cluster traffic policy, endpoints not Ready", serviceInfo: &BaseServicePortInfo{}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: false}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: false}, }, clusterEndpoints: sets.New[string](), localEndpoints: nil, @@ -353,8 +353,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "Cluster traffic policy, some endpoints are Ready", serviceInfo: &BaseServicePortInfo{}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: false}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true}, }, clusterEndpoints: sets.New[string]("10.0.0.1:80"), localEndpoints: nil, @@ -363,8 +363,8 @@ func TestCategorizeEndpoints(t *testing.T) { pteEnabled: true, serviceInfo: &BaseServicePortInfo{}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: false, Serving: true, Terminating: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: false, Serving: true, Terminating: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: false, serving: true, terminating: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: false, serving: true, terminating: true, isLocal: false}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), localEndpoints: nil, @@ -372,8 +372,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "iTP: Local, eTP: Cluster, some endpoints local", serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true, externalPolicyLocal: false, nodePort: 8080}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, isLocal: false}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), localEndpoints: sets.New[string]("10.0.0.0:80"), @@ -382,8 +382,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "iTP: Cluster, eTP: Local, some endpoints local", serviceInfo: &BaseServicePortInfo{internalPolicyLocal: false, externalPolicyLocal: true, nodePort: 8080}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, isLocal: false}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), localEndpoints: sets.New[string]("10.0.0.0:80"), @@ -392,8 +392,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "iTP: Local, eTP: Local, some endpoints 
local", serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true, externalPolicyLocal: true, nodePort: 8080}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, isLocal: false}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), localEndpoints: sets.New[string]("10.0.0.0:80"), @@ -402,8 +402,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "iTP: Local, eTP: Local, all endpoints remote", serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true, externalPolicyLocal: true, nodePort: 8080}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, isLocal: false}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), localEndpoints: sets.New[string](), @@ -413,8 +413,8 @@ func TestCategorizeEndpoints(t *testing.T) { pteEnabled: true, serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true, externalPolicyLocal: true, nodePort: 8080}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: false, Serving: true, Terminating: true, IsLocal: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: false, Serving: true, Terminating: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: false, serving: true, terminating: true, isLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: false, serving: true, terminating: true, isLocal: false}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), localEndpoints: sets.New[string](), @@ -425,10 +425,10 @@ func TestCategorizeEndpoints(t *testing.T) { pteEnabled: true, serviceInfo: &BaseServicePortInfo{internalPolicyLocal: false, externalPolicyLocal: true, nodePort: 8080}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: false, Serving: false, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.2:80", Ready: false, Serving: true, Terminating: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.3:80", Ready: false, Serving: true, Terminating: true, IsLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: false, serving: false, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.2:80", ready: false, serving: true, terminating: true, isLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.3:80", ready: false, serving: true, terminating: true, isLocal: false}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80"), localEndpoints: sets.New[string]("10.0.0.2:80"), @@ -437,8 +437,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "externalTrafficPolicy ignored if not externally accessible", serviceInfo: &BaseServicePortInfo{externalPolicyLocal: true}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, 
isLocal: true}, }, clusterEndpoints: sets.New[string]("10.0.0.0:80", "10.0.0.1:80"), localEndpoints: nil, @@ -447,8 +447,8 @@ func TestCategorizeEndpoints(t *testing.T) { name: "no cluster endpoints for iTP:Local internal-only service", serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true}, endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: true, IsLocal: true}, + &BaseEndpointInfo{endpoint: "10.0.0.0:80", ready: true, isLocal: false}, + &BaseEndpointInfo{endpoint: "10.0.0.1:80", ready: true, isLocal: true}, }, clusterEndpoints: nil, localEndpoints: sets.New[string]("10.0.0.1:80"), diff --git a/pkg/proxy/types.go b/pkg/proxy/types.go index 4e2d23ed33013..5bb3bbefe9d1f 100644 --- a/pkg/proxy/types.go +++ b/pkg/proxy/types.go @@ -108,33 +108,26 @@ type Endpoint interface { // String returns endpoint string. An example format can be: `IP:Port`. // We take the returned value as ServiceEndpoint.Endpoint. String() string - // GetIsLocal returns true if the endpoint is running in same host as kube-proxy, otherwise returns false. - GetIsLocal() bool - // IsReady returns true if an endpoint is ready and not terminating. - // This is only set when watching EndpointSlices. If using Endpoints, this is always - // true since only ready endpoints are read from Endpoints. + // IP returns IP part of the endpoint. + IP() string + // Port returns the Port part of the endpoint. + Port() (int, error) + + // IsLocal returns true if the endpoint is running on the same host as kube-proxy. + IsLocal() bool + // IsReady returns true if an endpoint is ready and not terminating, or + // if PublishNotReadyAddresses is set on the service. IsReady() bool // IsServing returns true if an endpoint is ready. It does not account // for terminating state. - // This is only set when watching EndpointSlices. If using Endpoints, this is always - // true since only ready endpoints are read from Endpoints. IsServing() bool // IsTerminating returns true if an endpoint is terminating. For pods, // that is any pod with a deletion timestamp. - // This is only set when watching EndpointSlices. If using Endpoints, this is always - // false since terminating endpoints are always excluded from Endpoints. IsTerminating() bool - // GetZoneHints returns the zone hint for the endpoint. This is based on + + // ZoneHints returns the zone hint for the endpoint. This is based on // endpoint.hints.forZones[0].name in the EndpointSlice API. - GetZoneHints() sets.Set[string] - // IP returns IP part of the endpoint. - IP() string - // Port returns the Port part of the endpoint. - Port() (int, error) - // GetNodeName returns the node name for the endpoint - GetNodeName() string - // GetZone returns the zone for the endpoint - GetZone() string + ZoneHints() sets.Set[string] } // ServiceEndpoint is used to identify a service and one of its endpoint pair. 
diff --git a/pkg/proxy/util/utils_test.go b/pkg/proxy/util/utils_test.go index aa3a32dd2c599..537e77570ae07 100644 --- a/pkg/proxy/util/utils_test.go +++ b/pkg/proxy/util/utils_test.go @@ -698,7 +698,7 @@ func TestRevertPorts(t *testing.T) { } } for _, lp := range tc.existingPorts { - if existingPortsMap[lp].(*fakeClosable).closed == true { + if existingPortsMap[lp].(*fakeClosable).closed { t.Errorf("Expect existing localport %v to be false in test case %v", lp, i) } } diff --git a/pkg/proxy/winkernel/hns.go b/pkg/proxy/winkernel/hns.go index 9fa83e14ef92b..b11eb7cacd4ea 100644 --- a/pkg/proxy/winkernel/hns.go +++ b/pkg/proxy/winkernel/hns.go @@ -32,13 +32,13 @@ import ( type HostNetworkService interface { getNetworkByName(name string) (*hnsNetworkInfo, error) - getAllEndpointsByNetwork(networkName string) (map[string]*endpointsInfo, error) - getEndpointByID(id string) (*endpointsInfo, error) - getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) - getEndpointByName(id string) (*endpointsInfo, error) - createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) + getAllEndpointsByNetwork(networkName string) (map[string]*endpointInfo, error) + getEndpointByID(id string) (*endpointInfo, error) + getEndpointByIpAddress(ip string, networkName string) (*endpointInfo, error) + getEndpointByName(id string) (*endpointInfo, error) + createEndpoint(ep *endpointInfo, networkName string) (*endpointInfo, error) deleteEndpoint(hnsID string) error - getLoadBalancer(endpoints []endpointsInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort uint16, previousLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) (*loadBalancerInfo, error) + getLoadBalancer(endpoints []endpointInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort uint16, previousLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) (*loadBalancerInfo, error) getAllLoadBalancers() (map[loadBalancerIdentifier]*loadBalancerInfo, error) deleteLoadBalancer(hnsID string) error } @@ -87,7 +87,7 @@ func (hns hns) getNetworkByName(name string) (*hnsNetworkInfo, error) { }, nil } -func (hns hns) getAllEndpointsByNetwork(networkName string) (map[string]*(endpointsInfo), error) { +func (hns hns) getAllEndpointsByNetwork(networkName string) (map[string]*(endpointInfo), error) { hcnnetwork, err := hns.hcn.GetNetworkByName(networkName) if err != nil { klog.ErrorS(err, "failed to get HNS network by name", "name", networkName) @@ -97,7 +97,7 @@ func (hns hns) getAllEndpointsByNetwork(networkName string) (map[string]*(endpoi if err != nil { return nil, fmt.Errorf("failed to list endpoints: %w", err) } - endpointInfos := make(map[string]*(endpointsInfo)) + endpointInfos := make(map[string]*(endpointInfo)) for _, ep := range endpoints { if len(ep.IpConfigurations) == 0 { @@ -108,7 +108,7 @@ func (hns hns) getAllEndpointsByNetwork(networkName string) (map[string]*(endpoi // Add to map with key endpoint ID or IP address // Storing this is expensive in terms of memory, however there is a bug in Windows Server 2019 that can cause two endpoints to be created with the same IP address. // TODO: Store by IP only and remove any lookups by endpoint ID. 
- endpointInfos[ep.Id] = &endpointsInfo{ + endpointInfos[ep.Id] = &endpointInfo{ ip: ep.IpConfigurations[0].IpAddress, isLocal: uint32(ep.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, macAddress: ep.MacAddress, @@ -127,7 +127,7 @@ func (hns hns) getAllEndpointsByNetwork(networkName string) (map[string]*(endpoi // If ipFamilyPolicy is RequireDualStack or PreferDualStack, then there will be 2 IPS (iPV4 and IPV6) // in the endpoint list - endpointDualstack := &endpointsInfo{ + endpointDualstack := &endpointInfo{ ip: ep.IpConfigurations[1].IpAddress, isLocal: uint32(ep.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, macAddress: ep.MacAddress, @@ -145,12 +145,12 @@ func (hns hns) getAllEndpointsByNetwork(networkName string) (map[string]*(endpoi return endpointInfos, nil } -func (hns hns) getEndpointByID(id string) (*endpointsInfo, error) { +func (hns hns) getEndpointByID(id string) (*endpointInfo, error) { hnsendpoint, err := hns.hcn.GetEndpointByID(id) if err != nil { return nil, err } - return &endpointsInfo{ //TODO: fill out PA + return &endpointInfo{ //TODO: fill out PA ip: hnsendpoint.IpConfigurations[0].IpAddress, isLocal: uint32(hnsendpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, //TODO: Change isLocal to isRemote macAddress: hnsendpoint.MacAddress, @@ -158,7 +158,7 @@ func (hns hns) getEndpointByID(id string) (*endpointsInfo, error) { hns: hns, }, nil } -func (hns hns) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) { +func (hns hns) getEndpointByIpAddress(ip string, networkName string) (*endpointInfo, error) { hnsnetwork, err := hns.hcn.GetNetworkByName(networkName) if err != nil { klog.ErrorS(err, "Error getting network by name") @@ -179,7 +179,7 @@ func (hns hns) getEndpointByIpAddress(ip string, networkName string) (*endpoints } } if equal && strings.EqualFold(endpoint.HostComputeNetwork, hnsnetwork.Id) { - return &endpointsInfo{ + return &endpointInfo{ ip: ip, isLocal: uint32(endpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, //TODO: Change isLocal to isRemote macAddress: endpoint.MacAddress, @@ -190,12 +190,12 @@ func (hns hns) getEndpointByIpAddress(ip string, networkName string) (*endpoints } return nil, fmt.Errorf("Endpoint %v not found on network %s", ip, networkName) } -func (hns hns) getEndpointByName(name string) (*endpointsInfo, error) { +func (hns hns) getEndpointByName(name string) (*endpointInfo, error) { hnsendpoint, err := hns.hcn.GetEndpointByName(name) if err != nil { return nil, err } - return &endpointsInfo{ //TODO: fill out PA + return &endpointInfo{ //TODO: fill out PA ip: hnsendpoint.IpConfigurations[0].IpAddress, isLocal: uint32(hnsendpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, //TODO: Change isLocal to isRemote macAddress: hnsendpoint.MacAddress, @@ -203,7 +203,7 @@ func (hns hns) getEndpointByName(name string) (*endpointsInfo, error) { hns: hns, }, nil } -func (hns hns) createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) { +func (hns hns) createEndpoint(ep *endpointInfo, networkName string) (*endpointInfo, error) { hnsNetwork, err := hns.hcn.GetNetworkByName(networkName) if err != nil { return nil, err @@ -251,7 +251,7 @@ func (hns hns) createEndpoint(ep *endpointsInfo, networkName string) (*endpoints return nil, err } } - return &endpointsInfo{ + return &endpointInfo{ ip: createdEndpoint.IpConfigurations[0].IpAddress, isLocal: uint32(createdEndpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, macAddress: createdEndpoint.MacAddress, @@ -273,7 +273,7 @@ func (hns hns) deleteEndpoint(hnsID 
string) error { } // findLoadBalancerID will construct a id from the provided loadbalancer fields -func findLoadBalancerID(endpoints []endpointsInfo, vip string, protocol, internalPort, externalPort uint16) (loadBalancerIdentifier, error) { +func findLoadBalancerID(endpoints []endpointInfo, vip string, protocol, internalPort, externalPort uint16) (loadBalancerIdentifier, error) { // Compute hash from backends (endpoint IDs) hash, err := hashEndpoints(endpoints) if err != nil { @@ -315,7 +315,7 @@ func (hns hns) getAllLoadBalancers() (map[loadBalancerIdentifier]*loadBalancerIn return loadBalancers, nil } -func (hns hns) getLoadBalancer(endpoints []endpointsInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort uint16, previousLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) (*loadBalancerInfo, error) { +func (hns hns) getLoadBalancer(endpoints []endpointInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort uint16, previousLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) (*loadBalancerInfo, error) { var id loadBalancerIdentifier vips := []string{} // Compute hash from backends (endpoint IDs) @@ -424,7 +424,7 @@ func (hns hns) deleteLoadBalancer(hnsID string) error { } // Calculates a hash from the given endpoint IDs. -func hashEndpoints[T string | endpointsInfo](endpoints []T) (hash [20]byte, err error) { +func hashEndpoints[T string | endpointInfo](endpoints []T) (hash [20]byte, err error) { var id string // Recover in case something goes wrong. Return error and null byte array. defer func() { @@ -437,7 +437,7 @@ func hashEndpoints[T string | endpointsInfo](endpoints []T) (hash [20]byte, err // Iterate over endpoints, compute hash for _, ep := range endpoints { switch x := any(ep).(type) { - case endpointsInfo: + case endpointInfo: id = strings.ToUpper(x.hnsID) case string: id = x diff --git a/pkg/proxy/winkernel/hns_test.go b/pkg/proxy/winkernel/hns_test.go index 6e674425f785a..1567d467c0954 100644 --- a/pkg/proxy/winkernel/hns_test.go +++ b/pkg/proxy/winkernel/hns_test.go @@ -203,7 +203,7 @@ func TestCreateEndpointLocal(t *testing.T) { hns := hns{hcn: newHcnImpl()} Network := mustTestNetwork(t) - endpoint := &endpointsInfo{ + endpoint := &endpointInfo{ ip: epIpAddress, macAddress: epMacAddress, isLocal: true, @@ -242,7 +242,7 @@ func TestCreateEndpointRemote(t *testing.T) { Network := mustTestNetwork(t) providerAddress := epPaAddress - endpoint := &endpointsInfo{ + endpoint := &endpointInfo{ ip: epIpAddressRemote, macAddress: epMacAddress, isLocal: false, @@ -350,11 +350,11 @@ func TestGetLoadBalancerExisting(t *testing.T) { if err != nil { t.Error(err) } - endpoint := &endpointsInfo{ + endpoint := &endpointInfo{ ip: Endpoint.IpConfigurations[0].IpAddress, hnsID: Endpoint.Id, } - endpoints := []endpointsInfo{*endpoint} + endpoints := []endpointInfo{*endpoint} hash, err := hashEndpoints(endpoints) if err != nil { t.Error(err) @@ -409,11 +409,11 @@ func TestGetLoadBalancerNew(t *testing.T) { if err != nil { t.Error(err) } - endpoint := &endpointsInfo{ + endpoint := &endpointInfo{ ip: Endpoint.IpConfigurations[0].IpAddress, hnsID: Endpoint.Id, } - endpoints := []endpointsInfo{*endpoint} + endpoints := []endpointInfo{*endpoint} lb, err := hns.getLoadBalancer(endpoints, loadBalancerFlags{}, sourceVip, serviceVip, protocol, internalPort, externalPort, lbs) if err != nil { t.Error(err) @@ -523,7 +523,7 @@ func TestHashEndpoints(t *testing.T) { if err != nil { 
t.Error(err) } - endpointInfoA := &endpointsInfo{ + endpointInfoA := &endpointInfo{ ip: endpointA.IpConfigurations[0].IpAddress, hnsID: endpointA.Id, } @@ -543,12 +543,12 @@ func TestHashEndpoints(t *testing.T) { if err != nil { t.Error(err) } - endpointInfoB := &endpointsInfo{ + endpointInfoB := &endpointInfo{ ip: endpointB.IpConfigurations[0].IpAddress, hnsID: endpointB.Id, } - endpoints := []endpointsInfo{*endpointInfoA, *endpointInfoB} - endpointsReverse := []endpointsInfo{*endpointInfoB, *endpointInfoA} + endpoints := []endpointInfo{*endpointInfoA, *endpointInfoB} + endpointsReverse := []endpointInfo{*endpointInfoB, *endpointInfoA} h1, err := hashEndpoints(endpoints) if err != nil { t.Error(err) diff --git a/pkg/proxy/winkernel/proxier.go b/pkg/proxy/winkernel/proxier.go index 642a60cdeceb7..3682d00e9c09b 100644 --- a/pkg/proxy/winkernel/proxier.go +++ b/pkg/proxy/winkernel/proxier.go @@ -123,7 +123,7 @@ type serviceInfo struct { hnsID string nodePorthnsID string policyApplied bool - remoteEndpoint *endpointsInfo + remoteEndpoint *endpointInfo hns HostNetworkService preserveDIP bool localTrafficDSR bool @@ -171,7 +171,7 @@ func logFormattedEndpoints(logMsg string, logLevel klog.Level, svcPortName proxy if klog.V(logLevel).Enabled() { var epInfo string for _, v := range eps { - epInfo = epInfo + fmt.Sprintf("\n %s={Ready:%v,Serving:%v,Terminating:%v,IsRemote:%v}", v.String(), v.IsReady(), v.IsServing(), v.IsTerminating(), !v.GetIsLocal()) + epInfo = epInfo + fmt.Sprintf("\n %s={Ready:%v,Serving:%v,Terminating:%v,IsRemote:%v}", v.String(), v.IsReady(), v.IsServing(), v.IsTerminating(), !v.IsLocal()) } klog.V(logLevel).InfoS(logMsg, "svcPortName", svcPortName, "endpoints", epInfo) } @@ -273,7 +273,7 @@ func (t DualStackCompatTester) DualStackCompatible(networkName string) bool { } // internal struct for endpoints information -type endpointsInfo struct { +type endpointInfo struct { ip string port uint16 isLocal bool @@ -290,60 +290,45 @@ type endpointsInfo struct { } // String is part of proxy.Endpoint interface. -func (info *endpointsInfo) String() string { +func (info *endpointInfo) String() string { return net.JoinHostPort(info.ip, strconv.Itoa(int(info.port))) } -// GetIsLocal is part of proxy.Endpoint interface. -func (info *endpointsInfo) GetIsLocal() bool { +// IsLocal is part of proxy.Endpoint interface. +func (info *endpointInfo) IsLocal() bool { return info.isLocal } // IsReady returns true if an endpoint is ready and not terminating. -func (info *endpointsInfo) IsReady() bool { +func (info *endpointInfo) IsReady() bool { return info.ready } // IsServing returns true if an endpoint is ready, regardless of it's terminating state. -func (info *endpointsInfo) IsServing() bool { +func (info *endpointInfo) IsServing() bool { return info.serving } // IsTerminating returns true if an endpoint is terminating. -func (info *endpointsInfo) IsTerminating() bool { +func (info *endpointInfo) IsTerminating() bool { return info.terminating } -// GetZoneHint returns the zone hint for the endpoint. -func (info *endpointsInfo) GetZoneHints() sets.Set[string] { +// ZoneHints returns the zone hints for the endpoint. +func (info *endpointInfo) ZoneHints() sets.Set[string] { return sets.Set[string]{} } // IP returns just the IP part of the endpoint, it's a part of proxy.Endpoint interface. -func (info *endpointsInfo) IP() string { +func (info *endpointInfo) IP() string { return info.ip } // Port returns just the Port part of the endpoint. 
-func (info *endpointsInfo) Port() (int, error) { +func (info *endpointInfo) Port() (int, error) { return int(info.port), nil } -// Equal is part of proxy.Endpoint interface. -func (info *endpointsInfo) Equal(other proxy.Endpoint) bool { - return info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal() -} - -// GetNodeName returns the NodeName for this endpoint. -func (info *endpointsInfo) GetNodeName() string { - return "" -} - -// GetZone returns the Zone for this endpoint. -func (info *endpointsInfo) GetZone() string { - return "" -} - // Uses mac prefix and IPv4 address to return a mac address // This ensures mac addresses are unique for proper load balancing // There is a possibility of MAC collisions but this Mac address is used for remote endpoints only @@ -407,7 +392,7 @@ func (proxier *Proxier) onEndpointsMapChange(svcPortName *proxy.ServicePortName, if exists { // Cleanup Endpoints references for _, ep := range epInfos { - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if ok { epInfo.Cleanup() @@ -448,7 +433,7 @@ func (proxier *Proxier) onServiceMapChange(svcPortName *proxy.ServicePortName) { } } -// returns a new proxy.Endpoint which abstracts a endpointsInfo +// returns a new proxy.Endpoint which abstracts a endpointInfo func (proxier *Proxier) newEndpointInfo(baseInfo *proxy.BaseEndpointInfo, _ *proxy.ServicePortName) proxy.Endpoint { portNumber, err := baseInfo.Port() @@ -457,25 +442,25 @@ func (proxier *Proxier) newEndpointInfo(baseInfo *proxy.BaseEndpointInfo, _ *pro portNumber = 0 } - info := &endpointsInfo{ + info := &endpointInfo{ ip: baseInfo.IP(), port: uint16(portNumber), - isLocal: baseInfo.GetIsLocal(), + isLocal: baseInfo.IsLocal(), macAddress: conjureMac("02-11", netutils.ParseIPSloppy(baseInfo.IP())), refCount: new(uint16), hnsID: "", hns: proxier.hns, - ready: baseInfo.Ready, - serving: baseInfo.Serving, - terminating: baseInfo.Terminating, + ready: baseInfo.IsReady(), + serving: baseInfo.IsServing(), + terminating: baseInfo.IsTerminating(), } return info } -func newSourceVIP(hns HostNetworkService, network string, ip string, mac string, providerAddress string) (*endpointsInfo, error) { - hnsEndpoint := &endpointsInfo{ +func newSourceVIP(hns HostNetworkService, network string, ip string, mac string, providerAddress string) (*endpointInfo, error) { + hnsEndpoint := &endpointInfo{ ip: ip, isLocal: true, macAddress: mac, @@ -489,22 +474,22 @@ func newSourceVIP(hns HostNetworkService, network string, ip string, mac string, return ep, err } -func (ep *endpointsInfo) DecrementRefCount() { - klog.V(3).InfoS("Decrementing Endpoint RefCount", "endpointsInfo", ep) - if !ep.GetIsLocal() && ep.refCount != nil && *ep.refCount > 0 { +func (ep *endpointInfo) DecrementRefCount() { + klog.V(3).InfoS("Decrementing Endpoint RefCount", "endpointInfo", ep) + if !ep.IsLocal() && ep.refCount != nil && *ep.refCount > 0 { *ep.refCount-- } } -func (ep *endpointsInfo) Cleanup() { - klog.V(3).InfoS("Endpoint cleanup", "endpointsInfo", ep) - if !ep.GetIsLocal() && ep.refCount != nil { +func (ep *endpointInfo) Cleanup() { + klog.V(3).InfoS("Endpoint cleanup", "endpointInfo", ep) + if !ep.IsLocal() && ep.refCount != nil { *ep.refCount-- // Remove the remote hns endpoint, if no service is referring it // Never delete a Local Endpoint. Local Endpoints are already created by other entities. 
// Remove only remote endpoints created by this service - if *ep.refCount <= 0 && !ep.GetIsLocal() { + if *ep.refCount <= 0 && !ep.IsLocal() { klog.V(4).InfoS("Removing endpoints, since no one is referencing it", "endpoint", ep) err := ep.hns.deleteEndpoint(ep.hnsID) if err == nil { @@ -594,6 +579,8 @@ type endPointsReferenceCountMap map[string]*uint16 // Proxier is an hns based proxy for connections between a localhost:lport // and services that provide the actual backends. type Proxier struct { + // ipFamily defines the IP family which this proxier is tracking. + ipFamily v1.IPFamily // TODO(imroc): implement node handler for winkernel proxier. proxyconfig.NoopNodeHandler @@ -601,7 +588,7 @@ type Proxier struct { // services that happened since policies were synced. For a single object, // changes are accumulated, i.e. previous is state from before all of them, // current is state after applying all of those. - endpointsChanges *proxy.EndpointChangeTracker + endpointsChanges *proxy.EndpointsChangeTracker serviceChanges *proxy.ServiceChangeTracker endPointsRefCount endPointsReferenceCountMap mu sync.Mutex // protects the following fields @@ -612,7 +599,6 @@ type Proxier struct { // with some partial data after kube-proxy restart. endpointSlicesSynced bool servicesSynced bool - isIPv6Mode bool initialized int32 syncRunner *async.BoundedFrequencyRunner // governs calls to syncProxyRules // These are effectively const and do not need the mutex to be held. @@ -622,7 +608,7 @@ type Proxier struct { recorder events.EventRecorder serviceHealthServer healthcheck.ServiceHealthServer - healthzServer healthcheck.ProxierHealthUpdater + healthzServer *healthcheck.ProxierHealthServer hns HostNetworkService hcn HcnService @@ -671,13 +657,14 @@ var _ proxy.Provider = &Proxier{} // NewProxier returns a new Proxier func NewProxier( + ipFamily v1.IPFamily, syncPeriod time.Duration, minSyncPeriod time.Duration, clusterCIDR string, hostname string, nodeIP net.IP, recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, + healthzServer *healthcheck.ProxierHealthServer, config config.KubeProxyWinkernelConfiguration, healthzPort int, ) (*Proxier, error) { @@ -690,12 +677,6 @@ func NewProxier( klog.InfoS("ClusterCIDR not specified, unable to distinguish between internal and external traffic") } - isIPv6 := netutils.IsIPv6(nodeIP) - ipFamily := v1.IPv4Protocol - if isIPv6 { - ipFamily = v1.IPv6Protocol - } - // windows listens to all node addresses nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil) serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) @@ -778,6 +759,7 @@ func NewProxier( } proxier := &Proxier{ + ipFamily: ipFamily, endPointsRefCount: make(endPointsReferenceCountMap), svcPortMap: make(proxy.ServicePortMap), endpointsMap: make(proxy.EndpointsMap), @@ -794,7 +776,6 @@ func NewProxier( hostMac: hostMac, isDSR: isDSR, supportedFeatures: supportedFeatures, - isIPv6Mode: isIPv6, healthzPort: healthzPort, rootHnsEndpointName: config.RootHnsEndpointName, forwardHealthCheckVip: config.ForwardHealthCheckVip, @@ -802,7 +783,7 @@ func NewProxier( } serviceChanges := proxy.NewServiceChangeTracker(proxier.newServiceInfo, ipFamily, recorder, proxier.serviceMapChange) - endPointChangeTracker := proxy.NewEndpointChangeTracker(hostname, proxier.newEndpointInfo, ipFamily, recorder, proxier.endpointsMapChange) + endPointChangeTracker := proxy.NewEndpointsChangeTracker(hostname, proxier.newEndpointInfo, ipFamily, recorder, 
proxier.endpointsMapChange) proxier.endpointsChanges = endPointChangeTracker proxier.serviceChanges = serviceChanges @@ -819,13 +800,13 @@ func NewDualStackProxier( hostname string, nodeIPs map[v1.IPFamily]net.IP, recorder events.EventRecorder, - healthzServer healthcheck.ProxierHealthUpdater, + healthzServer *healthcheck.ProxierHealthServer, config config.KubeProxyWinkernelConfiguration, healthzPort int, ) (proxy.Provider, error) { // Create an ipv4 instance of the single-stack proxier - ipv4Proxier, err := NewProxier(syncPeriod, minSyncPeriod, + ipv4Proxier, err := NewProxier(v1.IPv4Protocol, syncPeriod, minSyncPeriod, clusterCIDR, hostname, nodeIPs[v1.IPv4Protocol], recorder, healthzServer, config, healthzPort) @@ -833,7 +814,7 @@ func NewDualStackProxier( return nil, fmt.Errorf("unable to create ipv4 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIPs[v1.IPv4Protocol]) } - ipv6Proxier, err := NewProxier(syncPeriod, minSyncPeriod, + ipv6Proxier, err := NewProxier(v1.IPv6Protocol, syncPeriod, minSyncPeriod, clusterCIDR, hostname, nodeIPs[v1.IPv6Protocol], recorder, healthzServer, config, healthzPort) if err != nil { @@ -868,7 +849,7 @@ func (svcInfo *serviceInfo) cleanupAllPolicies(endpoints []proxy.Endpoint, mapSt } // Cleanup Endpoints references for _, ep := range endpoints { - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if ok { if winProxyOptimization { epInfo.DecrementRefCount() @@ -937,7 +918,7 @@ func (svcInfo *serviceInfo) deleteLoadBalancerPolicy(mapStaleLoadbalancer map[st // Sync is called to synchronize the proxier state to hns as soon as possible. func (proxier *Proxier) Sync() { if proxier.healthzServer != nil { - proxier.healthzServer.QueuedUpdate() + proxier.healthzServer.QueuedUpdate(proxier.ipFamily) } metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() proxier.syncRunner.Run() @@ -947,7 +928,7 @@ func (proxier *Proxier) Sync() { func (proxier *Proxier) SyncLoop() { // Update healthz timestamp at beginning in case Sync() never succeeds. if proxier.healthzServer != nil { - proxier.healthzServer.Updated() + proxier.healthzServer.Updated(proxier.ipFamily) } // synthesize "last change queued" time as the informers are syncing. 
metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime() @@ -1062,11 +1043,11 @@ func isNetworkNotFoundError(err error) bool { // If atleast one is not terminating, then return false func (proxier *Proxier) isAllEndpointsTerminating(svcName proxy.ServicePortName, isLocalTrafficDSR bool) bool { for _, epInfo := range proxier.endpointsMap[svcName] { - ep, ok := epInfo.(*endpointsInfo) + ep, ok := epInfo.(*endpointInfo) if !ok { continue } - if isLocalTrafficDSR && !ep.GetIsLocal() { + if isLocalTrafficDSR && !ep.IsLocal() { // KEP-1669: Ignore remote endpoints when the ExternalTrafficPolicy is Local (DSR Mode) continue } @@ -1087,11 +1068,11 @@ func (proxier *Proxier) isAllEndpointsTerminating(svcName proxy.ServicePortName, // If atleast one is serving, then return false func (proxier *Proxier) isAllEndpointsNonServing(svcName proxy.ServicePortName, isLocalTrafficDSR bool) bool { for _, epInfo := range proxier.endpointsMap[svcName] { - ep, ok := epInfo.(*endpointsInfo) + ep, ok := epInfo.(*endpointInfo) if !ok { continue } - if isLocalTrafficDSR && !ep.GetIsLocal() { + if isLocalTrafficDSR && !ep.IsLocal() { continue } if ep.IsServing() { @@ -1102,7 +1083,7 @@ func (proxier *Proxier) isAllEndpointsNonServing(svcName proxy.ServicePortName, } // updateQueriedEndpoints updates the queriedEndpoints map with newly created endpoint details -func updateQueriedEndpoints(newHnsEndpoint *endpointsInfo, queriedEndpoints map[string]*endpointsInfo) { +func updateQueriedEndpoints(newHnsEndpoint *endpointInfo, queriedEndpoints map[string]*endpointInfo) { // store newly created endpoints in queriedEndpoints queriedEndpoints[newHnsEndpoint.hnsID] = newHnsEndpoint queriedEndpoints[newHnsEndpoint.ip] = newHnsEndpoint @@ -1130,7 +1111,7 @@ func (proxier *Proxier) syncProxyRules() { hnsNetworkName := proxier.network.name hns := proxier.hns - var gatewayHnsendpoint *endpointsInfo + var gatewayHnsendpoint *endpointInfo if proxier.forwardHealthCheckVip { gatewayHnsendpoint, _ = hns.getEndpointByName(proxier.rootHnsEndpointName) } @@ -1153,7 +1134,7 @@ func (proxier *Proxier) syncProxyRules() { endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges) deletedUDPClusterIPs := serviceUpdateResult.DeletedUDPClusterIPs - // merge stale services gathered from updateEndpointsMap + // merge stale services gathered from EndpointsMap.Update for _, svcPortName := range endpointUpdateResult.NewlyActiveUDPServices { if svcInfo, ok := proxier.svcPortMap[svcPortName]; ok && svcInfo != nil && svcInfo.Protocol() == v1.ProtocolUDP { klog.V(2).InfoS("Newly-active UDP service may have stale conntrack entries", "servicePortName", svcPortName) @@ -1168,7 +1149,7 @@ func (proxier *Proxier) syncProxyRules() { } if queriedEndpoints == nil { klog.V(4).InfoS("No existing endpoints found in HNS") - queriedEndpoints = make(map[string]*(endpointsInfo)) + queriedEndpoints = make(map[string]*(endpointInfo)) } queriedLoadBalancers, err := hns.getAllLoadBalancers() if queriedLoadBalancers == nil { @@ -1208,7 +1189,7 @@ func (proxier *Proxier) syncProxyRules() { serviceVipEndpoint := queriedEndpoints[svcInfo.ClusterIP().String()] if serviceVipEndpoint == nil { klog.V(4).InfoS("No existing remote endpoint", "IP", svcInfo.ClusterIP()) - hnsEndpoint := &endpointsInfo{ + hnsEndpoint := &endpointInfo{ ip: svcInfo.ClusterIP().String(), isLocal: false, macAddress: proxier.hostMac, @@ -1228,8 +1209,8 @@ func (proxier *Proxier) syncProxyRules() { } } - var hnsEndpoints []endpointsInfo - var hnsLocalEndpoints []endpointsInfo + var 
hnsEndpoints []endpointInfo + var hnsLocalEndpoints []endpointInfo klog.V(4).InfoS("Applying Policy", "serviceInfo", svcName) // Create Remote endpoints for every endpoint, corresponding to the service containsPublicIP := false @@ -1249,13 +1230,13 @@ func (proxier *Proxier) syncProxyRules() { } for _, epInfo := range proxier.endpointsMap[svcName] { - ep, ok := epInfo.(*endpointsInfo) + ep, ok := epInfo.(*endpointInfo) if !ok { - klog.ErrorS(nil, "Failed to cast endpointsInfo", "serviceName", svcName) + klog.ErrorS(nil, "Failed to cast endpointInfo", "serviceName", svcName) continue } - if svcInfo.internalTrafficLocal && svcInfo.localTrafficDSR && !ep.GetIsLocal() { + if svcInfo.internalTrafficLocal && svcInfo.localTrafficDSR && !ep.IsLocal() { // No need to use or create remote endpoint when internal and external traffic policy is remote klog.V(3).InfoS("Skipping the endpoint. Both internalTraffic and external traffic policies are local", "EpIP", ep.ip, " EpPort", ep.port) continue @@ -1274,7 +1255,7 @@ func (proxier *Proxier) syncProxyRules() { } - var newHnsEndpoint *endpointsInfo + var newHnsEndpoint *endpointInfo hnsNetworkName := proxier.network.name var err error @@ -1298,7 +1279,7 @@ func (proxier *Proxier) syncProxyRules() { } if newHnsEndpoint == nil { - if ep.GetIsLocal() { + if ep.IsLocal() { klog.ErrorS(err, "Local endpoint not found: on network", "ip", ep.IP(), "hnsNetworkName", hnsNetworkName) continue } @@ -1319,7 +1300,7 @@ func (proxier *Proxier) syncProxyRules() { providerAddress = proxier.nodeIP.String() } - hnsEndpoint := &endpointsInfo{ + hnsEndpoint := &endpointInfo{ ip: ep.ip, isLocal: false, macAddress: conjureMac("02-11", netutils.ParseIPSloppy(ep.ip)), @@ -1328,13 +1309,13 @@ func (proxier *Proxier) syncProxyRules() { newHnsEndpoint, err = hns.createEndpoint(hnsEndpoint, hnsNetworkName) if err != nil { - klog.ErrorS(err, "Remote endpoint creation failed", "endpointsInfo", hnsEndpoint) + klog.ErrorS(err, "Remote endpoint creation failed", "endpointInfo", hnsEndpoint) continue } updateQueriedEndpoints(newHnsEndpoint, queriedEndpoints) } else { - hnsEndpoint := &endpointsInfo{ + hnsEndpoint := &endpointInfo{ ip: ep.ip, isLocal: false, macAddress: ep.macAddress, @@ -1359,7 +1340,7 @@ func (proxier *Proxier) syncProxyRules() { // a) Endpoints are any IP's outside the cluster ==> Choose NodeIP as the SourceVIP // b) Endpoints are IP addresses of a remote node => Choose NodeIP as the SourceVIP // c) Everything else (Local POD's, Remote POD's, Node IP of current node) ==> Choose the configured SourceVIP - if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) && !ep.GetIsLocal() { + if strings.EqualFold(proxier.network.networkType, NETWORK_TYPE_OVERLAY) && !ep.IsLocal() { providerAddress := proxier.network.findRemoteSubnetProviderAddress(ep.IP()) isNodeIP := (ep.IP() == providerAddress) @@ -1371,10 +1352,10 @@ func (proxier *Proxier) syncProxyRules() { } // Save the hnsId for reference - klog.V(1).InfoS("Hns endpoint resource", "endpointsInfo", newHnsEndpoint) + klog.V(1).InfoS("Hns endpoint resource", "endpointInfo", newHnsEndpoint) hnsEndpoints = append(hnsEndpoints, *newHnsEndpoint) - if newHnsEndpoint.GetIsLocal() { + if newHnsEndpoint.IsLocal() { hnsLocalEndpoints = append(hnsLocalEndpoints, *newHnsEndpoint) } else { // We only share the refCounts for remote endpoints @@ -1384,10 +1365,10 @@ func (proxier *Proxier) syncProxyRules() { ep.hnsID = newHnsEndpoint.hnsID - klog.V(3).InfoS("Endpoint resource found", "endpointsInfo", ep) + 
klog.V(3).InfoS("Endpoint resource found", "endpointInfo", ep) } - klog.V(3).InfoS("Associated endpoints for service", "endpointsInfo", hnsEndpoints, "serviceName", svcName) + klog.V(3).InfoS("Associated endpoints for service", "endpointInfo", hnsEndpoints, "serviceName", svcName) if len(svcInfo.hnsID) > 0 { // This should not happen @@ -1399,7 +1380,7 @@ func (proxier *Proxier) syncProxyRules() { if len(hnsEndpoints) == 0 { if svcInfo.winProxyOptimization { // Deleting loadbalancers when there are no endpoints to serve. - klog.V(3).InfoS("Cleanup existing ", "endpointsInfo", hnsEndpoints, "serviceName", svcName) + klog.V(3).InfoS("Cleanup existing ", "endpointInfo", hnsEndpoints, "serviceName", svcName) svcInfo.deleteLoadBalancerPolicy(proxier.mapStaleLoadbalancers) } klog.ErrorS(nil, "Endpoint information not available for service, not applying any policy", "serviceName", svcName) @@ -1434,7 +1415,7 @@ func (proxier *Proxier) syncProxyRules() { // Cluster IP LoadBalancer creation hnsLoadBalancer, err := hns.getLoadBalancer( clusterIPEndpoints, - loadBalancerFlags{isDSR: proxier.isDSR, isIPv6: proxier.isIPv6Mode, sessionAffinity: sessionAffinityClientIP}, + loadBalancerFlags{isDSR: proxier.isDSR, isIPv6: proxier.ipFamily == v1.IPv6Protocol, sessionAffinity: sessionAffinityClientIP}, sourceVip, svcInfo.ClusterIP().String(), Enum(svcInfo.Protocol()), @@ -1469,7 +1450,7 @@ func (proxier *Proxier) syncProxyRules() { // If all endpoints are in terminating stage, then no need to create Node Port LoadBalancer hnsLoadBalancer, err := hns.getLoadBalancer( nodePortEndpoints, - loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, localRoutedVIP: true, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, + loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, localRoutedVIP: true, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.ipFamily == v1.IPv6Protocol}, sourceVip, "", Enum(svcInfo.Protocol()), @@ -1504,7 +1485,7 @@ func (proxier *Proxier) syncProxyRules() { // Try loading existing policies, if already available hnsLoadBalancer, err = hns.getLoadBalancer( externalIPEndpoints, - loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, + loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.localTrafficDSR, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.ipFamily == v1.IPv6Protocol}, sourceVip, externalIP.ip, Enum(svcInfo.Protocol()), @@ -1535,7 +1516,7 @@ func (proxier *Proxier) syncProxyRules() { if len(lbIngressEndpoints) > 0 { hnsLoadBalancer, err := hns.getLoadBalancer( lbIngressEndpoints, - loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.preserveDIP || svcInfo.localTrafficDSR, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, + loadBalancerFlags{isVipExternalIP: true, isDSR: svcInfo.preserveDIP || svcInfo.localTrafficDSR, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.ipFamily == v1.IPv6Protocol}, sourceVip, lbIngressIP.ip, Enum(svcInfo.Protocol()), @@ -1560,10 +1541,10 @@ func (proxier *Proxier) syncProxyRules() { nodeport = svcInfo.HealthCheckNodePort() } - proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &lbIngressIP.healthCheckHnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), 
[]endpointsInfo{*gatewayHnsendpoint}, queriedLoadBalancers) + proxier.deleteExistingLoadBalancer(hns, svcInfo.winProxyOptimization, &lbIngressIP.healthCheckHnsID, sourceVip, Enum(svcInfo.Protocol()), uint16(svcInfo.targetPort), uint16(svcInfo.Port()), []endpointInfo{*gatewayHnsendpoint}, queriedLoadBalancers) hnsHealthCheckLoadBalancer, err := hns.getLoadBalancer( - []endpointsInfo{*gatewayHnsendpoint}, + []endpointInfo{*gatewayHnsendpoint}, loadBalancerFlags{isDSR: false, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP}, sourceVip, lbIngressIP.ip, @@ -1587,7 +1568,7 @@ func (proxier *Proxier) syncProxyRules() { } if proxier.healthzServer != nil { - proxier.healthzServer.Updated() + proxier.healthzServer.Updated(proxier.ipFamily) } metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime() @@ -1623,7 +1604,7 @@ func (proxier *Proxier) syncProxyRules() { // deleteExistingLoadBalancer checks whether loadbalancer delete is needed or not. // If it is needed, the function will delete the existing loadbalancer and return true, else false. -func (proxier *Proxier) deleteExistingLoadBalancer(hns HostNetworkService, winProxyOptimization bool, lbHnsID *string, sourceVip string, protocol, intPort, extPort uint16, endpoints []endpointsInfo, queriedLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) bool { +func (proxier *Proxier) deleteExistingLoadBalancer(hns HostNetworkService, winProxyOptimization bool, lbHnsID *string, sourceVip string, protocol, intPort, extPort uint16, endpoints []endpointInfo, queriedLoadBalancers map[loadBalancerIdentifier]*loadBalancerInfo) bool { if !winProxyOptimization || *lbHnsID == "" { // Loadbalancer delete not needed diff --git a/pkg/proxy/winkernel/proxier_test.go b/pkg/proxy/winkernel/proxier_test.go index 4864a6de7b23b..1b9bd287d24da 100644 --- a/pkg/proxy/winkernel/proxier_test.go +++ b/pkg/proxy/winkernel/proxier_test.go @@ -37,7 +37,7 @@ import ( "k8s.io/kubernetes/pkg/proxy/healthcheck" fakehcn "k8s.io/kubernetes/pkg/proxy/winkernel/testing" netutils "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -123,8 +123,8 @@ func NewFakeProxier(syncPeriod time.Duration, minSyncPeriod time.Duration, clust } serviceChanges := proxy.NewServiceChangeTracker(proxier.newServiceInfo, v1.IPv4Protocol, nil, proxier.serviceMapChange) - endpointChangeTracker := proxy.NewEndpointChangeTracker(hostname, proxier.newEndpointInfo, v1.IPv4Protocol, nil, proxier.endpointsMapChange) - proxier.endpointsChanges = endpointChangeTracker + endpointsChangeTracker := proxy.NewEndpointsChangeTracker(hostname, proxier.newEndpointInfo, v1.IPv4Protocol, nil, proxier.endpointsMapChange) + proxier.endpointsChanges = endpointsChangeTracker proxier.serviceChanges = serviceChanges return proxier @@ -146,7 +146,6 @@ func TestCreateServiceVip(t *testing.T) { Port: "p80", Protocol: v1.ProtocolTCP, } - timeoutSeconds := v1.DefaultClientIPServiceAffinitySeconds makeServiceMap(proxier, makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { @@ -156,7 +155,7 @@ func TestCreateServiceVip(t *testing.T) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ ClientIP: &v1.ClientIPConfig{ - TimeoutSeconds: &timeoutSeconds, + TimeoutSeconds: ptr.To[int32](v1.DefaultClientIPServiceAffinitySeconds), }, } svc.Spec.Ports = []v1.ServicePort{{ @@ -200,7 +199,6 @@ func TestCreateRemoteEndpointOverlay(t *testing.T) { Port: "p80", Protocol: v1.ProtocolTCP, } - tcpProtocol := v1.ProtocolTCP 
makeServiceMap(proxier, makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) { @@ -221,9 +219,9 @@ func TestCreateRemoteEndpointOverlay(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -232,21 +230,21 @@ func TestCreateRemoteEndpointOverlay(t *testing.T) { proxier.syncProxyRules() ep := proxier.endpointsMap[svcPortName][0] - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if !ok { - t.Errorf("Failed to cast endpointsInfo %q", svcPortName.String()) + t.Errorf("Failed to cast endpointInfo %q", svcPortName.String()) } else { - if epInfo.hnsID != endpointGuid1 { + if epInfo.hnsID != "EPID-3" { t.Errorf("%v does not match %v", epInfo.hnsID, endpointGuid1) } } - if *proxier.endPointsRefCount[endpointGuid1] <= 0 { + if *proxier.endPointsRefCount["EPID-3"] <= 0 { t.Errorf("RefCount not incremented. Current value: %v", *proxier.endPointsRefCount[endpointGuid1]) } - if *proxier.endPointsRefCount[endpointGuid1] != *epInfo.refCount { + if *proxier.endPointsRefCount["EPID-3"] != *epInfo.refCount { t.Errorf("Global refCount: %v does not match endpoint refCount: %v", *proxier.endPointsRefCount[endpointGuid1], *epInfo.refCount) } } @@ -258,14 +256,13 @@ func TestCreateRemoteEndpointL2Bridge(t *testing.T) { t.Error() } - tcpProtocol := v1.ProtocolTCP svcIP := "10.20.30.41" svcPort := 80 svcNodePort := 3001 svcPortName := proxy.ServicePortName{ NamespacedName: makeNSN("ns1", "svc1"), Port: "p80", - Protocol: tcpProtocol, + Protocol: v1.ProtocolTCP, } makeServiceMap(proxier, @@ -275,7 +272,7 @@ func TestCreateRemoteEndpointL2Bridge(t *testing.T) { svc.Spec.Ports = []v1.ServicePort{{ Name: svcPortName.Port, Port: int32(svcPort), - Protocol: tcpProtocol, + Protocol: v1.ProtocolTCP, NodePort: int32(svcNodePort), }} }), @@ -287,18 +284,18 @@ func TestCreateRemoteEndpointL2Bridge(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) proxier.setInitialized(true) proxier.syncProxyRules() ep := proxier.endpointsMap[svcPortName][0] - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if !ok { - t.Errorf("Failed to cast endpointsInfo %q", svcPortName.String()) + t.Errorf("Failed to cast endpointInfo %q", svcPortName.String()) } else { if epInfo.hnsID != endpointGuid1 { @@ -316,7 +313,6 @@ func TestCreateRemoteEndpointL2Bridge(t *testing.T) { } func TestSharedRemoteEndpointDelete(t *testing.T) { syncPeriod := 30 * time.Second - tcpProtocol := v1.ProtocolTCP proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), "L2Bridge") if proxier == nil { t.Error() @@ -369,9 +365,9 @@ func TestSharedRemoteEndpointDelete(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName1.Port), - Port: pointer.Int32(int32(svcPort1)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName1.Port), + Port: ptr.To(int32(svcPort1)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice(svcPortName2.Namespace, 
svcPortName2.Name, 1, func(eps *discovery.EndpointSlice) { @@ -380,18 +376,18 @@ func TestSharedRemoteEndpointDelete(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName2.Port), - Port: pointer.Int32(int32(svcPort2)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName2.Port), + Port: ptr.To(int32(svcPort2)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) proxier.setInitialized(true) proxier.syncProxyRules() ep := proxier.endpointsMap[svcPortName1][0] - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if !ok { - t.Errorf("Failed to cast endpointsInfo %q", svcPortName1.String()) + t.Errorf("Failed to cast endpointInfo %q", svcPortName1.String()) } else { if epInfo.hnsID != endpointGuid1 { @@ -428,9 +424,9 @@ func TestSharedRemoteEndpointDelete(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName2.Port), - Port: pointer.Int32(int32(svcPort2)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName2.Port), + Port: ptr.To(int32(svcPort2)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -439,9 +435,9 @@ func TestSharedRemoteEndpointDelete(t *testing.T) { proxier.syncProxyRules() ep = proxier.endpointsMap[svcPortName1][0] - epInfo, ok = ep.(*endpointsInfo) + epInfo, ok = ep.(*endpointInfo) if !ok { - t.Errorf("Failed to cast endpointsInfo %q", svcPortName1.String()) + t.Errorf("Failed to cast endpointInfo %q", svcPortName1.String()) } else { if epInfo.hnsID != endpointGuid1 { @@ -505,7 +501,6 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) { }), ) - tcpProtocol := v1.ProtocolTCP populateEndpointSlices(proxier, makeTestEndpointSlice(svcPortName1.Namespace, svcPortName1.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 @@ -513,9 +508,9 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName1.Port), - Port: pointer.Int32(int32(svcPort1)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName1.Port), + Port: ptr.To(int32(svcPort1)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice(svcPortName2.Namespace, svcPortName2.Name, 1, func(eps *discovery.EndpointSlice) { @@ -524,18 +519,19 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName2.Port), - Port: pointer.Int32(int32(svcPort2)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName2.Port), + Port: ptr.To(int32(svcPort2)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) + proxier.setInitialized(true) proxier.syncProxyRules() ep := proxier.endpointsMap[svcPortName1][0] - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if !ok { - t.Errorf("Failed to cast endpointsInfo %q", svcPortName1.String()) + t.Errorf("Failed to cast endpointInfo %q", svcPortName1.String()) } else { if epInfo.hnsID != endpointGuid1 { @@ -582,9 +578,9 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName1.Port), - Port: pointer.Int32(int32(svcPort1)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName1.Port), + Port: ptr.To(int32(svcPort1)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), makeTestEndpointSlice(svcPortName1.Namespace, svcPortName1.Name, 1, func(eps 
*discovery.EndpointSlice) { @@ -593,14 +589,14 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName1.Port), - Port: pointer.Int32(int32(svcPort1)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName1.Port), + Port: ptr.To(int32(svcPort1)), + Protocol: ptr.To(v1.ProtocolTCP), }, { - Name: pointer.String("p443"), - Port: pointer.Int32(int32(443)), - Protocol: &tcpProtocol, + Name: ptr.To("p443"), + Port: ptr.To[int32](443), + Protocol: ptr.To(v1.ProtocolTCP), }} })) @@ -612,10 +608,10 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) { proxier.syncProxyRules() ep = proxier.endpointsMap[svcPortName1][0] - epInfo, ok = ep.(*endpointsInfo) + epInfo, ok = ep.(*endpointInfo) if !ok { - t.Errorf("Failed to cast endpointsInfo %q", svcPortName1.String()) + t.Errorf("Failed to cast endpointInfo %q", svcPortName1.String()) } else { if epInfo.hnsID != endpointGuid1 { @@ -633,7 +629,6 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) { } func TestCreateLoadBalancer(t *testing.T) { syncPeriod := 30 * time.Second - tcpProtocol := v1.ProtocolTCP proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY) if proxier == nil { t.Error() @@ -667,9 +662,9 @@ func TestCreateLoadBalancer(t *testing.T) { Addresses: []string{epIpAddressRemote}, }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -722,18 +717,17 @@ func TestCreateDsrLoadBalancer(t *testing.T) { }} }), ) - tcpProtocol := v1.ProtocolTCP populateEndpointSlices(proxier, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{epIpAddressRemote}, - NodeName: pointer.String("testhost"), + NodeName: ptr.To("testhost"), }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.String(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -759,7 +753,7 @@ func TestCreateDsrLoadBalancer(t *testing.T) { } if len(svcInfo.loadBalancerIngressIPs) == 0 { t.Errorf("svcInfo does not have any loadBalancerIngressIPs, %+v", svcInfo) - } else if svcInfo.loadBalancerIngressIPs[0].healthCheckHnsID != loadbalancerGuid1 { + } else if svcInfo.loadBalancerIngressIPs[0].healthCheckHnsID != "LBID-4" { t.Errorf("The Hns Loadbalancer HealthCheck Id %v does not match %v. 
ServicePortName %q", svcInfo.loadBalancerIngressIPs[0].healthCheckHnsID, loadbalancerGuid1, svcPortName.String()) } } @@ -802,18 +796,17 @@ func TestClusterIPLBInCreateDsrLoadBalancer(t *testing.T) { }} }), ) - tcpProtocol := v1.ProtocolTCP populateEndpointSlices(proxier, makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) { eps.AddressType = discovery.AddressTypeIPv4 eps.Endpoints = []discovery.Endpoint{{ Addresses: []string{epIpAddressRemote}, - NodeName: pointer.StringPtr("testhost2"), // This will make this endpoint as a remote endpoint + NodeName: ptr.To("testhost2"), // This will make this endpoint as a remote endpoint }} eps.Ports = []discovery.EndpointPort{{ - Name: pointer.StringPtr(svcPortName.Port), - Port: pointer.Int32(int32(svcPort)), - Protocol: &tcpProtocol, + Name: ptr.To(svcPortName.Port), + Port: ptr.To(int32(svcPort)), + Protocol: ptr.To(v1.ProtocolTCP), }} }), ) @@ -876,7 +869,6 @@ func TestEndpointSlice(t *testing.T) { }) // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP endpointSlice := &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-1", svcPortName.Name), @@ -885,14 +877,14 @@ func TestEndpointSlice(t *testing.T) { }, Ports: []discovery.EndpointPort{{ Name: &svcPortName.Port, - Port: pointer.Int32(80), - Protocol: &tcpProtocol, + Port: ptr.To[int32](80), + Protocol: ptr.To(v1.ProtocolTCP), }}, AddressType: discovery.AddressTypeIPv4, Endpoints: []discovery.Endpoint{{ Addresses: []string{"192.168.2.3"}, - Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)}, - NodeName: pointer.String("testhost2"), + Conditions: discovery.EndpointConditions{Ready: ptr.To(true)}, + NodeName: ptr.To("testhost2"), }}, } @@ -912,12 +904,12 @@ func TestEndpointSlice(t *testing.T) { } ep := proxier.endpointsMap[svcPortName][0] - epInfo, ok := ep.(*endpointsInfo) + epInfo, ok := ep.(*endpointInfo) if !ok { - t.Errorf("Failed to cast endpointsInfo %q", svcPortName.String()) + t.Errorf("Failed to cast endpointInfo %q", svcPortName.String()) } else { - if epInfo.hnsID != endpointGuid1 { + if epInfo.hnsID != "EPID-3" { t.Errorf("Hns EndpointId %v does not match %v. 
ServicePortName %q", epInfo.hnsID, endpointGuid1, svcPortName.String()) } } diff --git a/pkg/proxy/winkernel/testing/hcnutils_mock.go b/pkg/proxy/winkernel/testing/hcnutils_mock.go index 1c25ff62635a4..319f2e10c4748 100644 --- a/pkg/proxy/winkernel/testing/hcnutils_mock.go +++ b/pkg/proxy/winkernel/testing/hcnutils_mock.go @@ -26,34 +26,37 @@ import ( "github.com/Microsoft/hcsshim/hcn" ) +var ( + epIdCounter int + lbIdCounter int + endpointMap map[string]*hcn.HostComputeEndpoint + loadbalancerMap map[string]*hcn.HostComputeLoadBalancer +) + type HcnMock struct { - epIdCounter int - lbIdCounter int - endpointMap map[string]*hcn.HostComputeEndpoint - loadbalancerMap map[string]*hcn.HostComputeLoadBalancer supportedFeatures hcn.SupportedFeatures network *hcn.HostComputeNetwork } func (hcnObj HcnMock) generateEndpointGuid() (endpointId string, endpointName string) { - hcnObj.epIdCounter++ - endpointId = fmt.Sprintf("EPID-%d", hcnObj.epIdCounter) - endpointName = fmt.Sprintf("EPName-%d", hcnObj.epIdCounter) + epIdCounter++ + endpointId = fmt.Sprintf("EPID-%d", epIdCounter) + endpointName = fmt.Sprintf("EPName-%d", epIdCounter) return } func (hcnObj HcnMock) generateLoadbalancerGuid() (loadbalancerId string) { - hcnObj.lbIdCounter++ - loadbalancerId = fmt.Sprintf("LBID-%d", hcnObj.lbIdCounter) + lbIdCounter++ + loadbalancerId = fmt.Sprintf("LBID-%d", lbIdCounter) return } func NewHcnMock(hnsNetwork *hcn.HostComputeNetwork) *HcnMock { + epIdCounter = 0 + lbIdCounter = 0 + endpointMap = make(map[string]*hcn.HostComputeEndpoint) + loadbalancerMap = make(map[string]*hcn.HostComputeLoadBalancer) return &HcnMock{ - epIdCounter: 0, - lbIdCounter: 0, - endpointMap: make(map[string]*hcn.HostComputeEndpoint), - loadbalancerMap: make(map[string]*hcn.HostComputeLoadBalancer), supportedFeatures: hcn.SupportedFeatures{ Api: hcn.ApiSupport{ V2: true, @@ -79,8 +82,8 @@ func (hcnObj HcnMock) PopulateQueriedEndpoints(epId, hnsId, ipAddress, mac strin MacAddress: mac, } - hcnObj.endpointMap[endpoint.Id] = endpoint - hcnObj.endpointMap[endpoint.Name] = endpoint + endpointMap[endpoint.Id] = endpoint + endpointMap[endpoint.Name] = endpoint } func (hcnObj HcnMock) GetNetworkByName(networkName string) (*hcn.HostComputeNetwork, error) { @@ -93,7 +96,7 @@ func (hcnObj HcnMock) GetNetworkByID(networkID string) (*hcn.HostComputeNetwork, func (hcnObj HcnMock) ListEndpoints() ([]hcn.HostComputeEndpoint, error) { var hcnEPList []hcn.HostComputeEndpoint - for _, ep := range hcnObj.endpointMap { + for _, ep := range endpointMap { hcnEPList = append(hcnEPList, *ep) } return hcnEPList, nil @@ -101,7 +104,7 @@ func (hcnObj HcnMock) ListEndpoints() ([]hcn.HostComputeEndpoint, error) { func (hcnObj HcnMock) ListEndpointsOfNetwork(networkId string) ([]hcn.HostComputeEndpoint, error) { var hcnEPList []hcn.HostComputeEndpoint - for _, ep := range hcnObj.endpointMap { + for _, ep := range endpointMap { if ep.HostComputeNetwork == networkId { hcnEPList = append(hcnEPList, *ep) } @@ -110,7 +113,7 @@ func (hcnObj HcnMock) ListEndpointsOfNetwork(networkId string) ([]hcn.HostComput } func (hcnObj HcnMock) GetEndpointByID(endpointId string) (*hcn.HostComputeEndpoint, error) { - if ep, ok := hcnObj.endpointMap[endpointId]; ok { + if ep, ok := endpointMap[endpointId]; ok { return ep, nil } epNotFoundError := hcn.EndpointNotFoundError{EndpointID: endpointId} @@ -118,7 +121,7 @@ func (hcnObj HcnMock) GetEndpointByID(endpointId string) (*hcn.HostComputeEndpoi } func (hcnObj HcnMock) GetEndpointByName(endpointName string) (*hcn.HostComputeEndpoint, 
error) { - if ep, ok := hcnObj.endpointMap[endpointName]; ok { + if ep, ok := endpointMap[endpointName]; ok { return ep, nil } epNotFoundError := hcn.EndpointNotFoundError{EndpointName: endpointName} @@ -129,15 +132,16 @@ func (hcnObj HcnMock) CreateEndpoint(network *hcn.HostComputeNetwork, endpoint * if _, err := hcnObj.GetNetworkByID(network.Id); err != nil { return nil, err } - if _, ok := hcnObj.endpointMap[endpoint.Id]; ok { + if _, ok := endpointMap[endpoint.Id]; ok { return nil, fmt.Errorf("endpoint id %s already present", endpoint.Id) } - if _, ok := hcnObj.endpointMap[endpoint.Name]; ok { + if _, ok := endpointMap[endpoint.Name]; ok { return nil, fmt.Errorf("endpoint Name %s already present", endpoint.Name) } endpoint.Id, endpoint.Name = hcnObj.generateEndpointGuid() - hcnObj.endpointMap[endpoint.Id] = endpoint - hcnObj.endpointMap[endpoint.Name] = endpoint + endpoint.HostComputeNetwork = network.Id + endpointMap[endpoint.Id] = endpoint + endpointMap[endpoint.Name] = endpoint return endpoint, nil } @@ -146,24 +150,24 @@ func (hcnObj HcnMock) CreateRemoteEndpoint(network *hcn.HostComputeNetwork, endp } func (hcnObj HcnMock) DeleteEndpoint(endpoint *hcn.HostComputeEndpoint) error { - if _, ok := hcnObj.endpointMap[endpoint.Id]; !ok { + if _, ok := endpointMap[endpoint.Id]; !ok { return hcn.EndpointNotFoundError{EndpointID: endpoint.Id} } - delete(hcnObj.endpointMap, endpoint.Id) - delete(hcnObj.endpointMap, endpoint.Name) + delete(endpointMap, endpoint.Id) + delete(endpointMap, endpoint.Name) return nil } func (hcnObj HcnMock) ListLoadBalancers() ([]hcn.HostComputeLoadBalancer, error) { var hcnLBList []hcn.HostComputeLoadBalancer - for _, lb := range hcnObj.loadbalancerMap { + for _, lb := range loadbalancerMap { hcnLBList = append(hcnLBList, *lb) } return hcnLBList, nil } func (hcnObj HcnMock) GetLoadBalancerByID(loadBalancerId string) (*hcn.HostComputeLoadBalancer, error) { - if lb, ok := hcnObj.loadbalancerMap[loadBalancerId]; ok { + if lb, ok := loadbalancerMap[loadBalancerId]; ok { return lb, nil } lbNotFoundError := hcn.LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId} @@ -171,19 +175,19 @@ func (hcnObj HcnMock) GetLoadBalancerByID(loadBalancerId string) (*hcn.HostCompu } func (hcnObj HcnMock) CreateLoadBalancer(loadBalancer *hcn.HostComputeLoadBalancer) (*hcn.HostComputeLoadBalancer, error) { - if _, ok := hcnObj.loadbalancerMap[loadBalancer.Id]; ok { + if _, ok := loadbalancerMap[loadBalancer.Id]; ok { return nil, fmt.Errorf("LoadBalancer id %s Already Present", loadBalancer.Id) } loadBalancer.Id = hcnObj.generateLoadbalancerGuid() - hcnObj.loadbalancerMap[loadBalancer.Id] = loadBalancer + loadbalancerMap[loadBalancer.Id] = loadBalancer return loadBalancer, nil } func (hcnObj HcnMock) DeleteLoadBalancer(loadBalancer *hcn.HostComputeLoadBalancer) error { - if _, ok := hcnObj.loadbalancerMap[loadBalancer.Id]; !ok { + if _, ok := loadbalancerMap[loadBalancer.Id]; !ok { return hcn.LoadBalancerNotFoundError{LoadBalancerId: loadBalancer.Id} } - delete(hcnObj.loadbalancerMap, loadBalancer.Id) + delete(loadbalancerMap, loadBalancer.Id) return nil } @@ -206,7 +210,7 @@ func (hcnObj HcnMock) DsrSupported() error { } func (hcnObj HcnMock) DeleteAllHnsLoadBalancerPolicy() { - for k := range hcnObj.loadbalancerMap { - delete(hcnObj.loadbalancerMap, k) + for k := range loadbalancerMap { + delete(loadbalancerMap, k) } } diff --git a/pkg/registry/batch/cronjob/strategy.go b/pkg/registry/batch/cronjob/strategy.go index 62233ba2ae630..8c7eebc6d80d3 100644 --- 
a/pkg/registry/batch/cronjob/strategy.go +++ b/pkg/registry/batch/cronjob/strategy.go @@ -123,9 +123,6 @@ func (cronJobStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) warnings = append(warnings, fmt.Sprintf("metadata.name: this is used in Pod names and hostnames, which can result in surprising behavior; a DNS label is recommended: %v", msgs)) } warnings = append(warnings, job.WarningsForJobSpec(ctx, field.NewPath("spec", "jobTemplate", "spec"), &newCronJob.Spec.JobTemplate.Spec, nil)...) - if strings.Contains(newCronJob.Spec.Schedule, "TZ") { - warnings = append(warnings, fmt.Sprintf("CRON_TZ or TZ used in %s is not officially supported, see https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ for more details", field.NewPath("spec", "spec", "schedule"))) - } return warnings } @@ -160,7 +157,7 @@ func (cronJobStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Ob warnings = job.WarningsForJobSpec(ctx, field.NewPath("spec", "jobTemplate", "spec"), &newCronJob.Spec.JobTemplate.Spec, &oldCronJob.Spec.JobTemplate.Spec) } if strings.Contains(newCronJob.Spec.Schedule, "TZ") { - warnings = append(warnings, fmt.Sprintf("CRON_TZ or TZ used in %s is not officially supported, see https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ for more details", field.NewPath("spec", "spec", "schedule"))) + warnings = append(warnings, fmt.Sprintf("cannot use TZ or CRON_TZ in %s, use timeZone instead, see https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ for more details", field.NewPath("spec", "spec", "schedule"))) } return warnings } diff --git a/pkg/registry/batch/cronjob/strategy_test.go b/pkg/registry/batch/cronjob/strategy_test.go index 86e8e3b090c80..f9b0098eaae9c 100644 --- a/pkg/registry/batch/cronjob/strategy_test.go +++ b/pkg/registry/batch/cronjob/strategy_test.go @@ -273,20 +273,6 @@ func TestCronJobStrategy_WarningsOnCreate(t *testing.T) { }, }, }, - "timezone invalid": { - wantWarningsCount: 1, - cronjob: &batch.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: "mycronjob", - Namespace: metav1.NamespaceDefault, - ResourceVersion: "9", - }, - Spec: cronjobSpecWithTZinSchedule, - Status: batch.CronJobStatus{ - LastScheduleTime: &now, - }, - }, - }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { diff --git a/pkg/registry/batch/job/storage/storage_test.go b/pkg/registry/batch/job/storage/storage_test.go index 765b31795094d..9894b57a3b3f0 100644 --- a/pkg/registry/batch/job/storage/storage_test.go +++ b/pkg/registry/batch/job/storage/storage_test.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "k8s.io/utils/pointer" "testing" batchv1 "k8s.io/api/batch/v1" @@ -137,9 +138,10 @@ func TestCreate(t *testing.T) { // invalid (empty selector) &batch.Job{ Spec: batch.JobSpec{ - Completions: validJob.Spec.Completions, - Selector: &metav1.LabelSelector{}, - Template: validJob.Spec.Template, + ManualSelector: pointer.Bool(false), + Completions: validJob.Spec.Completions, + Selector: &metav1.LabelSelector{}, + Template: validJob.Spec.Template, }, }, ) diff --git a/pkg/registry/batch/job/strategy.go b/pkg/registry/batch/job/strategy.go index 75e253857c15e..cb4b2b3a5ffba 100644 --- a/pkg/registry/batch/job/strategy.go +++ b/pkg/registry/batch/job/strategy.go @@ -92,6 +92,7 @@ func (jobStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { // PrepareForCreate clears the status of a job before creation. 
func (jobStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { job := obj.(*batch.Job) + generateSelectorIfNeeded(job) job.Status = batch.JobStatus{} job.Generation = 1 @@ -163,10 +164,6 @@ func (jobStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object // Validate validates a new job. func (jobStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { job := obj.(*batch.Job) - // TODO: move UID generation earlier and do this in defaulting logic? - if job.Spec.ManualSelector == nil || *job.Spec.ManualSelector == false { - generateSelector(job) - } opts := validationOptionsForJob(job, nil) return batchvalidation.ValidateJob(job, opts) } @@ -213,6 +210,13 @@ func (jobStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []s return warnings } +// generateSelectorIfNeeded checks the job's manual selector flag and generates selector labels if the flag is true. +func generateSelectorIfNeeded(obj *batch.Job) { + if !*obj.Spec.ManualSelector { + generateSelector(obj) + } +} + // generateSelector adds a selector to a job and labels to its template // which can be used to uniquely identify the pods created by that job, // if the user has requested this behavior. diff --git a/pkg/registry/batch/job/strategy_test.go b/pkg/registry/batch/job/strategy_test.go index c2ee060b4a730..4c1058d9e200d 100644 --- a/pkg/registry/batch/job/strategy_test.go +++ b/pkg/registry/batch/job/strategy_test.go @@ -460,6 +460,8 @@ func TestJobStrategy_PrepareForUpdate(t *testing.T) { func TestJobStrategy_PrepareForCreate(t *testing.T) { validSelector := getValidLabelSelector() validPodTemplateSpec := getValidPodTemplateSpecForSelector(validSelector) + validSelectorWithBatchLabels := &metav1.LabelSelector{MatchLabels: getValidBatchLabelsWithNonBatch()} + expectedPodTemplateSpec := getValidPodTemplateSpecForSelector(validSelectorWithBatchLabels) podFailurePolicy := &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ @@ -481,12 +483,31 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { job batch.Job wantJob batch.Job }{ + "generate selectors": { + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: validPodTemplateSpec, + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(1), + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, + }, + }, + }, "create job with a new fields; JobBackoffLimitPerIndex enabled": { enableJobBackoffLimitPerIndex: true, job: batch.Job{ ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ Selector: validSelector, + ManualSelector: pointer.Bool(false), Template: validPodTemplateSpec, BackoffLimitPerIndex: pointer.Int32(1), MaxFailedIndexes: pointer.Int32(1), @@ -496,7 +517,8 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ Selector: validSelector, - Template: validPodTemplateSpec, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, BackoffLimitPerIndex: pointer.Int32(1), MaxFailedIndexes: pointer.Int32(1), }, @@ -508,6 +530,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ Selector: validSelector, + ManualSelector: pointer.Bool(false), Template: validPodTemplateSpec, BackoffLimitPerIndex: pointer.Int32(1), MaxFailedIndexes: pointer.Int32(1), @@ -517,7 +540,8 @@ func 
TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ Selector: validSelector, - Template: validPodTemplateSpec, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, BackoffLimitPerIndex: nil, MaxFailedIndexes: nil, }, @@ -529,6 +553,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ Selector: validSelector, + ManualSelector: pointer.Bool(false), Template: validPodTemplateSpec, PodFailurePolicy: podFailurePolicy, }, @@ -537,7 +562,8 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ Selector: validSelector, - Template: validPodTemplateSpec, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, PodFailurePolicy: podFailurePolicy, }, }, @@ -548,6 +574,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ Selector: validSelector, + ManualSelector: pointer.Bool(false), Template: validPodTemplateSpec, PodReplacementPolicy: podReplacementPolicy(batch.Failed), }, @@ -556,7 +583,8 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ Selector: validSelector, - Template: validPodTemplateSpec, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, PodReplacementPolicy: podReplacementPolicy(batch.Failed), }, }, @@ -567,6 +595,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ Selector: validSelector, + ManualSelector: pointer.Bool(false), Template: validPodTemplateSpec, PodReplacementPolicy: podReplacementPolicy(batch.Failed), }, @@ -575,7 +604,8 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ Selector: validSelector, - Template: validPodTemplateSpec, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, PodReplacementPolicy: nil, }, }, @@ -586,6 +616,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ Selector: validSelector, + ManualSelector: pointer.Bool(false), Template: validPodTemplateSpec, PodFailurePolicy: podFailurePolicy, }, @@ -594,7 +625,8 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ Selector: validSelector, - Template: validPodTemplateSpec, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, PodFailurePolicy: nil, }, }, @@ -603,8 +635,9 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { job: batch.Job{ ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ - Selector: validSelector, - Template: validPodTemplateSpec, + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: validPodTemplateSpec, }, Status: batch.JobStatus{ Active: 1, @@ -613,8 +646,9 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { wantJob: batch.Job{ ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ - Selector: validSelector, - Template: validPodTemplateSpec, + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, }, }, }, @@ -625,6 +659,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ Selector: validSelector, + ManualSelector: pointer.Bool(false), Template: validPodTemplateSpec, BackoffLimitPerIndex: 
pointer.Int32(1), PodFailurePolicy: &batch.PodFailurePolicy{ @@ -643,8 +678,9 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { wantJob: batch.Job{ ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ - Selector: validSelector, - Template: validPodTemplateSpec, + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{}, }, @@ -658,6 +694,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { ObjectMeta: getValidObjectMeta(0), Spec: batch.JobSpec{ Selector: validSelector, + ManualSelector: pointer.Bool(false), Template: validPodTemplateSpec, BackoffLimitPerIndex: pointer.Int32(1), PodFailurePolicy: &batch.PodFailurePolicy{ @@ -690,8 +727,9 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { wantJob: batch.Job{ ObjectMeta: getValidObjectMeta(1), Spec: batch.JobSpec{ - Selector: validSelector, - Template: validPodTemplateSpec, + Selector: validSelector, + ManualSelector: pointer.Bool(false), + Template: expectedPodTemplateSpec, PodFailurePolicy: &batch.PodFailurePolicy{ Rules: []batch.PodFailurePolicyRule{ { @@ -1291,12 +1329,11 @@ func TestJobStrategy_WarningsOnCreate(t *testing.T) { func TestJobStrategy_Validate(t *testing.T) { ctx := genericapirequest.NewDefaultContext() - theUID := types.UID("1a2b3c4d5e6f7g8h9i0k") - validSelector := &metav1.LabelSelector{ - MatchLabels: map[string]string{"a": "b"}, - } - validLabels := map[string]string{batch.LegacyJobNameLabel: "myjob2", batch.JobNameLabel: "myjob2", batch.LegacyControllerUidLabel: string(theUID), batch.ControllerUidLabel: string(theUID)} - labelsWithNonBatch := map[string]string{"a": "b", batch.LegacyJobNameLabel: "myjob2", batch.JobNameLabel: "myjob2", batch.LegacyControllerUidLabel: string(theUID), batch.ControllerUidLabel: string(theUID)} + theUID := getValidUID() + validSelector := getValidLabelSelector() + batchLabels := getValidBatchLabels() + labelsWithNonBatch := getValidBatchLabelsWithNonBatch() + defaultSelector := &metav1.LabelSelector{MatchLabels: map[string]string{batch.ControllerUidLabel: string(theUID)}} validPodSpec := api.PodSpec{ RestartPolicy: api.RestartPolicyOnFailure, DNSPolicy: api.DNSClusterFirst, @@ -1304,11 +1341,7 @@ func TestJobStrategy_Validate(t *testing.T) { } validPodSpecNever := *validPodSpec.DeepCopy() validPodSpecNever.RestartPolicy = api.RestartPolicyNever - validObjectMeta := metav1.ObjectMeta{ - Name: "myjob2", - Namespace: metav1.NamespaceDefault, - UID: theUID, - } + validObjectMeta := getValidObjectMeta(0) testcases := map[string]struct { enableJobPodFailurePolicy bool enableJobBackoffLimitPerIndex bool @@ -1316,14 +1349,15 @@ func TestJobStrategy_Validate(t *testing.T) { wantJob *batch.Job wantWarningCount int32 }{ - "valid job with labels in pod template": { + "valid job with batch labels in pod template": { job: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: nil, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: validSelector.MatchLabels, + Labels: batchLabels, }, Spec: validPodSpec, }}, @@ -1331,44 +1365,51 @@ func TestJobStrategy_Validate(t *testing.T) { wantJob: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{batch.ControllerUidLabel: string(theUID)}}, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ 
ObjectMeta: metav1.ObjectMeta{ - Labels: validSelector.MatchLabels, + Labels: batchLabels, }, Spec: validPodSpec, }}, }, }, - "no labels in job": { + "valid job with batch and non-batch labels in pod template": { job: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: nil, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labelsWithNonBatch, + }, Spec: validPodSpec, }}, }, wantJob: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{batch.ControllerUidLabel: string(theUID)}}, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: validLabels, + Labels: labelsWithNonBatch, }, Spec: validPodSpec, }}, }, }, - "labels exist": { + "job with non-batch labels and without batch labels in pod template": { job: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: nil, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: labelsWithNonBatch, + Labels: map[string]string{}, }, Spec: validPodSpec, }}, @@ -1376,14 +1417,35 @@ func TestJobStrategy_Validate(t *testing.T) { wantJob: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{batch.ControllerUidLabel: string(theUID)}}, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: labelsWithNonBatch, + Labels: map[string]string{}, }, Spec: validPodSpec, }}, }, + wantWarningCount: 5, + }, + "no labels in job": { + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Selector: defaultSelector, + Template: api.PodTemplateSpec{ + Spec: validPodSpec, + }}, + }, + wantJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Selector: defaultSelector, + Template: api.PodTemplateSpec{ + Spec: validPodSpec, + }}, + }, + wantWarningCount: 5, }, "manual selector; do not generate labels": { job: &batch.Job{ @@ -1419,10 +1481,11 @@ func TestJobStrategy_Validate(t *testing.T) { job: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: nil, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: validSelector.MatchLabels, + Labels: labelsWithNonBatch, }, Spec: validPodSpec, }, @@ -1435,7 +1498,8 @@ func TestJobStrategy_Validate(t *testing.T) { wantJob: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{batch.ControllerUidLabel: string(theUID)}}, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labelsWithNonBatch, @@ -1453,10 +1517,11 @@ func TestJobStrategy_Validate(t *testing.T) { job: &batch.Job{ ObjectMeta: validObjectMeta, Spec: batch.JobSpec{ - Selector: nil, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: validSelector.MatchLabels, + Labels: labelsWithNonBatch, }, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyOnFailure, @@ -1470,7 +1535,8 @@ func TestJobStrategy_Validate(t *testing.T) { wantJob: &batch.Job{ ObjectMeta: validObjectMeta, 
Spec: batch.JobSpec{ - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{batch.ControllerUidLabel: string(theUID)}}, + Selector: defaultSelector, + ManualSelector: pointer.Bool(false), Template: api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labelsWithNonBatch, @@ -1949,9 +2015,14 @@ func getValidObjectMeta(generation int64) metav1.ObjectMeta { return getValidObjectMetaWithAnnotations(generation, nil) } +func getValidUID() types.UID { + return "1a2b3c4d5e6f7g8h9i0k" +} + func getValidObjectMetaWithAnnotations(generation int64, annotations map[string]string) metav1.ObjectMeta { return metav1.ObjectMeta{ Name: "myjob", + UID: getValidUID(), Namespace: metav1.NamespaceDefault, Generation: generation, Annotations: annotations, @@ -1964,6 +2035,16 @@ func getValidLabelSelector() *metav1.LabelSelector { } } +func getValidBatchLabels() map[string]string { + theUID := getValidUID() + return map[string]string{batch.LegacyJobNameLabel: "myjob", batch.JobNameLabel: "myjob", batch.LegacyControllerUidLabel: string(theUID), batch.ControllerUidLabel: string(theUID)} +} + +func getValidBatchLabelsWithNonBatch() map[string]string { + theUID := getValidUID() + return map[string]string{"a": "b", batch.LegacyJobNameLabel: "myjob", batch.JobNameLabel: "myjob", batch.LegacyControllerUidLabel: string(theUID), batch.ControllerUidLabel: string(theUID)} +} + func getValidPodTemplateSpecForSelector(validSelector *metav1.LabelSelector) api.PodTemplateSpec { return api.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/registry/core/pod/rest/subresources.go b/pkg/registry/core/pod/rest/subresources.go index 76e0cdd4ffb52..0e031412fbdea 100644 --- a/pkg/registry/core/pod/rest/subresources.go +++ b/pkg/registry/core/pod/rest/subresources.go @@ -23,12 +23,16 @@ import ( "net/url" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/httpstream/wsstream" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/proxy" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" + utilfeature "k8s.io/apiserver/pkg/util/feature" + translator "k8s.io/apiserver/pkg/util/proxy" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/registry/core/pod" ) @@ -113,7 +117,21 @@ func (r *AttachREST) Connect(ctx context.Context, name string, opts runtime.Obje if err != nil { return nil, err } - return newThrottledUpgradeAwareProxyHandler(location, transport, false, true, responder), nil + handler := newThrottledUpgradeAwareProxyHandler(location, transport, false, true, responder) + if utilfeature.DefaultFeatureGate.Enabled(features.TranslateStreamCloseWebsocketRequests) { + // Wrap the upgrade aware handler to implement stream translation + // for WebSocket/V5 upgrade requests. 
+ streamOptions := translator.Options{ + Stdin: attachOpts.Stdin, + Stdout: attachOpts.Stdout, + Stderr: attachOpts.Stderr, + Tty: attachOpts.TTY, + } + maxBytesPerSec := capabilities.Get().PerConnectionBandwidthLimitBytesPerSec + streamtranslator := translator.NewStreamTranslatorHandler(location, transport, maxBytesPerSec, streamOptions) + handler = translator.NewTranslatingHandler(handler, streamtranslator, wsstream.IsWebSocketRequestWithStreamCloseProtocol) + } + return handler, nil } // NewConnectOptions returns the versioned object that represents exec parameters @@ -156,7 +174,21 @@ func (r *ExecREST) Connect(ctx context.Context, name string, opts runtime.Object if err != nil { return nil, err } - return newThrottledUpgradeAwareProxyHandler(location, transport, false, true, responder), nil + handler := newThrottledUpgradeAwareProxyHandler(location, transport, false, true, responder) + if utilfeature.DefaultFeatureGate.Enabled(features.TranslateStreamCloseWebsocketRequests) { + // Wrap the upgrade aware handler to implement stream translation + // for WebSocket/V5 upgrade requests. + streamOptions := translator.Options{ + Stdin: execOpts.Stdin, + Stdout: execOpts.Stdout, + Stderr: execOpts.Stderr, + Tty: execOpts.TTY, + } + maxBytesPerSec := capabilities.Get().PerConnectionBandwidthLimitBytesPerSec + streamtranslator := translator.NewStreamTranslatorHandler(location, transport, maxBytesPerSec, streamOptions) + handler = translator.NewTranslatingHandler(handler, streamtranslator, wsstream.IsWebSocketRequestWithStreamCloseProtocol) + } + return handler, nil } // NewConnectOptions returns the versioned object that represents exec parameters @@ -213,7 +245,7 @@ func (r *PortForwardREST) Connect(ctx context.Context, name string, opts runtime return newThrottledUpgradeAwareProxyHandler(location, transport, false, true, responder), nil } -func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder rest.Responder) *proxy.UpgradeAwareHandler { +func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder rest.Responder) http.Handler { handler := proxy.NewUpgradeAwareHandler(location, transport, wrapTransport, upgradeRequired, proxy.NewErrorResponder(responder)) handler.MaxBytesPerSec = capabilities.Get().PerConnectionBandwidthLimitBytesPerSec return handler diff --git a/pkg/registry/core/pod/strategy.go b/pkg/registry/core/pod/strategy.go index f638eb64e2e5e..c31ee7b018054 100644 --- a/pkg/registry/core/pod/strategy.go +++ b/pkg/registry/core/pod/strategy.go @@ -90,6 +90,7 @@ func (podStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { podutil.DropDisabledPodFields(pod, nil) applyWaitingForSchedulingGatesCondition(pod) + mutatePodAffinity(pod) } // PrepareForUpdate clears fields that are not allowed to be set by end users on update. 
@@ -669,6 +670,56 @@ func validateContainer(container string, pod *api.Pod) (string, error) { return container, nil } +// applyLabelKeysToLabelSelector looks up each key in labelKeys in the given pod labels, +// and merges a corresponding requirement into the LabelSelector with the given operator. +func applyLabelKeysToLabelSelector(labelSelector *metav1.LabelSelector, labelKeys []string, operator metav1.LabelSelectorOperator, podLabels map[string]string) { + for _, key := range labelKeys { + if value, ok := podLabels[key]; ok { + labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, metav1.LabelSelectorRequirement{ + Key: key, + Operator: operator, + Values: []string{value}, + }) + } + } +} + +// applyMatchLabelKeysAndMismatchLabelKeys looks up the pod labels by the keys in matchLabelKeys or mismatchLabelKeys, +// and merges them into the LabelSelector of the PodAffinityTerm depending on the field: +// - If matchLabelKeys, key in (value) is merged with LabelSelector. +// - If mismatchLabelKeys, key notin (value) is merged with LabelSelector. +func applyMatchLabelKeysAndMismatchLabelKeys(term *api.PodAffinityTerm, label map[string]string) { + if (len(term.MatchLabelKeys) == 0 && len(term.MismatchLabelKeys) == 0) || term.LabelSelector == nil { + // If LabelSelector is nil, we don't need to apply label keys to it because a nil LabelSelector matches nothing. + return + } + + applyLabelKeysToLabelSelector(term.LabelSelector, term.MatchLabelKeys, metav1.LabelSelectorOpIn, label) + applyLabelKeysToLabelSelector(term.LabelSelector, term.MismatchLabelKeys, metav1.LabelSelectorOpNotIn, label) +} + +func mutatePodAffinity(pod *api.Pod) { + if !utilfeature.DefaultFeatureGate.Enabled(features.MatchLabelKeysInPodAffinity) || pod.Spec.Affinity == nil { + return + } + if affinity := pod.Spec.Affinity.PodAffinity; affinity != nil { + for i := range affinity.PreferredDuringSchedulingIgnoredDuringExecution { + applyMatchLabelKeysAndMismatchLabelKeys(&affinity.PreferredDuringSchedulingIgnoredDuringExecution[i].PodAffinityTerm, pod.Labels) + } + for i := range affinity.RequiredDuringSchedulingIgnoredDuringExecution { + applyMatchLabelKeysAndMismatchLabelKeys(&affinity.RequiredDuringSchedulingIgnoredDuringExecution[i], pod.Labels) + } + } + if affinity := pod.Spec.Affinity.PodAntiAffinity; affinity != nil { + for i := range affinity.PreferredDuringSchedulingIgnoredDuringExecution { + applyMatchLabelKeysAndMismatchLabelKeys(&affinity.PreferredDuringSchedulingIgnoredDuringExecution[i].PodAffinityTerm, pod.Labels) + } + for i := range affinity.RequiredDuringSchedulingIgnoredDuringExecution { + applyMatchLabelKeysAndMismatchLabelKeys(&affinity.RequiredDuringSchedulingIgnoredDuringExecution[i], pod.Labels) + } + } +} + + // applyWaitingForSchedulingGatesCondition adds a {type:PodScheduled, reason:WaitingForGates} condition // to a new-created Pod if necessary.
func applyWaitingForSchedulingGatesCondition(pod *api.Pod) { diff --git a/pkg/registry/core/pod/strategy_test.go b/pkg/registry/core/pod/strategy_test.go index 76f589a164202..6317af86da641 100644 --- a/pkg/registry/core/pod/strategy_test.go +++ b/pkg/registry/core/pod/strategy_test.go @@ -1672,3 +1672,362 @@ func TestNodeInclusionPolicyEnablementInUpdating(t *testing.T) { t.Error("NodeInclusionPolicy updated with unexpected result") } } + +func Test_mutatePodAffinity(t *testing.T) { + tests := []struct { + name string + pod *api.Pod + wantPod *api.Pod + featureGateEnabled bool + }{ + { + name: "matchLabelKeys are merged into labelSelector with In and mismatchLabelKeys are merged with NotIn", + featureGateEnabled: true, + pod: &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "country": "Japan", + "city": "Kyoto", + }, + }, + Spec: api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + }, + }, + PodAntiAffinity: &api.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + }, + }, + }, + }, + }, + wantPod: &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "country": "Japan", + "city": "Kyoto", + }, + }, + Spec: api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "country", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"Japan"}, + }, + { + Key: "city", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"Kyoto"}, + }, + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "country", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"Japan"}, + }, + { + Key: "city", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"Kyoto"}, + }, + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + }, + }, + PodAntiAffinity: &api.PodAntiAffinity{ + 
RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "country", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"Japan"}, + }, + { + Key: "city", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"Kyoto"}, + }, + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{ + { + PodAffinityTerm: api.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "country", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"Japan"}, + }, + { + Key: "city", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"Kyoto"}, + }, + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "keys, which are not found in Pod labels, are ignored", + featureGateEnabled: true, + pod: &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "country": "Japan", + "city": "Kyoto", + }, + }, + Spec: api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + }, + MatchLabelKeys: []string{"city", "not-found"}, + }, + }, + }, + }, + }, + }, + wantPod: &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "country": "Japan", + "city": "Kyoto", + }, + }, + Spec: api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + // "city" should be added correctly even if matchLabelKey has "not-found" key. 
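+							// The "not-found" key is skipped, since applyLabelKeysToLabelSelector only merges keys that exist in the pod's labels.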
+ { + Key: "city", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"Kyoto"}, + }, + }, + }, + MatchLabelKeys: []string{"city", "not-found"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "matchLabelKeys is ignored if the labelSelector is nil", + featureGateEnabled: true, + pod: &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "country": "Japan", + "city": "Kyoto", + }, + }, + Spec: api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + }, + }, + }, + }, + wantPod: &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "country": "Japan", + "city": "Kyoto", + }, + }, + Spec: api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "the feature gate is disabled and matchLabelKeys is ignored", + pod: &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "country": "Japan", + "city": "Kyoto", + }, + }, + Spec: api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + }, + }, + }, + }, + wantPod: &api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "country": "Japan", + "city": "Kyoto", + }, + }, + Spec: api.PodSpec{ + Affinity: &api.Affinity{ + PodAffinity: &api.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "region": "Asia", + }, + }, + MatchLabelKeys: []string{"country"}, + MismatchLabelKeys: []string{"city"}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodAffinity, tc.featureGateEnabled)() + + pod := tc.pod + mutatePodAffinity(pod) + if diff := cmp.Diff(tc.wantPod.Spec.Affinity, pod.Spec.Affinity); diff != "" { + t.Errorf("unexpected affinity (-want, +got): %s\n", diff) + } + }) + } +} diff --git a/pkg/registry/core/rest/storage_core_generic.go b/pkg/registry/core/rest/storage_core_generic.go index 3a61ad3417ad3..2299481e878f3 100644 --- a/pkg/registry/core/rest/storage_core_generic.go +++ b/pkg/registry/core/rest/storage_core_generic.go @@ -17,9 +17,14 @@ limitations under the License. 
package rest import ( + "context" "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" @@ -90,9 +95,9 @@ func (c *GenericConfig) NewRESTStorage(apiResourceConfigSource serverstorage.API var serviceAccountStorage *serviceaccountstore.REST if c.ServiceAccountIssuer != nil { - serviceAccountStorage, err = serviceaccountstore.NewREST(restOptionsGetter, c.ServiceAccountIssuer, c.APIAudiences, c.ServiceAccountMaxExpiration, nil, secretStorage.Store, c.ExtendExpiration) + serviceAccountStorage, err = serviceaccountstore.NewREST(restOptionsGetter, c.ServiceAccountIssuer, c.APIAudiences, c.ServiceAccountMaxExpiration, newNotFoundGetter(schema.GroupResource{Resource: "pods"}), secretStorage.Store, c.ExtendExpiration) } else { - serviceAccountStorage, err = serviceaccountstore.NewREST(restOptionsGetter, nil, nil, 0, nil, nil, false) + serviceAccountStorage, err = serviceaccountstore.NewREST(restOptionsGetter, nil, nil, 0, newNotFoundGetter(schema.GroupResource{Resource: "pods"}), newNotFoundGetter(schema.GroupResource{Resource: "secrets"}), false) } if err != nil { return genericapiserver.APIGroupInfo{}, err @@ -139,3 +144,15 @@ func (c *GenericConfig) NewRESTStorage(apiResourceConfigSource serverstorage.API func (c *GenericConfig) GroupName() string { return api.GroupName } + +func newNotFoundGetter(gr schema.GroupResource) rest.Getter { + return notFoundGetter{gr: gr} +} + +type notFoundGetter struct { + gr schema.GroupResource +} + +func (g notFoundGetter) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { + return nil, errors.NewNotFound(g.gr, name) +} diff --git a/pkg/registry/core/service/ipallocator/controller/metrics.go b/pkg/registry/core/service/ipallocator/controller/metrics.go new file mode 100644 index 0000000000000..8bfe6b2303968 --- /dev/null +++ b/pkg/registry/core/service/ipallocator/controller/metrics.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "sync" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "apiserver" + subsystem = "clusterip_repair" +) + +var ( + // clusterIPRepairIPErrors indicates the number of errors found by the repair loop + // divided by the type of error: + // leak, repair, full, outOfRange, duplicate, invalid, unknown + clusterIPRepairIPErrors = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "ip_errors_total", + Help: "Number of errors detected on clusterips by the repair loop broken down by type of error: leak, repair, full, outOfRange, duplicate, unknown, invalid", + StabilityLevel: metrics.ALPHA, + }, + []string{"type"}, + ) + // clusterIPRepairReconcileErrors indicates the number of times the repair loop has failed to repair + // the errors it detected. + clusterIPRepairReconcileErrors = metrics.NewCounter( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "reconcile_errors_total", + Help: "Number of reconciliation failures on the clusterip repair reconcile loop", + StabilityLevel: metrics.ALPHA, + }, + ) +) + +var registerMetricsOnce sync.Once + +func registerMetrics() { + registerMetricsOnce.Do(func() { + legacyregistry.MustRegister(clusterIPRepairIPErrors) + legacyregistry.MustRegister(clusterIPRepairReconcileErrors) + }) +} diff --git a/pkg/registry/core/service/ipallocator/controller/repair.go b/pkg/registry/core/service/ipallocator/controller/repair.go index f568e4fdb59c4..ba4ce975582e1 100644 --- a/pkg/registry/core/service/ipallocator/controller/repair.go +++ b/pkg/registry/core/service/ipallocator/controller/repair.go @@ -101,6 +101,8 @@ func NewRepair(interval time.Duration, serviceClient corev1client.ServicesGetter leaksByFamily[secondary] = make(map[string]int) } + registerMetrics() + return &Repair{ interval: interval, serviceClient: serviceClient, @@ -131,7 +133,13 @@ func (c *Repair) RunUntil(onFirstSuccess func(), stopCh chan struct{}) { // runOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs. func (c *Repair) runOnce() error { - return retry.RetryOnConflict(retry.DefaultBackoff, c.doRunOnce) + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + err := c.doRunOnce() + if err != nil { + clusterIPRepairReconcileErrors.Inc() + } + return err + }) } // doRunOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs. 
@@ -222,6 +230,7 @@ func (c *Repair) doRunOnce() error { ip := netutils.ParseIPSloppy(ip) if ip == nil { // cluster IP is corrupt + clusterIPRepairIPErrors.WithLabelValues("invalid").Inc() c.recorder.Eventf(&svc, nil, v1.EventTypeWarning, "ClusterIPNotValid", "ClusterIPValidation", "Cluster IP %s is not a valid IP; please recreate service", ip) runtime.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not a valid IP; please recreate", ip, svc.Name, svc.Namespace)) continue @@ -230,6 +239,7 @@ func (c *Repair) doRunOnce() error { family := getFamilyByIP(ip) if _, ok := rebuiltByFamily[family]; !ok { // this service is using an IPFamily no longer configured on cluster + clusterIPRepairIPErrors.WithLabelValues("invalid").Inc() c.recorder.Eventf(&svc, nil, v1.EventTypeWarning, "ClusterIPNotValid", "ClusterIPValidation", "Cluster IP %s(%s) is of ip family that is no longer configured on cluster; please recreate service", ip, family) runtime.HandleError(fmt.Errorf("the cluster IP %s(%s) for service %s/%s is of ip family that is no longer configured on cluster; please recreate", ip, family, svc.Name, svc.Namespace)) continue @@ -245,24 +255,29 @@ func (c *Repair) doRunOnce() error { actualStored.Release(ip) } else { // cluster IP doesn't seem to be allocated + clusterIPRepairIPErrors.WithLabelValues("repair").Inc() c.recorder.Eventf(&svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "Cluster IP [%v]:%s is not allocated; repairing", family, ip) runtime.HandleError(fmt.Errorf("the cluster IP [%v]:%s for service %s/%s is not allocated; repairing", family, ip, svc.Name, svc.Namespace)) } delete(c.leaksByFamily[family], ip.String()) // it is used, so it can't be leaked case ipallocator.ErrAllocated: // cluster IP is duplicate + clusterIPRepairIPErrors.WithLabelValues("duplicate").Inc() c.recorder.Eventf(&svc, nil, v1.EventTypeWarning, "ClusterIPAlreadyAllocated", "ClusterIPAllocation", "Cluster IP [%v]:%s was assigned to multiple services; please recreate service", family, ip) runtime.HandleError(fmt.Errorf("the cluster IP [%v]:%s for service %s/%s was assigned to multiple services; please recreate", family, ip, svc.Name, svc.Namespace)) case err.(*ipallocator.ErrNotInRange): // cluster IP is out of range + clusterIPRepairIPErrors.WithLabelValues("outOfRange").Inc() c.recorder.Eventf(&svc, nil, v1.EventTypeWarning, "ClusterIPOutOfRange", "ClusterIPAllocation", "Cluster IP [%v]:%s is not within the service CIDR %s; please recreate service", family, ip, c.networkByFamily[family]) runtime.HandleError(fmt.Errorf("the cluster IP [%v]:%s for service %s/%s is not within the service CIDR %s; please recreate", family, ip, svc.Name, svc.Namespace, c.networkByFamily[family])) case ipallocator.ErrFull: // somehow we are out of IPs + clusterIPRepairIPErrors.WithLabelValues("full").Inc() cidr := actualAlloc.CIDR() c.recorder.Eventf(&svc, nil, v1.EventTypeWarning, "ServiceCIDRFull", "ClusterIPAllocation", "Service CIDR %v is full; you must widen the CIDR in order to create new services for Cluster IP [%v]:%s", cidr, family, ip) return fmt.Errorf("the service CIDR %v is full; you must widen the CIDR in order to create new services for Cluster IP [%v]:%s", cidr, family, ip) default: + clusterIPRepairIPErrors.WithLabelValues("unknown").Inc() c.recorder.Eventf(&svc, nil, v1.EventTypeWarning, "UnknownError", "ClusterIPAllocation", "Unable to allocate cluster IP [%v]:%s due to an unknown error", family, ip) return fmt.Errorf("unable to allocate cluster IP [%v]:%s for service %s/%s 
due to an unknown error, exiting: %v", family, ip, svc.Name, svc.Namespace, err) } @@ -314,9 +329,11 @@ func (c *Repair) checkLeaked(leaks map[string]int, stored ipallocator.Interface, // pretend it is still in use until count expires leaks[ip.String()] = count - 1 if err := rebuilt.Allocate(ip); err != nil { + // do not increment the metric here, if it is a leak it will be detected once the counter gets to 0 runtime.HandleError(fmt.Errorf("the cluster IP %s may have leaked, but can not be allocated: %v", ip, err)) } default: + clusterIPRepairIPErrors.WithLabelValues("leak").Inc() // do not add it to the rebuilt set, which means it will be available for reuse runtime.HandleError(fmt.Errorf("the cluster IP %s appears to have leaked: cleaning up", ip)) } diff --git a/pkg/registry/core/service/ipallocator/controller/repair_test.go b/pkg/registry/core/service/ipallocator/controller/repair_test.go index a3d2144ecc3cb..2fdc06db5aaaf 100644 --- a/pkg/registry/core/service/ipallocator/controller/repair_test.go +++ b/pkg/registry/core/service/ipallocator/controller/repair_test.go @@ -25,6 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" + "k8s.io/component-base/metrics/testutil" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" netutils "k8s.io/utils/net" @@ -77,6 +78,8 @@ func TestRepair(t *testing.T) { } func TestRepairLeak(t *testing.T) { + clearMetrics() + _, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24") previous, err := ipallocator.NewInMemory(cidr) if err != nil { @@ -126,9 +129,21 @@ func TestRepairLeak(t *testing.T) { if after.Has(netutils.ParseIPSloppy("192.168.1.10")) { t.Errorf("expected ipallocator to not have leaked IP") } + em := testMetrics{ + leak: 1, + repair: 0, + outOfRange: 0, + duplicate: 0, + unknown: 0, + invalid: 0, + full: 0, + } + expectMetrics(t, em) } func TestRepairWithExisting(t *testing.T) { + clearMetrics() + _, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24") previous, err := ipallocator.NewInMemory(cidr) if err != nil { @@ -213,6 +228,16 @@ func TestRepairWithExisting(t *testing.T) { if free := after.Free(); free != 252 { t.Errorf("unexpected ipallocator state: %d free (expected 252)", free) } + em := testMetrics{ + leak: 0, + repair: 2, + outOfRange: 1, + duplicate: 1, + unknown: 0, + invalid: 0, + full: 0, + } + expectMetrics(t, em) } func makeRangeRegistry(t *testing.T, cidrRange string) *mockRangeRegistry { @@ -323,6 +348,8 @@ func TestShouldWorkOnSecondary(t *testing.T) { } func TestRepairDualStack(t *testing.T) { + clearMetrics() + fakeClient := fake.NewSimpleClientset() ipregistry := &mockRangeRegistry{ item: &api.RangeAllocation{Range: "192.168.1.0/24"}, @@ -345,6 +372,14 @@ func TestRepairDualStack(t *testing.T) { t.Errorf("unexpected ipregistry: %#v", ipregistry) } + repairErrors, err := testutil.GetCounterMetricValue(clusterIPRepairReconcileErrors) + if err != nil { + t.Errorf("failed to get %s value, err: %v", clusterIPRepairReconcileErrors.Name, err) + } + if repairErrors != 0 { + t.Fatalf("0 error expected, got %v", repairErrors) + } + ipregistry = &mockRangeRegistry{ item: &api.RangeAllocation{Range: "192.168.1.0/24"}, updateErr: fmt.Errorf("test error"), @@ -358,9 +393,17 @@ func TestRepairDualStack(t *testing.T) { if err := r.runOnce(); !strings.Contains(err.Error(), ": test error") { t.Fatal(err) } + repairErrors, err = testutil.GetCounterMetricValue(clusterIPRepairReconcileErrors) + if err != nil { + 
t.Errorf("failed to get %s value, err: %v", clusterIPRepairReconcileErrors.Name, err) + } + if repairErrors != 1 { + t.Fatalf("1 error expected, got %v", repairErrors) + } } func TestRepairLeakDualStack(t *testing.T) { + clearMetrics() _, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24") previous, err := ipallocator.NewInMemory(cidr) if err != nil { @@ -449,9 +492,22 @@ func TestRepairLeakDualStack(t *testing.T) { if secondaryAfter.Has(netutils.ParseIPSloppy("2000::1")) { t.Errorf("expected ipallocator to not have leaked IP") } + + em := testMetrics{ + leak: 2, + repair: 0, + outOfRange: 0, + duplicate: 0, + unknown: 0, + invalid: 0, + full: 0, + } + expectMetrics(t, em) + } func TestRepairWithExistingDualStack(t *testing.T) { + clearMetrics() // because anything (other than allocator) depends // on families assigned to service (not the value of IPFamilyPolicy) // we can saftly create tests that has ipFamilyPolicy:nil @@ -621,4 +677,67 @@ func TestRepairWithExistingDualStack(t *testing.T) { if free := secondaryAfter.Free(); free != 65533 { t.Errorf("unexpected ipallocator state: %d free (number of free ips is not 65532)", free) } + em := testMetrics{ + leak: 0, + repair: 5, + outOfRange: 6, + duplicate: 3, + unknown: 0, + invalid: 0, + full: 0, + } + expectMetrics(t, em) +} + +// Metrics helpers +func clearMetrics() { + clusterIPRepairIPErrors.Reset() + clusterIPRepairReconcileErrors.Reset() +} + +type testMetrics struct { + leak float64 + repair float64 + outOfRange float64 + full float64 + duplicate float64 + invalid float64 + unknown float64 +} + +func expectMetrics(t *testing.T, em testMetrics) { + var m testMetrics + var err error + + m.leak, err = testutil.GetCounterMetricValue(clusterIPRepairIPErrors.WithLabelValues("leak")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", clusterIPRepairIPErrors.Name, err) + } + m.repair, err = testutil.GetCounterMetricValue(clusterIPRepairIPErrors.WithLabelValues("repair")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", clusterIPRepairIPErrors.Name, err) + } + m.outOfRange, err = testutil.GetCounterMetricValue(clusterIPRepairIPErrors.WithLabelValues("outOfRange")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", clusterIPRepairIPErrors.Name, err) + } + m.duplicate, err = testutil.GetCounterMetricValue(clusterIPRepairIPErrors.WithLabelValues("duplicate")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", clusterIPRepairIPErrors.Name, err) + } + m.invalid, err = testutil.GetCounterMetricValue(clusterIPRepairIPErrors.WithLabelValues("invalid")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", clusterIPRepairIPErrors.Name, err) + } + m.full, err = testutil.GetCounterMetricValue(clusterIPRepairIPErrors.WithLabelValues("full")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", clusterIPRepairIPErrors.Name, err) + } + m.unknown, err = testutil.GetCounterMetricValue(clusterIPRepairIPErrors.WithLabelValues("unknown")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", clusterIPRepairIPErrors.Name, err) + } + if m != em { + t.Fatalf("metrics error: expected %v, received %v", em, m) + } } diff --git a/pkg/registry/core/service/ipallocator/controller/repairip.go b/pkg/registry/core/service/ipallocator/controller/repairip.go index fec0550ec6d78..aa4b19cc56ac1 100644 --- a/pkg/registry/core/service/ipallocator/controller/repairip.go +++ b/pkg/registry/core/service/ipallocator/controller/repairip.go @@ -552,7 +552,6 @@ func serviceToRef(svc *v1.Service) 
*networkingv1alpha1.ParentReference { Resource: "services", Namespace: svc.Namespace, Name: svc.Name, - UID: svc.UID, } } diff --git a/pkg/registry/core/service/ipallocator/ipallocator.go b/pkg/registry/core/service/ipallocator/ipallocator.go index d407ddf5fdae9..b5e49bd37a4b1 100644 --- a/pkg/registry/core/service/ipallocator/ipallocator.go +++ b/pkg/registry/core/service/ipallocator/ipallocator.go @@ -571,6 +571,5 @@ func serviceToRef(svc *api.Service) *networkingv1alpha1.ParentReference { Resource: "services", Namespace: svc.Namespace, Name: svc.Name, - UID: svc.UID, } } diff --git a/pkg/registry/core/service/portallocator/controller/metrics.go b/pkg/registry/core/service/portallocator/controller/metrics.go new file mode 100644 index 0000000000000..c6766a9003be1 --- /dev/null +++ b/pkg/registry/core/service/portallocator/controller/metrics.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "sync" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "apiserver" + subsystem = "nodeport_repair" +) + +var ( + // nodePortRepairPortErrors indicates the number of errors found by the repair loop + // divided by the type of error: + // leak, repair, full, outOfRange, duplicate, unknown + nodePortRepairPortErrors = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "port_errors_total", + Help: "Number of errors detected on ports by the repair loop broken down by type of error: leak, repair, full, outOfRange, duplicate, unknown", + StabilityLevel: metrics.ALPHA, + }, + []string{"type"}, + ) + // nodePortRepairReconcileErrors indicates the number of times the repair loop has failed to repair + // the errors it detected. 
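+	// It is incremented by the runOnce wrapper each time doRunOnce returns an error.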
+ nodePortRepairReconcileErrors = metrics.NewCounter( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "reconcile_errors_total", + Help: "Number of reconciliation failures on the nodeport repair reconcile loop", + StabilityLevel: metrics.ALPHA, + }, + ) +) + +var registerMetricsOnce sync.Once + +func registerMetrics() { + registerMetricsOnce.Do(func() { + legacyregistry.MustRegister(nodePortRepairPortErrors) + legacyregistry.MustRegister(nodePortRepairReconcileErrors) + }) +} diff --git a/pkg/registry/core/service/portallocator/controller/repair.go b/pkg/registry/core/service/portallocator/controller/repair.go index fa87076049082..e76695a4c0f0c 100644 --- a/pkg/registry/core/service/portallocator/controller/repair.go +++ b/pkg/registry/core/service/portallocator/controller/repair.go @@ -61,6 +61,8 @@ func NewRepair(interval time.Duration, serviceClient corev1client.ServicesGetter eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: eventClient}) recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, "portallocator-repair-controller") + registerMetrics() + return &Repair{ interval: interval, serviceClient: serviceClient, @@ -89,7 +91,13 @@ func (c *Repair) RunUntil(onFirstSuccess func(), stopCh chan struct{}) { // runOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs. func (c *Repair) runOnce() error { - return retry.RetryOnConflict(retry.DefaultBackoff, c.doRunOnce) + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + err := c.doRunOnce() + if err != nil { + nodePortRepairReconcileErrors.Inc() + } + return err + }) } // doRunOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs. 
@@ -153,23 +161,28 @@ func (c *Repair) doRunOnce() error { stored.Release(port) } else { // doesn't seem to be allocated + nodePortRepairPortErrors.WithLabelValues("repair").Inc() c.recorder.Eventf(svc, nil, corev1.EventTypeWarning, "PortNotAllocated", "PortAllocation", "Port %d is not allocated; repairing", port) runtime.HandleError(fmt.Errorf("the node port %d for service %s/%s is not allocated; repairing", port, svc.Name, svc.Namespace)) } delete(c.leaks, port) // it is used, so it can't be leaked case portallocator.ErrAllocated: // port is duplicate, reallocate + nodePortRepairPortErrors.WithLabelValues("duplicate").Inc() c.recorder.Eventf(svc, nil, corev1.EventTypeWarning, "PortAlreadyAllocated", "PortAllocation", "Port %d was assigned to multiple services; please recreate service", port) runtime.HandleError(fmt.Errorf("the node port %d for service %s/%s was assigned to multiple services; please recreate", port, svc.Name, svc.Namespace)) case err.(*portallocator.ErrNotInRange): // port is out of range, reallocate + nodePortRepairPortErrors.WithLabelValues("outOfRange").Inc() c.recorder.Eventf(svc, nil, corev1.EventTypeWarning, "PortOutOfRange", "PortAllocation", "Port %d is not within the port range %s; please recreate service", port, c.portRange) runtime.HandleError(fmt.Errorf("the port %d for service %s/%s is not within the port range %s; please recreate", port, svc.Name, svc.Namespace, c.portRange)) case portallocator.ErrFull: // somehow we are out of ports + nodePortRepairPortErrors.WithLabelValues("full").Inc() c.recorder.Eventf(svc, nil, corev1.EventTypeWarning, "PortRangeFull", "PortAllocation", "Port range %s is full; you must widen the port range in order to create new services", c.portRange) return fmt.Errorf("the port range %s is full; you must widen the port range in order to create new services", c.portRange) default: + nodePortRepairPortErrors.WithLabelValues("unknown").Inc() c.recorder.Eventf(svc, nil, corev1.EventTypeWarning, "UnknownError", "PortAllocation", "Unable to allocate port %d due to an unknown error", port) return fmt.Errorf("unable to allocate port %d for service %s/%s due to an unknown error, exiting: %v", port, svc.Name, svc.Namespace, err) } @@ -189,9 +202,11 @@ func (c *Repair) doRunOnce() error { // pretend it is still in use until count expires c.leaks[port] = count - 1 if err := rebuilt.Allocate(port); err != nil { + // do not increment the metric here, if it is a leak it will be detected once the counter gets to 0 runtime.HandleError(fmt.Errorf("the node port %d may have leaked, but can not be allocated: %v", port, err)) } default: + nodePortRepairPortErrors.WithLabelValues("leak").Inc() // do not add it to the rebuilt set, which means it will be available for reuse runtime.HandleError(fmt.Errorf("the node port %d appears to have leaked: cleaning up", port)) } diff --git a/pkg/registry/core/service/portallocator/controller/repair_test.go b/pkg/registry/core/service/portallocator/controller/repair_test.go index 9869d32fadf12..c0f3681f57d83 100644 --- a/pkg/registry/core/service/portallocator/controller/repair_test.go +++ b/pkg/registry/core/service/portallocator/controller/repair_test.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/kubernetes/fake" + "k8s.io/component-base/metrics/testutil" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/service/portallocator" ) @@ -53,6 +54,7 @@ func (r *mockRangeRegistry) CreateOrUpdate(alloc 
*api.RangeAllocation) error { } func TestRepair(t *testing.T) { + clearMetrics() fakeClient := fake.NewSimpleClientset() registry := &mockRangeRegistry{ item: &api.RangeAllocation{Range: "100-200"}, @@ -66,6 +68,13 @@ func TestRepair(t *testing.T) { if !registry.updateCalled || registry.updated == nil || registry.updated.Range != pr.String() || registry.updated != registry.item { t.Errorf("unexpected registry: %#v", registry) } + repairErrors, err := testutil.GetCounterMetricValue(nodePortRepairReconcileErrors) + if err != nil { + t.Errorf("failed to get %s value, err: %v", nodePortRepairReconcileErrors.Name, err) + } + if repairErrors != 0 { + t.Fatalf("0 error expected, got %v", repairErrors) + } registry = &mockRangeRegistry{ item: &api.RangeAllocation{Range: "100-200"}, @@ -75,9 +84,18 @@ func TestRepair(t *testing.T) { if err := r.runOnce(); !strings.Contains(err.Error(), ": test error") { t.Fatal(err) } + repairErrors, err = testutil.GetCounterMetricValue(nodePortRepairReconcileErrors) + if err != nil { + t.Errorf("failed to get %s value, err: %v", nodePortRepairReconcileErrors.Name, err) + } + if repairErrors != 1 { + t.Fatalf("1 error expected, got %v", repairErrors) + } } func TestRepairLeak(t *testing.T) { + clearMetrics() + pr, _ := net.ParsePortRange("100-200") previous, err := portallocator.NewInMemory(*pr) if err != nil { @@ -127,9 +145,18 @@ func TestRepairLeak(t *testing.T) { if after.Has(111) { t.Errorf("expected portallocator to not have leaked port") } + em := testMetrics{ + leak: 1, + repair: 0, + outOfRange: 0, + duplicate: 0, + unknown: 0, + } + expectMetrics(t, em) } func TestRepairWithExisting(t *testing.T) { + clearMetrics() pr, _ := net.ParsePortRange("100-200") previous, err := portallocator.NewInMemory(*pr) if err != nil { @@ -204,6 +231,14 @@ func TestRepairWithExisting(t *testing.T) { if free := after.Free(); free != 97 { t.Errorf("unexpected portallocator state: %d free", free) } + em := testMetrics{ + leak: 0, + repair: 4, + outOfRange: 1, + duplicate: 1, + unknown: 0, + } + expectMetrics(t, em) } func TestCollectServiceNodePorts(t *testing.T) { @@ -303,3 +338,51 @@ func TestCollectServiceNodePorts(t *testing.T) { }) } } + +// Metrics helpers +func clearMetrics() { + nodePortRepairPortErrors.Reset() + nodePortRepairReconcileErrors.Reset() +} + +type testMetrics struct { + leak float64 + repair float64 + outOfRange float64 + duplicate float64 + unknown float64 + full float64 +} + +func expectMetrics(t *testing.T, em testMetrics) { + var m testMetrics + var err error + + m.leak, err = testutil.GetCounterMetricValue(nodePortRepairPortErrors.WithLabelValues("leak")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", nodePortRepairPortErrors.Name, err) + } + m.repair, err = testutil.GetCounterMetricValue(nodePortRepairPortErrors.WithLabelValues("repair")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", nodePortRepairPortErrors.Name, err) + } + m.outOfRange, err = testutil.GetCounterMetricValue(nodePortRepairPortErrors.WithLabelValues("outOfRange")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", nodePortRepairPortErrors.Name, err) + } + m.duplicate, err = testutil.GetCounterMetricValue(nodePortRepairPortErrors.WithLabelValues("duplicate")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", nodePortRepairPortErrors.Name, err) + } + m.unknown, err = testutil.GetCounterMetricValue(nodePortRepairPortErrors.WithLabelValues("unknown")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", 
nodePortRepairPortErrors.Name, err) + } + m.full, err = testutil.GetCounterMetricValue(nodePortRepairPortErrors.WithLabelValues("full")) + if err != nil { + t.Errorf("failed to get %s value, err: %v", nodePortRepairPortErrors.Name, err) + } + if m != em { + t.Fatalf("metrics error: expected %v, received %v", em, m) + } +} diff --git a/pkg/registry/core/service/storage/storage.go b/pkg/registry/core/service/storage/storage.go index 880b0127bf7f4..b659aaad63068 100644 --- a/pkg/registry/core/service/storage/storage.go +++ b/pkg/registry/core/service/storage/storage.go @@ -127,6 +127,11 @@ func NewREST( store.BeginCreate = genericStore.beginCreate store.BeginUpdate = genericStore.beginUpdate + // users can patch the status to remove the finalizer, + // hence statusStore must participate on the AfterDelete + // hook to release the allocated resources + statusStore.AfterDelete = genericStore.afterDelete + return genericStore, &StatusREST{store: &statusStore}, &svcreg.ProxyREST{Redirector: genericStore, ProxyTransport: proxyTransport}, nil } diff --git a/pkg/registry/core/service/storage/storage_test.go b/pkg/registry/core/service/storage/storage_test.go index 73292a30ba567..a1ec36f630fed 100644 --- a/pkg/registry/core/service/storage/storage_test.go +++ b/pkg/registry/core/service/storage/storage_test.go @@ -11671,7 +11671,7 @@ func TestServiceRegistryResourceLocation(t *testing.T) { if tc.err == false && err != nil { t.Fatalf("unexpected error: %v", err) } - if tc.err == true && err == nil { + if tc.err && err == nil { t.Fatalf("unexpected success") } if !tc.err { diff --git a/pkg/registry/networking/clustercidr/storage/storage.go b/pkg/registry/networking/clustercidr/storage/storage.go deleted file mode 100644 index 3c0f44b9ff8e2..0000000000000 --- a/pkg/registry/networking/clustercidr/storage/storage.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/apiserver/pkg/registry/rest" - networkingapi "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/printers" - printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" - printerstorage "k8s.io/kubernetes/pkg/printers/storage" - "k8s.io/kubernetes/pkg/registry/networking/clustercidr" -) - -// REST implements a RESTStorage for ClusterCIDRs against etcd. -type REST struct { - *genericregistry.Store -} - -// NewREST returns a RESTStorage object that will work against ClusterCIDRs. 
-func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) { - store := &genericregistry.Store{ - NewFunc: func() runtime.Object { return &networkingapi.ClusterCIDR{} }, - NewListFunc: func() runtime.Object { return &networkingapi.ClusterCIDRList{} }, - DefaultQualifiedResource: networkingapi.Resource("clustercidrs"), - SingularQualifiedResource: networkingapi.Resource("clustercidr"), - - CreateStrategy: clustercidr.Strategy, - UpdateStrategy: clustercidr.Strategy, - DeleteStrategy: clustercidr.Strategy, - - TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, - } - options := &generic.StoreOptions{RESTOptions: optsGetter} - if err := store.CompleteWithOptions(options); err != nil { - return nil, err - } - - return &REST{store}, nil -} - -// Implement ShortNamesProvider. -var _ rest.ShortNamesProvider = &REST{} - -// ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource. -func (r *REST) ShortNames() []string { - return []string{"cc"} -} diff --git a/pkg/registry/networking/clustercidr/storage/storage_test.go b/pkg/registry/networking/clustercidr/storage/storage_test.go deleted file mode 100644 index 774ec59f54d45..0000000000000 --- a/pkg/registry/networking/clustercidr/storage/storage_test.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing" - etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/networking" - _ "k8s.io/kubernetes/pkg/apis/networking/install" - "k8s.io/kubernetes/pkg/registry/registrytest" -) - -func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) { - etcdStorage, server := registrytest.NewEtcdStorageForResource(t, networking.Resource("clustercidrs")) - restOptions := generic.RESTOptions{ - StorageConfig: etcdStorage, - Decorator: generic.UndecoratedStorage, - DeleteCollectionWorkers: 1, - ResourcePrefix: "clustercidrs", - } - clusterCIDRStorage, err := NewREST(restOptions) - if err != nil { - t.Fatalf("unexpected error from REST storage: %v", err) - } - return clusterCIDRStorage, server -} - -var ( - namespace = metav1.NamespaceNone - name = "foo-clustercidr" -) - -func newClusterCIDR() *networking.ClusterCIDR { - return &networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.1.0.0/16", - IPv6: "fd00:1:1::/64", - NodeSelector: &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{ - { - MatchExpressions: []api.NodeSelectorRequirement{ - { - Key: "foo", - Operator: api.NodeSelectorOpIn, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - }, - } -} - -func validClusterCIDR() *networking.ClusterCIDR { - return newClusterCIDR() -} - -func TestCreate(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - validCC := validClusterCIDR() - noCIDRCC := validClusterCIDR() - noCIDRCC.Spec.IPv4 = "" - noCIDRCC.Spec.IPv6 = "" - invalidCCPerNodeHostBits := validClusterCIDR() - invalidCCPerNodeHostBits.Spec.PerNodeHostBits = 100 - invalidCCCIDR := validClusterCIDR() - invalidCCCIDR.Spec.IPv6 = "10.1.0.0/16" - - test.TestCreate( - // valid - validCC, - //invalid - noCIDRCC, - invalidCCPerNodeHostBits, - invalidCCCIDR, - ) -} - -func TestUpdate(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestUpdate( - // valid - validClusterCIDR(), - // updateFunc - func(obj runtime.Object) runtime.Object { - object := obj.(*networking.ClusterCIDR) - object.Finalizers = []string{"test.k8s.io/test-finalizer"} - return object - }, - // invalid updateFunc: ObjectMeta is not to be tampered with. 
- func(obj runtime.Object) runtime.Object { - object := obj.(*networking.ClusterCIDR) - object.Name = "" - return object - }, - ) -} - -func TestDelete(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestDelete(validClusterCIDR()) -} - -func TestGet(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestGet(validClusterCIDR()) -} - -func TestList(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestList(validClusterCIDR()) -} - -func TestWatch(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestWatch( - validClusterCIDR(), - // matching labels - []labels.Set{}, - // not matching labels - []labels.Set{ - {"a": "c"}, - {"foo": "bar"}, - }, - // matching fields - []fields.Set{ - {"metadata.name": name}, - }, - // not matching fields - []fields.Set{ - {"metadata.name": "bar"}, - {"name": name}, - }, - ) -} - -func TestShortNames(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - expected := []string{"cc"} - registrytest.AssertShortNames(t, storage, expected) -} diff --git a/pkg/registry/networking/clustercidr/strategy.go b/pkg/registry/networking/clustercidr/strategy.go deleted file mode 100644 index a69a5f9041341..0000000000000 --- a/pkg/registry/networking/clustercidr/strategy.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clustercidr - -import ( - "context" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/apis/networking/validation" -) - -// clusterCIDRStrategy implements verification logic for ClusterCIDRs. -type clusterCIDRStrategy struct { - runtime.ObjectTyper - names.NameGenerator -} - -// Strategy is the default logic that applies when creating and updating clusterCIDR objects. -var Strategy = clusterCIDRStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} - -// NamespaceScoped returns false because all clusterCIDRs do not need to be within a namespace. -func (clusterCIDRStrategy) NamespaceScoped() bool { - return false -} - -func (clusterCIDRStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {} - -func (clusterCIDRStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {} - -// Validate validates a new ClusterCIDR. 
-func (clusterCIDRStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { - clusterCIDR := obj.(*networking.ClusterCIDR) - return validation.ValidateClusterCIDR(clusterCIDR) -} - -// WarningsOnCreate returns warnings for the creation of the given object. -func (clusterCIDRStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { - return nil -} - -// Canonicalize normalizes the object after validation. -func (clusterCIDRStrategy) Canonicalize(obj runtime.Object) {} - -// AllowCreateOnUpdate is false for ClusterCIDR; this means POST is needed to create one. -func (clusterCIDRStrategy) AllowCreateOnUpdate() bool { - return false -} - -// ValidateUpdate is the default update validation for an end user. -func (clusterCIDRStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { - validationErrorList := validation.ValidateClusterCIDR(obj.(*networking.ClusterCIDR)) - updateErrorList := validation.ValidateClusterCIDRUpdate(obj.(*networking.ClusterCIDR), old.(*networking.ClusterCIDR)) - return append(validationErrorList, updateErrorList...) -} - -// WarningsOnUpdate returns warnings for the given update. -func (clusterCIDRStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { - return nil -} - -// AllowUnconditionalUpdate is the default update policy for ClusterCIDR objects. -func (clusterCIDRStrategy) AllowUnconditionalUpdate() bool { - return true -} diff --git a/pkg/registry/networking/clustercidr/strategy_test.go b/pkg/registry/networking/clustercidr/strategy_test.go deleted file mode 100644 index f3225377666bf..0000000000000 --- a/pkg/registry/networking/clustercidr/strategy_test.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clustercidr - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/networking" -) - -func newClusterCIDR() networking.ClusterCIDR { - return networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.1.0.0/16", - IPv6: "fd00:1:1::/64", - NodeSelector: &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{ - { - MatchExpressions: []api.NodeSelectorRequirement{ - { - Key: "foo", - Operator: api.NodeSelectorOpIn, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - }, - } -} - -func TestClusterCIDRStrategy(t *testing.T) { - ctx := genericapirequest.NewDefaultContext() - apiRequest := genericapirequest.RequestInfo{APIGroup: "networking.k8s.io", - APIVersion: "v1alpha1", - Resource: "clustercidrs", - } - ctx = genericapirequest.WithRequestInfo(ctx, &apiRequest) - if Strategy.NamespaceScoped() { - t.Errorf("ClusterCIDRs must be cluster scoped") - } - if Strategy.AllowCreateOnUpdate() { - t.Errorf("ClusterCIDRs should not allow create on update") - } - - ccc := newClusterCIDR() - Strategy.PrepareForCreate(ctx, &ccc) - - errs := Strategy.Validate(ctx, &ccc) - if len(errs) != 0 { - t.Errorf("Unexpected error validating %v", errs) - } - invalidCCC := newClusterCIDR() - invalidCCC.ResourceVersion = "4" - invalidCCC.Spec = networking.ClusterCIDRSpec{} - Strategy.PrepareForUpdate(ctx, &invalidCCC, &ccc) - errs = Strategy.ValidateUpdate(ctx, &invalidCCC, &ccc) - if len(errs) == 0 { - t.Errorf("Expected a validation error") - } - if invalidCCC.ResourceVersion != "4" { - t.Errorf("Incoming resource version on update should not be mutated") - } -} diff --git a/pkg/registry/networking/rest/storage_settings.go b/pkg/registry/networking/rest/storage_settings.go index 1ca958f9c8c84..59482432c1126 100644 --- a/pkg/registry/networking/rest/storage_settings.go +++ b/pkg/registry/networking/rest/storage_settings.go @@ -25,7 +25,6 @@ import ( serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/networking" - clustercidrstore "k8s.io/kubernetes/pkg/registry/networking/clustercidr/storage" ingressstore "k8s.io/kubernetes/pkg/registry/networking/ingress/storage" ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage" ipaddressstore "k8s.io/kubernetes/pkg/registry/networking/ipaddress/storage" @@ -90,14 +89,6 @@ func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.API func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) { storage := map[string]rest.Storage{} - // clustercidrs - if resource := "clustercidrs"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) { - clusterCIDRCStorage, err := clustercidrstore.NewREST(restOptionsGetter) - if err != nil { - return storage, err - } - storage[resource] = clusterCIDRCStorage - } // ipaddress if resource := "ipaddresses"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) { diff --git a/pkg/scheduler/apis/config/scheme/scheme_test.go b/pkg/scheduler/apis/config/scheme/scheme_test.go index f1278c4e3bf23..05d684ca6684b 100644 --- 
a/pkg/scheduler/apis/config/scheme/scheme_test.go +++ b/pkg/scheduler/apis/config/scheme/scheme_test.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/kube-scheduler/config/v1" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/yaml" ) @@ -196,7 +196,7 @@ profiles: wantProfiles: []config.KubeSchedulerProfile{ { SchedulerName: "default-scheduler", - PercentageOfNodesToScore: pointer.Int32(20), + PercentageOfNodesToScore: ptr.To[int32](20), Plugins: defaults.PluginsV1, PluginConfig: defaults.PluginConfigsV1, }, @@ -525,7 +525,7 @@ func TestCodecsEncodePluginConfig(t *testing.T) { Name: "InterPodAffinity", Args: runtime.RawExtension{ Object: &v1.InterPodAffinityArgs{ - HardPodAffinityWeight: pointer.Int32(5), + HardPodAffinityWeight: ptr.To[int32](5), }, }, }, @@ -533,7 +533,7 @@ func TestCodecsEncodePluginConfig(t *testing.T) { Name: "VolumeBinding", Args: runtime.RawExtension{ Object: &v1.VolumeBindingArgs{ - BindTimeoutSeconds: pointer.Int64(300), + BindTimeoutSeconds: ptr.To[int64](300), Shape: []v1.UtilizationShapePoint{ { Utilization: 0, diff --git a/pkg/scheduler/apis/config/v1/default_plugins.go b/pkg/scheduler/apis/config/v1/default_plugins.go index a7d5a602619d0..ded35ef8fa120 100644 --- a/pkg/scheduler/apis/config/v1/default_plugins.go +++ b/pkg/scheduler/apis/config/v1/default_plugins.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/kube-scheduler/config/v1" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // getDefaultPlugins returns the default set of plugins. @@ -34,10 +34,10 @@ func getDefaultPlugins() *v1.Plugins { {Name: names.PrioritySort}, {Name: names.NodeUnschedulable}, {Name: names.NodeName}, - {Name: names.TaintToleration, Weight: pointer.Int32(3)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(2)}, + {Name: names.TaintToleration, Weight: ptr.To[int32](3)}, + {Name: names.NodeAffinity, Weight: ptr.To[int32](2)}, {Name: names.NodePorts}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)}, {Name: names.VolumeRestrictions}, {Name: names.EBSLimits}, {Name: names.GCEPDLimits}, @@ -45,11 +45,11 @@ func getDefaultPlugins() *v1.Plugins { {Name: names.AzureDiskLimits}, {Name: names.VolumeBinding}, {Name: names.VolumeZone}, - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(2)}, + {Name: names.PodTopologySpread, Weight: ptr.To[int32](2)}, + {Name: names.InterPodAffinity, Weight: ptr.To[int32](2)}, {Name: names.DefaultPreemption}, - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)}, + {Name: names.ImageLocality, Weight: ptr.To[int32](1)}, {Name: names.DefaultBinder}, }, }, @@ -113,7 +113,7 @@ func mergePluginSet(logger klog.Logger, defaultPluginSet, customPluginSet v1.Plu disabledPlugins := sets.New[string]() enabledCustomPlugins := make(map[string]pluginIndex) // replacedPluginIndex is a set of index of plugins, which have replaced the default plugins. 
- replacedPluginIndex := sets.NewInt() + replacedPluginIndex := sets.New[int]() var disabled []v1.Plugin for _, disabledPlugin := range customPluginSet.Disabled { // if the user is manually disabling any (or all, with "*") default plugins for an extension point, diff --git a/pkg/scheduler/apis/config/v1/default_plugins_test.go b/pkg/scheduler/apis/config/v1/default_plugins_test.go index a56a4306ea456..f05d524340031 100644 --- a/pkg/scheduler/apis/config/v1/default_plugins_test.go +++ b/pkg/scheduler/apis/config/v1/default_plugins_test.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/kube-scheduler/config/v1" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestApplyFeatureGates(t *testing.T) { @@ -47,10 +47,10 @@ func TestApplyFeatureGates(t *testing.T) { {Name: names.PrioritySort}, {Name: names.NodeUnschedulable}, {Name: names.NodeName}, - {Name: names.TaintToleration, Weight: pointer.Int32(3)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(2)}, + {Name: names.TaintToleration, Weight: ptr.To[int32](3)}, + {Name: names.NodeAffinity, Weight: ptr.To[int32](2)}, {Name: names.NodePorts}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)}, {Name: names.VolumeRestrictions}, {Name: names.EBSLimits}, {Name: names.GCEPDLimits}, @@ -58,11 +58,11 @@ func TestApplyFeatureGates(t *testing.T) { {Name: names.AzureDiskLimits}, {Name: names.VolumeBinding}, {Name: names.VolumeZone}, - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(2)}, + {Name: names.PodTopologySpread, Weight: ptr.To[int32](2)}, + {Name: names.InterPodAffinity, Weight: ptr.To[int32](2)}, {Name: names.DefaultPreemption}, - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)}, + {Name: names.ImageLocality, Weight: ptr.To[int32](1)}, {Name: names.DefaultBinder}, }, }, @@ -79,10 +79,10 @@ func TestApplyFeatureGates(t *testing.T) { {Name: names.PrioritySort}, {Name: names.NodeUnschedulable}, {Name: names.NodeName}, - {Name: names.TaintToleration, Weight: pointer.Int32(3)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(2)}, + {Name: names.TaintToleration, Weight: ptr.To[int32](3)}, + {Name: names.NodeAffinity, Weight: ptr.To[int32](2)}, {Name: names.NodePorts}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)}, {Name: names.VolumeRestrictions}, {Name: names.EBSLimits}, {Name: names.GCEPDLimits}, @@ -90,11 +90,11 @@ func TestApplyFeatureGates(t *testing.T) { {Name: names.AzureDiskLimits}, {Name: names.VolumeBinding}, {Name: names.VolumeZone}, - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(2)}, + {Name: names.PodTopologySpread, Weight: ptr.To[int32](2)}, + {Name: names.InterPodAffinity, Weight: ptr.To[int32](2)}, {Name: names.DefaultPreemption}, - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)}, + {Name: names.ImageLocality, Weight: ptr.To[int32](1)}, {Name: names.DefaultBinder}, {Name: names.SchedulingGates}, }, @@ -112,10 +112,10 @@ func TestApplyFeatureGates(t *testing.T) { {Name: 
names.PrioritySort}, {Name: names.NodeUnschedulable}, {Name: names.NodeName}, - {Name: names.TaintToleration, Weight: pointer.Int32(3)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(2)}, + {Name: names.TaintToleration, Weight: ptr.To[int32](3)}, + {Name: names.NodeAffinity, Weight: ptr.To[int32](2)}, {Name: names.NodePorts}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)}, {Name: names.VolumeRestrictions}, {Name: names.EBSLimits}, {Name: names.GCEPDLimits}, @@ -123,12 +123,12 @@ func TestApplyFeatureGates(t *testing.T) { {Name: names.AzureDiskLimits}, {Name: names.VolumeBinding}, {Name: names.VolumeZone}, - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(2)}, + {Name: names.PodTopologySpread, Weight: ptr.To[int32](2)}, + {Name: names.InterPodAffinity, Weight: ptr.To[int32](2)}, {Name: names.DynamicResources}, {Name: names.DefaultPreemption}, - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)}, + {Name: names.ImageLocality, Weight: ptr.To[int32](1)}, {Name: names.DefaultBinder}, {Name: names.SchedulingGates}, }, @@ -312,8 +312,8 @@ func TestMergePlugins(t *testing.T) { customPlugins: &v1.Plugins{ Filter: v1.PluginSet{ Enabled: []v1.Plugin{ - {Name: "Plugin1", Weight: pointer.Int32(2)}, - {Name: "Plugin3", Weight: pointer.Int32(3)}, + {Name: "Plugin1", Weight: ptr.To[int32](2)}, + {Name: "Plugin3", Weight: ptr.To[int32](3)}, }, }, }, @@ -329,9 +329,9 @@ func TestMergePlugins(t *testing.T) { expectedPlugins: &v1.Plugins{ Filter: v1.PluginSet{ Enabled: []v1.Plugin{ - {Name: "Plugin1", Weight: pointer.Int32(2)}, + {Name: "Plugin1", Weight: ptr.To[int32](2)}, {Name: "Plugin2"}, - {Name: "Plugin3", Weight: pointer.Int32(3)}, + {Name: "Plugin3", Weight: ptr.To[int32](3)}, }, }, }, @@ -341,8 +341,8 @@ func TestMergePlugins(t *testing.T) { customPlugins: &v1.Plugins{ Filter: v1.PluginSet{ Enabled: []v1.Plugin{ - {Name: "Plugin2", Weight: pointer.Int32(2)}, - {Name: "Plugin1", Weight: pointer.Int32(1)}, + {Name: "Plugin2", Weight: ptr.To[int32](2)}, + {Name: "Plugin1", Weight: ptr.To[int32](1)}, }, }, }, @@ -358,8 +358,8 @@ func TestMergePlugins(t *testing.T) { expectedPlugins: &v1.Plugins{ Filter: v1.PluginSet{ Enabled: []v1.Plugin{ - {Name: "Plugin1", Weight: pointer.Int32(1)}, - {Name: "Plugin2", Weight: pointer.Int32(2)}, + {Name: "Plugin1", Weight: ptr.To[int32](1)}, + {Name: "Plugin2", Weight: ptr.To[int32](2)}, {Name: "Plugin3"}, }, }, @@ -371,9 +371,9 @@ func TestMergePlugins(t *testing.T) { Filter: v1.PluginSet{ Enabled: []v1.Plugin{ {Name: "Plugin1"}, - {Name: "Plugin2", Weight: pointer.Int32(2)}, + {Name: "Plugin2", Weight: ptr.To[int32](2)}, {Name: "Plugin3"}, - {Name: "Plugin2", Weight: pointer.Int32(4)}, + {Name: "Plugin2", Weight: ptr.To[int32](4)}, }, }, }, @@ -390,9 +390,9 @@ func TestMergePlugins(t *testing.T) { Filter: v1.PluginSet{ Enabled: []v1.Plugin{ {Name: "Plugin1"}, - {Name: "Plugin2", Weight: pointer.Int32(4)}, + {Name: "Plugin2", Weight: ptr.To[int32](4)}, {Name: "Plugin3"}, - {Name: "Plugin2", Weight: pointer.Int32(2)}, + {Name: "Plugin2", Weight: ptr.To[int32](2)}, }, }, }, @@ -473,7 +473,7 @@ func TestMergePlugins(t *testing.T) { customPlugins: &v1.Plugins{ MultiPoint: v1.PluginSet{ Enabled: []v1.Plugin{ - {Name: "DefaultPlugin", Weight: pointer.Int32(5)}, + {Name: 
"DefaultPlugin", Weight: ptr.To[int32](5)}, }, }, }, @@ -487,7 +487,7 @@ func TestMergePlugins(t *testing.T) { expectedPlugins: &v1.Plugins{ MultiPoint: v1.PluginSet{ Enabled: []v1.Plugin{ - {Name: "DefaultPlugin", Weight: pointer.Int32(5)}, + {Name: "DefaultPlugin", Weight: ptr.To[int32](5)}, }, }, }, diff --git a/pkg/scheduler/apis/config/v1/defaults.go b/pkg/scheduler/apis/config/v1/defaults.go index 6746f23a9620b..c3775adcfe2c1 100644 --- a/pkg/scheduler/apis/config/v1/defaults.go +++ b/pkg/scheduler/apis/config/v1/defaults.go @@ -26,7 +26,7 @@ import ( configv1 "k8s.io/kube-scheduler/config/v1" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var defaultResourceSpec = []configv1.ResourceSpec{ @@ -105,7 +105,7 @@ func setDefaults_KubeSchedulerProfile(logger klog.Logger, prof *configv1.KubeSch func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfiguration) { logger := klog.TODO() // called by generated code that doesn't pass a logger. See #115724 if obj.Parallelism == nil { - obj.Parallelism = pointer.Int32(16) + obj.Parallelism = ptr.To[int32](16) } if len(obj.Profiles) == 0 { @@ -114,7 +114,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfigura // Only apply a default scheduler name when there is a single profile. // Validation will ensure that every profile has a non-empty unique name. if len(obj.Profiles) == 1 && obj.Profiles[0].SchedulerName == nil { - obj.Profiles[0].SchedulerName = pointer.String(v1.DefaultSchedulerName) + obj.Profiles[0].SchedulerName = ptr.To(v1.DefaultSchedulerName) } // Add the default set of plugins and apply the configuration. @@ -124,7 +124,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfigura } if obj.PercentageOfNodesToScore == nil { - obj.PercentageOfNodesToScore = pointer.Int32(config.DefaultPercentageOfNodesToScore) + obj.PercentageOfNodesToScore = ptr.To[int32](config.DefaultPercentageOfNodesToScore) } if len(obj.LeaderElection.ResourceLock) == 0 { @@ -155,42 +155,42 @@ func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfigura componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection) if obj.PodInitialBackoffSeconds == nil { - obj.PodInitialBackoffSeconds = pointer.Int64(1) + obj.PodInitialBackoffSeconds = ptr.To[int64](1) } if obj.PodMaxBackoffSeconds == nil { - obj.PodMaxBackoffSeconds = pointer.Int64(10) + obj.PodMaxBackoffSeconds = ptr.To[int64](10) } // Enable profiling by default in the scheduler if obj.EnableProfiling == nil { - obj.EnableProfiling = pointer.Bool(true) + obj.EnableProfiling = ptr.To(true) } // Enable contention profiling by default if profiling is enabled if *obj.EnableProfiling && obj.EnableContentionProfiling == nil { - obj.EnableContentionProfiling = pointer.Bool(true) + obj.EnableContentionProfiling = ptr.To(true) } } func SetDefaults_DefaultPreemptionArgs(obj *configv1.DefaultPreemptionArgs) { if obj.MinCandidateNodesPercentage == nil { - obj.MinCandidateNodesPercentage = pointer.Int32(10) + obj.MinCandidateNodesPercentage = ptr.To[int32](10) } if obj.MinCandidateNodesAbsolute == nil { - obj.MinCandidateNodesAbsolute = pointer.Int32(100) + obj.MinCandidateNodesAbsolute = ptr.To[int32](100) } } func SetDefaults_InterPodAffinityArgs(obj *configv1.InterPodAffinityArgs) { if obj.HardPodAffinityWeight == nil { - obj.HardPodAffinityWeight = pointer.Int32(1) + obj.HardPodAffinityWeight = ptr.To[int32](1) } } 
func SetDefaults_VolumeBindingArgs(obj *configv1.VolumeBindingArgs) { if obj.BindTimeoutSeconds == nil { - obj.BindTimeoutSeconds = pointer.Int64(600) + obj.BindTimeoutSeconds = ptr.To[int64](600) } if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) { obj.Shape = []configv1.UtilizationShapePoint{ diff --git a/pkg/scheduler/apis/config/v1/defaults_test.go b/pkg/scheduler/apis/config/v1/defaults_test.go index d2861e8f6400c..354737817c18c 100644 --- a/pkg/scheduler/apis/config/v1/defaults_test.go +++ b/pkg/scheduler/apis/config/v1/defaults_test.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var pluginConfigs = []configv1.PluginConfig{ @@ -46,8 +46,8 @@ var pluginConfigs = []configv1.PluginConfig{ Kind: "DefaultPreemptionArgs", APIVersion: "kubescheduler.config.k8s.io/v1", }, - MinCandidateNodesPercentage: pointer.Int32(10), - MinCandidateNodesAbsolute: pointer.Int32(100), + MinCandidateNodesPercentage: ptr.To[int32](10), + MinCandidateNodesAbsolute: ptr.To[int32](100), }}, }, { @@ -58,7 +58,7 @@ var pluginConfigs = []configv1.PluginConfig{ Kind: "InterPodAffinityArgs", APIVersion: "kubescheduler.config.k8s.io/v1", }, - HardPodAffinityWeight: pointer.Int32(1), + HardPodAffinityWeight: ptr.To[int32](1), }}, }, { @@ -110,7 +110,7 @@ var pluginConfigs = []configv1.PluginConfig{ Kind: "VolumeBindingArgs", APIVersion: "kubescheduler.config.k8s.io/v1", }, - BindTimeoutSeconds: pointer.Int64(600), + BindTimeoutSeconds: ptr.To[int64](600), }}, }, } @@ -126,13 +126,13 @@ func TestSchedulerDefaults(t *testing.T) { name: "empty config", config: &configv1.KubeSchedulerConfiguration{}, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -145,14 +145,14 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { Plugins: getDefaultPlugins(), PluginConfig: pluginConfigs, - SchedulerName: pointer.String("default-scheduler"), + SchedulerName: ptr.To("default-scheduler"), }, }, }, @@ -163,13 +163,13 @@ func TestSchedulerDefaults(t *testing.T) { Profiles: []configv1.KubeSchedulerProfile{{}}, }, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), 
LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -182,12 +182,12 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { - SchedulerName: pointer.String("default-scheduler"), + SchedulerName: ptr.To("default-scheduler"), Plugins: getDefaultPlugins(), PluginConfig: pluginConfigs}, }, @@ -196,7 +196,7 @@ func TestSchedulerDefaults(t *testing.T) { { name: "two profiles", config: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), Profiles: []configv1.KubeSchedulerProfile{ { PluginConfig: []configv1.PluginConfig{ @@ -204,7 +204,7 @@ func TestSchedulerDefaults(t *testing.T) { }, }, { - SchedulerName: pointer.String("custom-scheduler"), + SchedulerName: ptr.To("custom-scheduler"), Plugins: &configv1.Plugins{ Bind: configv1.PluginSet{ Enabled: []configv1.Plugin{ @@ -219,13 +219,13 @@ func TestSchedulerDefaults(t *testing.T) { }, }, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -238,9 +238,9 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { Plugins: getDefaultPlugins(), @@ -254,8 +254,8 @@ func TestSchedulerDefaults(t *testing.T) { Kind: "DefaultPreemptionArgs", APIVersion: "kubescheduler.config.k8s.io/v1", }, - MinCandidateNodesPercentage: pointer.Int32(10), - MinCandidateNodesAbsolute: pointer.Int32(100), + MinCandidateNodesPercentage: ptr.To[int32](10), + MinCandidateNodesAbsolute: ptr.To[int32](100), }}, }, { @@ -266,7 +266,7 @@ func TestSchedulerDefaults(t *testing.T) { Kind: "InterPodAffinityArgs", APIVersion: "kubescheduler.config.k8s.io/v1", }, - HardPodAffinityWeight: pointer.Int32(1), + HardPodAffinityWeight: ptr.To[int32](1), }}, }, { @@ -318,23 +318,23 @@ func TestSchedulerDefaults(t *testing.T) { Kind: "VolumeBindingArgs", APIVersion: "kubescheduler.config.k8s.io/v1", }, - BindTimeoutSeconds: pointer.Int64(600), + BindTimeoutSeconds: ptr.To[int64](600), }}, }, }, }, { - SchedulerName: pointer.String("custom-scheduler"), + SchedulerName: ptr.To("custom-scheduler"), Plugins: &configv1.Plugins{ MultiPoint: 
configv1.PluginSet{ Enabled: []configv1.Plugin{ {Name: names.PrioritySort}, {Name: names.NodeUnschedulable}, {Name: names.NodeName}, - {Name: names.TaintToleration, Weight: pointer.Int32(3)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(2)}, + {Name: names.TaintToleration, Weight: ptr.To[int32](3)}, + {Name: names.NodeAffinity, Weight: ptr.To[int32](2)}, {Name: names.NodePorts}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)}, {Name: names.VolumeRestrictions}, {Name: names.EBSLimits}, {Name: names.GCEPDLimits}, @@ -342,11 +342,11 @@ func TestSchedulerDefaults(t *testing.T) { {Name: names.AzureDiskLimits}, {Name: names.VolumeBinding}, {Name: names.VolumeZone}, - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(2)}, + {Name: names.PodTopologySpread, Weight: ptr.To[int32](2)}, + {Name: names.InterPodAffinity, Weight: ptr.To[int32](2)}, {Name: names.DefaultPreemption}, - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, + {Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)}, + {Name: names.ImageLocality, Weight: ptr.To[int32](1)}, {Name: names.DefaultBinder}, {Name: names.SchedulingGates}, }, @@ -368,16 +368,16 @@ func TestSchedulerDefaults(t *testing.T) { { name: "Prallelism with no port", config: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), }, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -390,14 +390,14 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { Plugins: getDefaultPlugins(), PluginConfig: pluginConfigs, - SchedulerName: pointer.String("default-scheduler"), + SchedulerName: ptr.To("default-scheduler"), }, }, }, @@ -405,16 +405,16 @@ func TestSchedulerDefaults(t *testing.T) { { name: "set non default parallelism", config: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(8), + Parallelism: ptr.To[int32](8), }, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(8), + Parallelism: ptr.To[int32](8), DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: 
metav1.Duration{Duration: 2 * time.Second}, @@ -427,14 +427,14 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { Plugins: getDefaultPlugins(), PluginConfig: pluginConfigs, - SchedulerName: pointer.String("default-scheduler"), + SchedulerName: ptr.To("default-scheduler"), }, }, }, @@ -445,14 +445,14 @@ func TestSchedulerDefaults(t *testing.T) { DelayCacheUntilActive: true, }, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), DelayCacheUntilActive: true, DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -465,14 +465,14 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { Plugins: getDefaultPlugins(), PluginConfig: pluginConfigs, - SchedulerName: pointer.String("default-scheduler"), + SchedulerName: ptr.To("default-scheduler"), }, }, }, @@ -480,16 +480,16 @@ func TestSchedulerDefaults(t *testing.T) { { name: "set non default global percentageOfNodesToScore", config: &configv1.KubeSchedulerConfiguration{ - PercentageOfNodesToScore: pointer.Int32(50), + PercentageOfNodesToScore: ptr.To[int32](50), }, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -502,14 +502,14 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(50), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](50), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { Plugins: getDefaultPlugins(), PluginConfig: pluginConfigs, - SchedulerName: pointer.String("default-scheduler"), + SchedulerName: ptr.To("default-scheduler"), }, }, }, @@ -519,18 
+519,18 @@ func TestSchedulerDefaults(t *testing.T) { config: &configv1.KubeSchedulerConfiguration{ Profiles: []configv1.KubeSchedulerProfile{ { - PercentageOfNodesToScore: pointer.Int32(50), + PercentageOfNodesToScore: ptr.To[int32](50), }, }, }, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -543,15 +543,15 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { Plugins: getDefaultPlugins(), PluginConfig: pluginConfigs, - SchedulerName: pointer.String("default-scheduler"), - PercentageOfNodesToScore: pointer.Int32(50), + SchedulerName: ptr.To("default-scheduler"), + PercentageOfNodesToScore: ptr.To[int32](50), }, }, }, @@ -559,21 +559,21 @@ func TestSchedulerDefaults(t *testing.T) { { name: "set non default global and profile percentageOfNodesToScore", config: &configv1.KubeSchedulerConfiguration{ - PercentageOfNodesToScore: pointer.Int32(10), + PercentageOfNodesToScore: ptr.To[int32](10), Profiles: []configv1.KubeSchedulerProfile{ { - PercentageOfNodesToScore: pointer.Int32(50), + PercentageOfNodesToScore: ptr.To[int32](50), }, }, }, expected: &configv1.KubeSchedulerConfiguration{ - Parallelism: pointer.Int32(16), + Parallelism: ptr.To[int32](16), DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ EnableProfiling: &enable, EnableContentionProfiling: &enable, }, LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ - LeaderElect: pointer.Bool(true), + LeaderElect: ptr.To(true), LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, @@ -586,15 +586,15 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(10), - PodInitialBackoffSeconds: pointer.Int64(1), - PodMaxBackoffSeconds: pointer.Int64(10), + PercentageOfNodesToScore: ptr.To[int32](10), + PodInitialBackoffSeconds: ptr.To[int64](1), + PodMaxBackoffSeconds: ptr.To[int64](10), Profiles: []configv1.KubeSchedulerProfile{ { Plugins: getDefaultPlugins(), PluginConfig: pluginConfigs, - SchedulerName: pointer.String("default-scheduler"), - PercentageOfNodesToScore: pointer.Int32(50), + SchedulerName: ptr.To("default-scheduler"), + PercentageOfNodesToScore: ptr.To[int32](50), }, }, }, @@ -621,43 +621,43 @@ func TestPluginArgsDefaults(t *testing.T) { name: "DefaultPreemptionArgs empty", in: &configv1.DefaultPreemptionArgs{}, want: &configv1.DefaultPreemptionArgs{ - MinCandidateNodesPercentage: pointer.Int32(10), - MinCandidateNodesAbsolute: 
pointer.Int32(100), + MinCandidateNodesPercentage: ptr.To[int32](10), + MinCandidateNodesAbsolute: ptr.To[int32](100), }, }, { name: "DefaultPreemptionArgs with value", in: &configv1.DefaultPreemptionArgs{ - MinCandidateNodesPercentage: pointer.Int32(50), + MinCandidateNodesPercentage: ptr.To[int32](50), }, want: &configv1.DefaultPreemptionArgs{ - MinCandidateNodesPercentage: pointer.Int32(50), - MinCandidateNodesAbsolute: pointer.Int32(100), + MinCandidateNodesPercentage: ptr.To[int32](50), + MinCandidateNodesAbsolute: ptr.To[int32](100), }, }, { name: "InterPodAffinityArgs empty", in: &configv1.InterPodAffinityArgs{}, want: &configv1.InterPodAffinityArgs{ - HardPodAffinityWeight: pointer.Int32(1), + HardPodAffinityWeight: ptr.To[int32](1), }, }, { name: "InterPodAffinityArgs explicit 0", in: &configv1.InterPodAffinityArgs{ - HardPodAffinityWeight: pointer.Int32(0), + HardPodAffinityWeight: ptr.To[int32](0), }, want: &configv1.InterPodAffinityArgs{ - HardPodAffinityWeight: pointer.Int32(0), + HardPodAffinityWeight: ptr.To[int32](0), }, }, { name: "InterPodAffinityArgs with value", in: &configv1.InterPodAffinityArgs{ - HardPodAffinityWeight: pointer.Int32(5), + HardPodAffinityWeight: ptr.To[int32](5), }, want: &configv1.InterPodAffinityArgs{ - HardPodAffinityWeight: pointer.Int32(5), + HardPodAffinityWeight: ptr.To[int32](5), }, }, { @@ -774,7 +774,7 @@ func TestPluginArgsDefaults(t *testing.T) { }, in: &configv1.VolumeBindingArgs{}, want: &configv1.VolumeBindingArgs{ - BindTimeoutSeconds: pointer.Int64(600), + BindTimeoutSeconds: ptr.To[int64](600), }, }, { @@ -784,7 +784,7 @@ func TestPluginArgsDefaults(t *testing.T) { }, in: &configv1.VolumeBindingArgs{}, want: &configv1.VolumeBindingArgs{ - BindTimeoutSeconds: pointer.Int64(600), + BindTimeoutSeconds: ptr.To[int64](600), Shape: []configv1.UtilizationShapePoint{ {Utilization: 0, Score: 0}, {Utilization: 100, Score: 10}, diff --git a/pkg/scheduler/apis/config/validation/validation_test.go b/pkg/scheduler/apis/config/validation/validation_test.go index d2f53a668eb2f..f25039b369ea7 100644 --- a/pkg/scheduler/apis/config/validation/validation_test.go +++ b/pkg/scheduler/apis/config/validation/validation_test.go @@ -26,7 +26,7 @@ import ( componentbaseconfig "k8s.io/component-base/config" "k8s.io/kubernetes/pkg/scheduler/apis/config" configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { @@ -56,7 +56,7 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { PodMaxBackoffSeconds: podMaxBackoffSeconds, Profiles: []config.KubeSchedulerProfile{{ SchedulerName: "me", - PercentageOfNodesToScore: pointer.Int32(35), + PercentageOfNodesToScore: ptr.To[int32](35), Plugins: &config.Plugins{ QueueSort: config.PluginSet{ Enabled: []config.Plugin{{Name: "CustomSort"}}, @@ -71,7 +71,7 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { }}, }, { SchedulerName: "other", - PercentageOfNodesToScore: pointer.Int32(35), + PercentageOfNodesToScore: ptr.To[int32](35), Plugins: &config.Plugins{ QueueSort: config.PluginSet{ Enabled: []config.Plugin{{Name: "CustomSort"}}, @@ -110,10 +110,10 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { healthzBindAddrInvalid.HealthzBindAddress = "0.0.0.0:9090" percentageOfNodesToScore101 := validConfig.DeepCopy() - percentageOfNodesToScore101.PercentageOfNodesToScore = pointer.Int32(101) + percentageOfNodesToScore101.PercentageOfNodesToScore = ptr.To[int32](101) 
percentageOfNodesToScoreNegative := validConfig.DeepCopy() - percentageOfNodesToScoreNegative.PercentageOfNodesToScore = pointer.Int32(-1) + percentageOfNodesToScoreNegative.PercentageOfNodesToScore = ptr.To[int32](-1) schedulerNameNotSet := validConfig.DeepCopy() schedulerNameNotSet.Profiles[1].SchedulerName = "" @@ -122,10 +122,10 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { repeatedSchedulerName.Profiles[0].SchedulerName = "other" profilePercentageOfNodesToScore101 := validConfig.DeepCopy() - profilePercentageOfNodesToScore101.Profiles[1].PercentageOfNodesToScore = pointer.Int32(101) + profilePercentageOfNodesToScore101.Profiles[1].PercentageOfNodesToScore = ptr.To[int32](101) profilePercentageOfNodesToScoreNegative := validConfig.DeepCopy() - profilePercentageOfNodesToScoreNegative.Profiles[1].PercentageOfNodesToScore = pointer.Int32(-1) + profilePercentageOfNodesToScoreNegative.Profiles[1].PercentageOfNodesToScore = ptr.To[int32](-1) differentQueueSort := validConfig.DeepCopy() differentQueueSort.Profiles[1].Plugins.QueueSort.Enabled[0].Name = "AnotherSort" diff --git a/pkg/scheduler/extender_test.go b/pkg/scheduler/extender_test.go index fc398a48cab02..44c3daee70cc3 100644 --- a/pkg/scheduler/extender_test.go +++ b/pkg/scheduler/extender_test.go @@ -31,38 +31,38 @@ import ( extenderv1 "k8s.io/kube-scheduler/extender/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/fake" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort" "k8s.io/kubernetes/pkg/scheduler/framework/runtime" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" st "k8s.io/kubernetes/pkg/scheduler/testing" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" ) func TestSchedulerWithExtenders(t *testing.T) { tests := []struct { name string - registerPlugins []st.RegisterPluginFunc - extenders []st.FakeExtender + registerPlugins []tf.RegisterPluginFunc + extenders []tf.FakeExtender nodes []string expectedResult ScheduleResult expectsErr bool }{ { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, }, { ExtenderName: "FakeExtender2", - Predicates: []st.FitPredicate{st.ErrorPredicateExtender}, + Predicates: []tf.FitPredicate{tf.ErrorPredicateExtender}, }, }, nodes: []string{"node1", "node2"}, @@ -70,19 +70,19 @@ func TestSchedulerWithExtenders(t *testing.T) { name: "test 1", }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + 
tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, }, { ExtenderName: "FakeExtender2", - Predicates: []st.FitPredicate{st.FalsePredicateExtender}, + Predicates: []tf.FitPredicate{tf.FalsePredicateExtender}, }, }, nodes: []string{"node1", "node2"}, @@ -90,19 +90,19 @@ func TestSchedulerWithExtenders(t *testing.T) { name: "test 2", }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, }, { ExtenderName: "FakeExtender2", - Predicates: []st.FitPredicate{st.Node1PredicateExtender}, + Predicates: []tf.FitPredicate{tf.Node1PredicateExtender}, }, }, nodes: []string{"node1", "node2"}, @@ -114,19 +114,19 @@ func TestSchedulerWithExtenders(t *testing.T) { name: "test 3", }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.Node2PredicateExtender}, + Predicates: []tf.FitPredicate{tf.Node2PredicateExtender}, }, { ExtenderName: "FakeExtender2", - Predicates: []st.FitPredicate{st.Node1PredicateExtender}, + Predicates: []tf.FitPredicate{tf.Node1PredicateExtender}, }, }, nodes: []string{"node1", "node2"}, @@ -134,16 +134,16 @@ func TestSchedulerWithExtenders(t *testing.T) { name: "test 4", }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, - Prioritizers: []st.PriorityConfig{{Function: st.ErrorPrioritizerExtender, Weight: 10}}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, + Prioritizers: []tf.PriorityConfig{{Function: tf.ErrorPrioritizerExtender, Weight: 10}}, Weight: 1, }, }, @@ -156,22 +156,22 @@ func TestSchedulerWithExtenders(t *testing.T) { name: "test 5", }, { - 
registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, - Prioritizers: []st.PriorityConfig{{Function: st.Node1PrioritizerExtender, Weight: 10}}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, + Prioritizers: []tf.PriorityConfig{{Function: tf.Node1PrioritizerExtender, Weight: 10}}, Weight: 1, }, { ExtenderName: "FakeExtender2", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, - Prioritizers: []st.PriorityConfig{{Function: st.Node2PrioritizerExtender, Weight: 10}}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, + Prioritizers: []tf.PriorityConfig{{Function: tf.Node2PrioritizerExtender, Weight: 10}}, Weight: 5, }, }, @@ -184,17 +184,17 @@ func TestSchedulerWithExtenders(t *testing.T) { name: "test 6", }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterScorePlugin("Node2Prioritizer", st.NewNode2PrioritizerPlugin(), 20), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterScorePlugin("Node2Prioritizer", tf.NewNode2PrioritizerPlugin(), 20), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, - Prioritizers: []st.PriorityConfig{{Function: st.Node1PrioritizerExtender, Weight: 10}}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, + Prioritizers: []tf.PriorityConfig{{Function: tf.Node1PrioritizerExtender, Weight: 10}}, Weight: 1, }, }, @@ -214,17 +214,17 @@ func TestSchedulerWithExtenders(t *testing.T) { // If scheduler sends the pod by mistake, the test would fail // because of the errors from errorPredicateExtender and/or // errorPrioritizerExtender. 
- registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterScorePlugin("Node2Prioritizer", st.NewNode2PrioritizerPlugin(), 1), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterScorePlugin("Node2Prioritizer", tf.NewNode2PrioritizerPlugin(), 1), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.ErrorPredicateExtender}, - Prioritizers: []st.PriorityConfig{{Function: st.ErrorPrioritizerExtender, Weight: 10}}, + Predicates: []tf.FitPredicate{tf.ErrorPredicateExtender}, + Prioritizers: []tf.PriorityConfig{{Function: tf.ErrorPrioritizerExtender, Weight: 10}}, UnInterested: true, }, }, @@ -243,20 +243,20 @@ func TestSchedulerWithExtenders(t *testing.T) { // // If scheduler did not ignore the extender, the test would fail // because of the errors from errorPredicateExtender. - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.ErrorPredicateExtender}, + Predicates: []tf.FitPredicate{tf.ErrorPredicateExtender}, Ignorable: true, }, { ExtenderName: "FakeExtender2", - Predicates: []st.FitPredicate{st.Node1PredicateExtender}, + Predicates: []tf.FitPredicate{tf.Node1PredicateExtender}, }, }, nodes: []string{"node1", "node2"}, @@ -287,7 +287,7 @@ func TestSchedulerWithExtenders(t *testing.T) { for _, name := range test.nodes { cache.AddNode(logger, createNode(name)) } - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, test.registerPlugins, "", runtime.WithClientSet(client), @@ -510,7 +510,7 @@ func TestConvertToVictims(t *testing.T) { nodeInfo.AddPod(tt.podsInNodeList[i+2]) nodeInfoList = append(nodeInfoList, nodeInfo) } - tt.nodeInfos = fake.NodeInfoLister(nodeInfoList) + tt.nodeInfos = tf.NodeInfoLister(nodeInfoList) got, err := tt.httpExtender.convertToVictims(tt.nodeNameToMetaVictims, tt.nodeInfos) if (err != nil) != tt.wantErr { diff --git a/pkg/scheduler/framework/interface.go b/pkg/scheduler/framework/interface.go index 6fc1564e38708..409b6b95a42bc 100644 --- a/pkg/scheduler/framework/interface.go +++ b/pkg/scheduler/framework/interface.go @@ -73,22 +73,35 @@ type PluginScore struct { type Code int // These are predefined codes used in a Status. +// Note: when you add a new status, you have to add it in `codes` slice below. const ( // Success means that plugin ran correctly and found pod schedulable. // NOTE: A nil status is also considered as "Success". Success Code = iota - // Error is used for internal plugin errors, unexpected input, etc. + // Error is one of the failures, used for internal plugin errors, unexpected input, etc. 
+ // Plugin shouldn't return this code for expected failures, like Unschedulable. + // Since it's the unexpected failure, the scheduling queue registers the pod without unschedulable plugins. + // Meaning, the Pod will be requeued to activeQ/backoffQ soon. Error - // Unschedulable is used when a plugin finds a pod unschedulable. The scheduler might attempt to + // Unschedulable is one of the failures, used when a plugin finds a pod unschedulable. + // If it's returned from PreFilter or Filter, the scheduler might attempt to // run other postFilter plugins like preemption to get this pod scheduled. // Use UnschedulableAndUnresolvable to make the scheduler skipping other postFilter plugins. // The accompanying status message should explain why the pod is unschedulable. + // + // We regard the backoff as a penalty of wasting the scheduling cycle. + // When the scheduling queue requeues Pods, which was rejected with Unschedulable in the last scheduling, + // the Pod goes through backoff. Unschedulable // UnschedulableAndUnresolvable is used when a plugin finds a pod unschedulable and // other postFilter plugins like preemption would not change anything. // Plugins should return Unschedulable if it is possible that the pod can get scheduled // after running other postFilter plugins. // The accompanying status message should explain why the pod is unschedulable. + // + // We regard the backoff as a penalty of wasting the scheduling cycle. + // When the scheduling queue requeues Pods, which was rejected with Unschedulable in the last scheduling, + // the Pod goes through backoff. UnschedulableAndUnresolvable // Wait is used when a Permit plugin finds a pod scheduling should wait. Wait @@ -97,10 +110,27 @@ const ( // - when a PreFilter plugin returns Skip so that coupled Filter plugin/PreFilterExtensions() will be skipped. // - when a PreScore plugin returns Skip so that coupled Score plugin will be skipped. Skip + // Pending means that the scheduling process is finished successfully, + // but the plugin wants to stop the scheduling cycle/binding cycle here. + // + // For example, the DRA plugin sometimes needs to wait for the external device driver + // to provision the resource for the Pod. + // It's different from when to return Unschedulable/UnschedulableAndUnresolvable, + // because in this case, the scheduler decides where the Pod can go successfully, + // but we need to wait for the external component to do something based on that scheduling result. + // + // We regard the backoff as a penalty of wasting the scheduling cycle. + // In the case of returning Pending, we cannot say the scheduling cycle is wasted + // because the scheduling result is used to proceed the Pod's scheduling forward, + // that particular scheduling cycle is failed though. + // So, Pods rejected by such reasons don't need to suffer a penalty (backoff). + // When the scheduling queue requeues Pods, which was rejected with Pending in the last scheduling, + // the Pod goes to activeQ directly ignoring backoff. + Pending ) // This list should be exactly the same as the codes iota defined above in the same order. 
-var codes = []string{"Success", "Error", "Unschedulable", "UnschedulableAndUnresolvable", "Wait", "Skip"} +var codes = []string{"Success", "Error", "Unschedulable", "UnschedulableAndUnresolvable", "Wait", "Skip", "Pending"} func (c Code) String() string { return codes[c] @@ -150,9 +180,9 @@ type Status struct { code Code reasons []string err error - // failedPlugin is an optional field that records the plugin name a Pod failed by. - // It's set by the framework when code is Error, Unschedulable or UnschedulableAndUnresolvable. - failedPlugin string + // plugin is an optional field that records the plugin name causes this status. + // It's set by the framework when code is Unschedulable, UnschedulableAndUnresolvable or Pending. + plugin string } func (s *Status) WithError(err error) *Status { @@ -176,21 +206,21 @@ func (s *Status) Message() string { return strings.Join(s.Reasons(), ", ") } -// SetFailedPlugin sets the given plugin name to s.failedPlugin. -func (s *Status) SetFailedPlugin(plugin string) { - s.failedPlugin = plugin +// SetPlugin sets the given plugin name to s.plugin. +func (s *Status) SetPlugin(plugin string) { + s.plugin = plugin } -// WithFailedPlugin sets the given plugin name to s.failedPlugin, +// WithPlugin sets the given plugin name to s.plugin, // and returns the given status object. -func (s *Status) WithFailedPlugin(plugin string) *Status { - s.SetFailedPlugin(plugin) +func (s *Status) WithPlugin(plugin string) *Status { + s.SetPlugin(plugin) return s } -// FailedPlugin returns the failed plugin name. -func (s *Status) FailedPlugin() string { - return s.failedPlugin +// Plugin returns the plugin name which caused this status. +func (s *Status) Plugin() string { + return s.plugin } // Reasons returns reasons of the Status. @@ -221,10 +251,10 @@ func (s *Status) IsSkip() bool { return s.Code() == Skip } -// IsUnschedulable returns true if "Status" is Unschedulable (Unschedulable or UnschedulableAndUnresolvable). -func (s *Status) IsUnschedulable() bool { +// IsRejected returns true if "Status" is Unschedulable (Unschedulable, UnschedulableAndUnresolvable, or Pending). +func (s *Status) IsRejected() bool { code := s.Code() - return code == Unschedulable || code == UnschedulableAndUnresolvable + return code == Unschedulable || code == UnschedulableAndUnresolvable || code == Pending } // AsError returns nil if the status is a success, a wait or a skip; otherwise returns an "error" object @@ -254,7 +284,7 @@ func (s *Status) Equal(x *Status) bool { if !cmp.Equal(s.reasons, x.reasons) { return false } - return cmp.Equal(s.failedPlugin, x.failedPlugin) + return cmp.Equal(s.plugin, x.plugin) } // NewStatus makes a Status out of the given arguments and returns its pointer. diff --git a/pkg/scheduler/framework/interface_test.go b/pkg/scheduler/framework/interface_test.go index cebab3bc2ed84..05e603484b63e 100644 --- a/pkg/scheduler/framework/interface_test.go +++ b/pkg/scheduler/framework/interface_test.go @@ -123,22 +123,6 @@ func TestStatus(t *testing.T) { } } -// The String() method relies on the value and order of the status codes to function properly. 
-func TestStatusCodes(t *testing.T) { - assertStatusCode(t, Success, 0) - assertStatusCode(t, Error, 1) - assertStatusCode(t, Unschedulable, 2) - assertStatusCode(t, UnschedulableAndUnresolvable, 3) - assertStatusCode(t, Wait, 4) - assertStatusCode(t, Skip, 5) -} - -func assertStatusCode(t *testing.T, code Code, value int) { - if int(code) != value { - t.Errorf("Status code %q should have a value of %v but got %v", code.String(), value, int(code)) - } -} - func TestPreFilterResultMerge(t *testing.T) { tests := map[string]struct { receiver *PreFilterResult diff --git a/pkg/scheduler/framework/plugins/defaultbinder/default_binder.go b/pkg/scheduler/framework/plugins/defaultbinder/default_binder.go index ead8818748192..aa1ed6a69437e 100644 --- a/pkg/scheduler/framework/plugins/defaultbinder/default_binder.go +++ b/pkg/scheduler/framework/plugins/defaultbinder/default_binder.go @@ -38,7 +38,7 @@ type DefaultBinder struct { var _ framework.BindPlugin = &DefaultBinder{} // New creates a DefaultBinder. -func New(_ runtime.Object, handle framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, handle framework.Handle) (framework.Plugin, error) { return &DefaultBinder{handle: handle}, nil } diff --git a/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go b/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go index b54676fe010b5..d77080ee3ea25 100644 --- a/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go +++ b/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption.go @@ -63,7 +63,7 @@ func (pl *DefaultPreemption) Name() string { } // New initializes a new plugin and returns it. -func New(dpArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) { +func New(_ context.Context, dpArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) { args, ok := dpArgs.(*config.DefaultPreemptionArgs) if !ok { return nil, fmt.Errorf("got args of type %T, want *DefaultPreemptionArgs", dpArgs) diff --git a/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go b/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go index 16d17280adc19..0e24a7e9d54f5 100644 --- a/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go +++ b/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go @@ -56,6 +56,7 @@ import ( internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" st "k8s.io/kubernetes/pkg/scheduler/testing" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" ) var ( @@ -103,7 +104,7 @@ type TestPlugin struct { name string } -func newTestPlugin(injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { +func newTestPlugin(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { return &TestPlugin{name: "test-plugin"}, nil } @@ -228,9 +229,9 @@ func TestPostFilter(t *testing.T) { "node1": framework.NewStatus(framework.Unschedulable), "node2": framework.NewStatus(framework.Unschedulable), }, - extender: &st.FakeExtender{ + extender: &tf.FakeExtender{ ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.Node1PredicateExtender}, + Predicates: []tf.FitPredicate{tf.Node1PredicateExtender}, }, wantResult: framework.NewPostFilterResultWithNominatedNode("node1"), wantStatus: framework.NewStatus(framework.Success), @@ -342,11 +343,11 @@ 
func TestPostFilter(t *testing.T) { podInformer.GetStore().Add(tt.pods[i]) } // Register NodeResourceFit as the Filter & PreFilter plugin. - registeredPlugins := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), - st.RegisterPluginAsExtensions("test-plugin", newTestPlugin, "PreFilter"), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registeredPlugins := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + tf.RegisterPluginAsExtensions("test-plugin", newTestPlugin, "PreFilter"), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), } var extenders []framework.Extender if tt.extender != nil { @@ -355,7 +356,7 @@ func TestPostFilter(t *testing.T) { logger, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() - f, err := st.NewFramework(ctx, registeredPlugins, "", + f, err := tf.NewFramework(ctx, registeredPlugins, "", frameworkruntime.WithClientSet(cs), frameworkruntime.WithEventRecorder(&events.FakeRecorder{}), frameworkruntime.WithInformerFactory(informerFactory), @@ -410,7 +411,7 @@ func TestDryRunPreemption(t *testing.T) { nodeNames []string testPods []*v1.Pod initPods []*v1.Pod - registerPlugins []st.RegisterPluginFunc + registerPlugins []tf.RegisterPluginFunc pdbs []*policy.PodDisruptionBudget fakeFilterRC framework.Code // return code for fake filter plugin disableParallelism bool @@ -419,8 +420,8 @@ func TestDryRunPreemption(t *testing.T) { }{ { name: "a pod that does not fit on any node", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("FalseFilter", st.NewFalseFilterPlugin), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -435,8 +436,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "a pod that fits with no preemption", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -452,8 +453,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "a pod that fits on one node with no preemption", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterFilterPlugin("MatchFilter", st.NewMatchFilterPlugin), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -470,8 +471,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "a pod that fits on both nodes when lower priority pods are preempted", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -501,8 +502,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "a pod that would fit on the nodes, but other pods running are higher priority, no preemption would happen", - registerPlugins: []st.RegisterPluginFunc{ - 
st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -517,8 +518,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "medium priority pod is preempted, but lower priority one stays as it is small", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -549,8 +550,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "mixed priority pods are preempted", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -580,8 +581,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "mixed priority pods are preempted, pick later StartTime one when priorities are equal", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -611,9 +612,9 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "pod with anti-affinity is preempted", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), - st.RegisterPluginAsExtensions(interpodaffinity.Name, interpodaffinity.New, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + tf.RegisterPluginAsExtensions(interpodaffinity.Name, interpodaffinity.New, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -643,8 +644,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "preemption to resolve pod topology spread filter failure", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(podtopologyspread.Name, podTopologySpreadFunc, "PreFilter", "Filter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(podtopologyspread.Name, podTopologySpreadFunc, "PreFilter", "Filter"), }, nodeNames: []string{"node-a/zone1", "node-b/zone1", "node-x/zone2"}, testPods: []*v1.Pod{ @@ -680,8 +681,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "get Unschedulable in the preemption phase when the filter plugins filtering the nodes", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2"}, testPods: []*v1.Pod{ @@ -697,8 +698,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "preemption with 
violation of same pdb", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1"}, testPods: []*v1.Pod{ @@ -732,8 +733,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "preemption with violation of the pdb with pod whose eviction was processed, the victim doesn't belong to DisruptedPods", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1"}, testPods: []*v1.Pod{ @@ -767,8 +768,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "preemption with violation of the pdb with pod whose eviction was processed, the victim belongs to DisruptedPods", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1"}, testPods: []*v1.Pod{ @@ -802,8 +803,8 @@ func TestDryRunPreemption(t *testing.T) { }, { name: "preemption with violation of the pdb with pod whose eviction was processed, the victim which belongs to DisruptedPods is treated as 'nonViolating'", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1"}, testPods: []*v1.Pod{ @@ -840,8 +841,8 @@ func TestDryRunPreemption(t *testing.T) { { name: "all nodes are possible candidates, but DefaultPreemptionArgs limits to 2", args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1}, - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2", "node3", "node4", "node5"}, testPods: []*v1.Pod{ @@ -877,8 +878,8 @@ func TestDryRunPreemption(t *testing.T) { { name: "some nodes are not possible candidates, DefaultPreemptionArgs limits to 2", args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1}, - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2", "node3", "node4", "node5"}, testPods: []*v1.Pod{ @@ -914,8 +915,8 @@ func TestDryRunPreemption(t *testing.T) { { name: "preemption offset across multiple scheduling cycles and wrap around", args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1}, - registerPlugins: []st.RegisterPluginFunc{ - 
st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2", "node3", "node4", "node5"}, testPods: []*v1.Pod{ @@ -983,8 +984,8 @@ func TestDryRunPreemption(t *testing.T) { { name: "preemption looks past numCandidates until a non-PDB violating node is found", args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 2}, - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), }, nodeNames: []string{"node1", "node2", "node3", "node4", "node5"}, testPods: []*v1.Pod{ @@ -1059,18 +1060,18 @@ func TestDryRunPreemption(t *testing.T) { snapshot := internalcache.NewSnapshot(tt.initPods, nodes) // For each test, register a FakeFilterPlugin along with essential plugins and tt.registerPlugins. - fakePlugin := st.FakeFilterPlugin{ + fakePlugin := tf.FakeFilterPlugin{ FailedNodeReturnCodeMap: fakeFilterRCMap, } - registeredPlugins := append([]st.RegisterPluginFunc{ - st.RegisterFilterPlugin( + registeredPlugins := append([]tf.RegisterPluginFunc{ + tf.RegisterFilterPlugin( "FakeFilter", - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return &fakePlugin, nil }, )}, - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), ) registeredPlugins = append(registeredPlugins, tt.registerPlugins...) 
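The factory closure registered just above illustrates the signature change that recurs throughout these test hunks: plugin constructors now receive a context.Context as their first argument, and the test helpers move from the st package to tf. A minimal sketch of the new shape, assuming the framework/tf identifiers visible in the hunks above; newExamplePlugin and examplePlugin are hypothetical stand-ins, not code from this change:

// Sketch only - a constructor under the context-aware factory signature.
func newExamplePlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
	// examplePlugin is a placeholder for any concrete plugin type.
	return &examplePlugin{}, nil
}

// Registration with the relocated testing helpers would then look like:
//   tf.RegisterFilterPlugin("Example", newExamplePlugin)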
var objs []runtime.Object @@ -1092,7 +1093,7 @@ func TestDryRunPreemption(t *testing.T) { logger, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, registeredPlugins, "", frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())), @@ -1176,7 +1177,7 @@ func TestDryRunPreemption(t *testing.T) { func TestSelectBestCandidate(t *testing.T) { tests := []struct { name string - registerPlugin st.RegisterPluginFunc + registerPlugin tf.RegisterPluginFunc nodeNames []string pod *v1.Pod pods []*v1.Pod @@ -1184,7 +1185,7 @@ func TestSelectBestCandidate(t *testing.T) { }{ { name: "a pod that fits on both nodes when lower priority pods are preempted", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2"}, pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(largeRes).Obj(), pods: []*v1.Pod{ @@ -1195,7 +1196,7 @@ func TestSelectBestCandidate(t *testing.T) { }, { name: "node with min highest priority pod is picked", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(), pods: []*v1.Pod{ @@ -1210,7 +1211,7 @@ func TestSelectBestCandidate(t *testing.T) { }, { name: "when highest priorities are the same, minimum sum of priorities is picked", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(), pods: []*v1.Pod{ @@ -1225,7 +1226,7 @@ func TestSelectBestCandidate(t *testing.T) { }, { name: "when highest priority and sum are the same, minimum number of pods is picked", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(), pods: []*v1.Pod{ @@ -1245,7 +1246,7 @@ func TestSelectBestCandidate(t *testing.T) { // pickOneNodeForPreemption adjusts pod priorities when finding the sum of the victims. This // test ensures that the logic works correctly. 
name: "sum of adjusted priorities is considered", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(), pods: []*v1.Pod{ @@ -1262,7 +1263,7 @@ func TestSelectBestCandidate(t *testing.T) { }, { name: "non-overlapping lowest high priority, sum priorities, and number of pods", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2", "node3", "node4"}, pod: st.MakePod().Name("p").UID("p").Priority(veryHighPriority).Req(veryLargeRes).Obj(), pods: []*v1.Pod{ @@ -1283,7 +1284,7 @@ func TestSelectBestCandidate(t *testing.T) { }, { name: "same priority, same number of victims, different start time for each node's pod", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(), pods: []*v1.Pod{ @@ -1298,7 +1299,7 @@ func TestSelectBestCandidate(t *testing.T) { }, { name: "same priority, same number of victims, different start time for all pods", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(), pods: []*v1.Pod{ @@ -1313,7 +1314,7 @@ func TestSelectBestCandidate(t *testing.T) { }, { name: "different priority, same number of victims, different start time for all pods", - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), nodeNames: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(), pods: []*v1.Pod{ @@ -1346,12 +1347,12 @@ func TestSelectBestCandidate(t *testing.T) { logger, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, - []st.RegisterPluginFunc{ + []tf.RegisterPluginFunc{ tt.registerPlugin, - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, "", frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())), @@ -1493,11 +1494,11 @@ func TestPodEligibleToPreemptOthers(t *testing.T) { for _, n := range test.nodes { nodes = append(nodes, st.MakeNode().Name(n).Obj()) } - registeredPlugins := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - 
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registeredPlugins := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), } - f, err := st.NewFramework(ctx, registeredPlugins, "", + f, err := tf.NewFramework(ctx, registeredPlugins, "", frameworkruntime.WithSnapshotSharedLister(internalcache.NewSnapshot(test.pods, nodes)), frameworkruntime.WithLogger(logger), ) @@ -1516,9 +1517,9 @@ func TestPreempt(t *testing.T) { name string pod *v1.Pod pods []*v1.Pod - extenders []*st.FakeExtender + extenders []*tf.FakeExtender nodeNames []string - registerPlugin st.RegisterPluginFunc + registerPlugin tf.RegisterPluginFunc want *framework.PostFilterResult expectedPods []string // list of preempted pods }{ @@ -1532,7 +1533,7 @@ func TestPreempt(t *testing.T) { st.MakePod().Name("p3.1").UID("p3.1").Node("node3").Priority(midPriority).Req(mediumRes).Obj(), }, nodeNames: []string{"node1", "node2", "node3"}, - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), want: framework.NewPostFilterResultWithNominatedNode("node1"), expectedPods: []string{"p1.1", "p1.2"}, }, @@ -1550,7 +1551,7 @@ func TestPreempt(t *testing.T) { st.MakePod().Name("p-x2").UID("p-x2").Namespace(v1.NamespaceDefault).Node("node-x").Label("foo", "").Priority(highPriority).Obj(), }, nodeNames: []string{"node-a/zone1", "node-b/zone1", "node-x/zone2"}, - registerPlugin: st.RegisterPluginAsExtensions(podtopologyspread.Name, podTopologySpreadFunc, "PreFilter", "Filter"), + registerPlugin: tf.RegisterPluginAsExtensions(podtopologyspread.Name, podTopologySpreadFunc, "PreFilter", "Filter"), want: framework.NewPostFilterResultWithNominatedNode("node-b"), expectedPods: []string{"p-b1"}, }, @@ -1563,17 +1564,17 @@ func TestPreempt(t *testing.T) { st.MakePod().Name("p2.1").UID("p2.1").Namespace(v1.NamespaceDefault).Node("node3").Priority(midPriority).Req(largeRes).Obj(), }, nodeNames: []string{"node1", "node2", "node3"}, - extenders: []*st.FakeExtender{ + extenders: []*tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, }, { ExtenderName: "FakeExtender2", - Predicates: []st.FitPredicate{st.Node1PredicateExtender}, + Predicates: []tf.FitPredicate{tf.Node1PredicateExtender}, }, }, - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), want: framework.NewPostFilterResultWithNominatedNode("node1"), expectedPods: []string{"p1.1", "p1.2"}, }, @@ -1586,13 +1587,13 @@ func TestPreempt(t *testing.T) { st.MakePod().Name("p2.1").UID("p2.1").Namespace(v1.NamespaceDefault).Node("node2").Priority(midPriority).Req(largeRes).Obj(), }, nodeNames: []string{"node1", "node2", "node3"}, - extenders: []*st.FakeExtender{ + extenders: []*tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.FalsePredicateExtender}, + Predicates: []tf.FitPredicate{tf.FalsePredicateExtender}, }, }, - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, 
"Filter", "PreFilter"), want: nil, expectedPods: []string{}, }, @@ -1605,18 +1606,18 @@ func TestPreempt(t *testing.T) { st.MakePod().Name("p2.1").UID("p2.1").Namespace(v1.NamespaceDefault).Node("node2").Priority(midPriority).Req(largeRes).Obj(), }, nodeNames: []string{"node1", "node2", "node3"}, - extenders: []*st.FakeExtender{ + extenders: []*tf.FakeExtender{ { - Predicates: []st.FitPredicate{st.ErrorPredicateExtender}, + Predicates: []tf.FitPredicate{tf.ErrorPredicateExtender}, Ignorable: true, ExtenderName: "FakeExtender1", }, { - Predicates: []st.FitPredicate{st.Node1PredicateExtender}, + Predicates: []tf.FitPredicate{tf.Node1PredicateExtender}, ExtenderName: "FakeExtender2", }, }, - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), want: framework.NewPostFilterResultWithNominatedNode("node1"), expectedPods: []string{"p1.1", "p1.2"}, }, @@ -1629,18 +1630,18 @@ func TestPreempt(t *testing.T) { st.MakePod().Name("p2.1").UID("p2.1").Namespace(v1.NamespaceDefault).Node("node2").Priority(midPriority).Req(largeRes).Obj(), }, nodeNames: []string{"node1", "node2"}, - extenders: []*st.FakeExtender{ + extenders: []*tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.Node1PredicateExtender}, + Predicates: []tf.FitPredicate{tf.Node1PredicateExtender}, UnInterested: true, }, { ExtenderName: "FakeExtender2", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, }, }, - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), // sum of priorities of all victims on node1 is larger than node2, node2 is chosen. 
want: framework.NewPostFilterResultWithNominatedNode("node2"), expectedPods: []string{"p2.1"}, @@ -1655,7 +1656,7 @@ func TestPreempt(t *testing.T) { st.MakePod().Name("p3.1").UID("p3.1").Namespace(v1.NamespaceDefault).Node("node3").Priority(midPriority).Req(mediumRes).Obj(), }, nodeNames: []string{"node1", "node2", "node3"}, - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), want: nil, expectedPods: nil, }, @@ -1669,7 +1670,7 @@ func TestPreempt(t *testing.T) { st.MakePod().Name("p3.1").UID("p3.1").Namespace(v1.NamespaceDefault).Node("node3").Priority(midPriority).Req(mediumRes).Obj(), }, nodeNames: []string{"node1", "node2", "node3"}, - registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), + registerPlugin: tf.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"), want: framework.NewPostFilterResultWithNominatedNode("node1"), expectedPods: []string{"p1.1", "p1.2"}, }, @@ -1730,12 +1731,12 @@ func TestPreempt(t *testing.T) { extender.CachedNodeNameToInfo = cachedNodeInfoMap extenders = append(extenders, extender) } - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, - []st.RegisterPluginFunc{ + []tf.RegisterPluginFunc{ test.registerPlugin, - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, "", frameworkruntime.WithClientSet(client), @@ -1772,7 +1773,7 @@ func TestPreempt(t *testing.T) { Interface: &pl, } res, status := pe.Preempt(ctx, test.pod, make(framework.NodeToStatusMap)) - if !status.IsSuccess() && !status.IsUnschedulable() { + if !status.IsSuccess() && !status.IsRejected() { t.Errorf("unexpected error in preemption: %v", status.AsError()) } if diff := cmp.Diff(test.want, res); diff != "" { @@ -1811,7 +1812,7 @@ func TestPreempt(t *testing.T) { // Call preempt again and make sure it doesn't preempt any more pods. 
res, status = pe.Preempt(ctx, test.pod, make(framework.NodeToStatusMap)) - if !status.IsSuccess() && !status.IsUnschedulable() { + if !status.IsSuccess() && !status.IsRejected() { t.Errorf("unexpected error in preemption: %v", status.AsError()) } if res != nil && res.NominatingInfo != nil && len(deletedPodNames) > 0 { diff --git a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go index 4f70fba83a2c1..382175b35a687 100644 --- a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go +++ b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + resourcev1alpha2apply "k8s.io/client-go/applyconfigurations/resource/v1alpha2" "k8s.io/client-go/kubernetes" resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2" "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" @@ -42,6 +43,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" schedutil "k8s.io/kubernetes/pkg/scheduler/util" + "k8s.io/utils/ptr" ) const ( @@ -74,7 +76,7 @@ type stateData struct { // // Set in parallel during Filter, so write access there must be // protected by the mutex. Used by PostFilter. - unavailableClaims sets.Int + unavailableClaims sets.Set[int] // podSchedulingState keeps track of the PodSchedulingContext // (if one exists) and the changes made to it. @@ -186,6 +188,41 @@ func (p *podSchedulingState) publish(ctx context.Context, pod *v1.Pod, clientset logger.V(5).Info("Updating PodSchedulingContext", "podSchedulingCtx", klog.KObj(schedulingCtx)) } _, err = clientset.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).Update(ctx, schedulingCtx, metav1.UpdateOptions{}) + if apierrors.IsConflict(err) { + // We don't use SSA by default for performance reasons + // (https://github.com/kubernetes/kubernetes/issues/113700#issuecomment-1698563918) + // because most of the time an Update doesn't encounter + // a conflict and is faster. + // + // We could return an error here and rely on + // backoff+retry, but scheduling attempts are expensive + // and the backoff delay would cause a (small) + // slowdown. Therefore we fall back to SSA here if needed. + // + // Using SSA instead of Get+Update has the advantage that + // there is no delay for the Get. SSA is safe because only + // the scheduler updates these fields. + spec := resourcev1alpha2apply.PodSchedulingContextSpec() + spec.SelectedNode = p.selectedNode + if p.potentialNodes != nil { + spec.PotentialNodes = *p.potentialNodes + } else { + // Unchanged. Has to be set because the object that we send + // must represent the "fully specified intent". Not sending + // the list would clear it. + spec.PotentialNodes = p.schedulingCtx.Spec.PotentialNodes + } + schedulingCtxApply := resourcev1alpha2apply.PodSchedulingContext(pod.Name, pod.Namespace).WithSpec(spec) + + if loggerV := logger.V(6); loggerV.Enabled() { + // At a high enough log level, dump the entire object. 
+ loggerV.Info("Patching PodSchedulingContext", "podSchedulingCtx", klog.KObj(pod), "podSchedulingCtxApply", klog.Format(schedulingCtxApply)) + } else { + logger.V(5).Info("Patching PodSchedulingContext", "podSchedulingCtx", klog.KObj(pod)) + } + _, err = clientset.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).Apply(ctx, schedulingCtxApply, metav1.ApplyOptions{FieldManager: "kube-scheduler", Force: true}) + } + } else { // Create it. schedulingCtx := &resourcev1alpha2.PodSchedulingContext{ @@ -240,7 +277,7 @@ type dynamicResources struct { } // New initializes a new plugin and returns it. -func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) { +func New(_ context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) { if !fts.EnableDynamicResourceAllocation { // Disabled, won't do anything. return &dynamicResources{}, nil @@ -305,17 +342,16 @@ func (pl *dynamicResources) PreEnqueue(ctx context.Context, pod *v1.Pod) (status // an informer. It checks whether that change made a previously unschedulable // pod schedulable. It errs on the side of letting a pod scheduling attempt // happen. -func (pl *dynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) framework.QueueingHint { +func (pl *dynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { if newObj == nil { // Deletes don't make a pod schedulable. - return framework.QueueSkip + return framework.QueueSkip, nil } - _, modifiedClaim, err := schedutil.As[*resourcev1alpha2.ResourceClaim](nil, newObj) + originalClaim, modifiedClaim, err := schedutil.As[*resourcev1alpha2.ResourceClaim](oldObj, newObj) if err != nil { // Shouldn't happen. - logger.Error(err, "unexpected new object in isSchedulableAfterClaimChange") - return framework.QueueAfterBackoff + return framework.Queue, fmt.Errorf("unexpected object in isSchedulableAfterClaimChange: %w", err) } usesClaim := false @@ -328,30 +364,24 @@ func (pl *dynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, po // foreachPodResourceClaim only returns errors for "not // schedulable". logger.V(4).Info("pod is not schedulable", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim), "reason", err.Error()) - return framework.QueueSkip + return framework.QueueSkip, nil } if !usesClaim { // This was not the claim the pod was waiting for. logger.V(6).Info("unrelated claim got modified", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) - return framework.QueueSkip + return framework.QueueSkip, nil } - if oldObj == nil { + if originalClaim == nil { logger.V(4).Info("claim for pod got created", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) - return framework.QueueImmediately + return framework.Queue, nil } // Modifications may or may not be relevant. If the entire // status is as before, then something else must have changed // and we don't care. What happens in practice is that the // resource driver adds the finalizer. - originalClaim, ok := oldObj.(*resourcev1alpha2.ResourceClaim) - if !ok { - // Shouldn't happen. - logger.Error(nil, "unexpected old object in isSchedulableAfterClaimAddOrUpdate", "obj", oldObj) - return framework.QueueAfterBackoff - } if apiequality.Semantic.DeepEqual(&originalClaim.Status, &modifiedClaim.Status) { if loggerV := logger.V(7); loggerV.Enabled() { // Log more information. 
@@ -359,11 +389,11 @@ func (pl *dynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, po } else { logger.V(6).Info("claim for pod got modified where the pod doesn't care", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) } - return framework.QueueSkip + return framework.QueueSkip, nil } logger.V(4).Info("status of claim for pod got updated", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) - return framework.QueueImmediately + return framework.Queue, nil } // isSchedulableAfterPodSchedulingContextChange is invoked for all @@ -371,25 +401,24 @@ func (pl *dynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, po // change made a previously unschedulable pod schedulable (updated) or a new // attempt is needed to re-create the object (deleted). It errs on the side of // letting a pod scheduling attempt happen. -func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) framework.QueueingHint { +func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { // Deleted? That can happen because we ourselves delete the PodSchedulingContext while // working on the pod. This can be ignored. if oldObj != nil && newObj == nil { logger.V(4).Info("PodSchedulingContext got deleted") - return framework.QueueSkip + return framework.QueueSkip, nil } oldPodScheduling, newPodScheduling, err := schedutil.As[*resourcev1alpha2.PodSchedulingContext](oldObj, newObj) if err != nil { // Shouldn't happen. - logger.Error(nil, "isSchedulableAfterPodSchedulingChange") - return framework.QueueAfterBackoff + return framework.Queue, fmt.Errorf("unexpected object in isSchedulableAfterPodSchedulingContextChange: %w", err) } podScheduling := newPodScheduling // Never nil because deletes are handled above. if podScheduling.Name != pod.Name || podScheduling.Namespace != pod.Namespace { logger.V(7).Info("PodSchedulingContext for unrelated pod got modified", "pod", klog.KObj(pod), "podScheduling", klog.KObj(podScheduling)) - return framework.QueueSkip + return framework.QueueSkip, nil } // If the drivers have provided information about all @@ -409,7 +438,7 @@ func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger // foreachPodResourceClaim only returns errors for "not // schedulable". logger.V(4).Info("pod is not schedulable, keep waiting", "pod", klog.KObj(pod), "reason", err.Error()) - return framework.QueueSkip + return framework.QueueSkip, nil } // Some driver responses missing? @@ -423,14 +452,14 @@ func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger } else { logger.V(5).Info("PodSchedulingContext with missing resource claim information, keep waiting", "pod", klog.KObj(pod)) } - return framework.QueueSkip + return framework.QueueSkip, nil } if oldPodScheduling == nil /* create */ || len(oldPodScheduling.Status.ResourceClaims) < len(podScheduling.Status.ResourceClaims) /* new information and not incomplete (checked above) */ { // This definitely is new information for the scheduler. Try again immediately. 
logger.V(4).Info("PodSchedulingContext for pod has all required information, schedule immediately", "pod", klog.KObj(pod)) - return framework.QueueImmediately + return framework.Queue, nil } // The other situation where the scheduler needs to do @@ -455,7 +484,7 @@ func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger for _, claimStatus := range podScheduling.Status.ResourceClaims { if sliceContains(claimStatus.UnsuitableNodes, podScheduling.Spec.SelectedNode) { logger.V(5).Info("PodSchedulingContext has unsuitable selected node, schedule immediately", "pod", klog.KObj(pod), "selectedNode", podScheduling.Spec.SelectedNode, "podResourceName", claimStatus.Name) - return framework.QueueImmediately + return framework.Queue, nil } } } @@ -465,12 +494,12 @@ func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger !apiequality.Semantic.DeepEqual(&oldPodScheduling.Spec, &podScheduling.Spec) && apiequality.Semantic.DeepEqual(&oldPodScheduling.Status, &podScheduling.Status) { logger.V(5).Info("PodSchedulingContext has only the scheduler spec changes, ignore the update", "pod", klog.KObj(pod)) - return framework.QueueSkip + return framework.QueueSkip, nil } // Once we get here, all changes which are known to require special responses // have been checked for. Whatever the change was, we don't know exactly how - // to handle it and thus return QueueAfterBackoff. This will cause the + // to handle it and thus return Queue. This will cause the // scheduler to treat the event as if no event hint callback had been provided. // Developers who want to investigate this can enable a diff at log level 6. if loggerV := logger.V(6); loggerV.Enabled() { @@ -478,7 +507,7 @@ func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger } else { logger.V(5).Info("PodSchedulingContext for pod with unknown changes, maybe schedule", "pod", klog.KObj(pod)) } - return framework.QueueAfterBackoff + return framework.Queue, nil } @@ -720,7 +749,7 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState state.mutex.Lock() defer state.mutex.Unlock() if state.unavailableClaims == nil { - state.unavailableClaims = sets.NewInt() + state.unavailableClaims = sets.New[int]() } for index := range unavailableClaims { @@ -762,6 +791,19 @@ func (pl *dynamicResources) PostFilter(ctx context.Context, cs *framework.CycleS claim := state.claims[index] if len(claim.Status.ReservedFor) == 0 || len(claim.Status.ReservedFor) == 1 && claim.Status.ReservedFor[0].UID == pod.UID { + // Before we tell a driver to deallocate a claim, we + // have to stop telling it to allocate. Otherwise, + // depending on timing, it will deallocate the claim, + // see a PodSchedulingContext with selected node, and + // allocate again for that same node. 
+ if state.podSchedulingState.schedulingCtx != nil && + state.podSchedulingState.schedulingCtx.Spec.SelectedNode != "" { + state.podSchedulingState.selectedNode = ptr.To("") + if err := state.podSchedulingState.publish(ctx, pod, pl.clientset); err != nil { + return nil, statusError(logger, err) + } + } + claim := state.claims[index].DeepCopy() claim.Status.DeallocationRequested = true claim.Status.ReservedFor = nil @@ -955,7 +997,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat if err := state.podSchedulingState.publish(ctx, pod, pl.clientset); err != nil { return statusError(logger, err) } - return statusUnschedulable(logger, "waiting for resource driver to allocate resource", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}) + return statusPending(logger, "waiting for resource driver to allocate resource", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}) } } @@ -971,7 +1013,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat // provisioning? On the one hand, volume provisioning is currently // irreversible, so it better should come last. On the other hand, // triggering both in parallel might be faster. - return statusUnschedulable(logger, "waiting for resource driver to provide information", "pod", klog.KObj(pod)) + return statusPending(logger, "waiting for resource driver to provide information", "pod", klog.KObj(pod)) } func containsNode(hay []string, needle string) bool { @@ -1067,6 +1109,21 @@ func statusUnschedulable(logger klog.Logger, reason string, kv ...interface{}) * return framework.NewStatus(framework.UnschedulableAndUnresolvable, reason) } +// statusPending ensures that there is a log message associated with the +// line where the status originated. +func statusPending(logger klog.Logger, reason string, kv ...interface{}) *framework.Status { + if loggerV := logger.V(5); loggerV.Enabled() { + helper, loggerV := loggerV.WithCallStackHelper() + helper() + kv = append(kv, "reason", reason) + // nolint: logcheck // warns because it cannot check key/values + loggerV.Info("pod waiting for external component", kv...) + } + + // When we return Pending, we want to block the Pod at the same time. + return framework.NewStatus(framework.Pending, reason) +} + // statusError ensures that there is a log message associated with the // line where the error originated. 
func statusError(logger klog.Logger, err error, kv ...interface{}) *framework.Status { diff --git a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go index b284a7bfa8f30..fbbc136c89352 100644 --- a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go +++ b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go @@ -346,7 +346,7 @@ func TestPlugin(t *testing.T) { classes: []*resourcev1alpha2.ResourceClass{resourceClass}, want: want{ reserve: result{ - status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to allocate resource`), + status: framework.NewStatus(framework.Pending, `waiting for resource driver to allocate resource`), added: []metav1.Object{schedulingSelectedPotential}, }, }, @@ -360,7 +360,7 @@ func TestPlugin(t *testing.T) { classes: []*resourcev1alpha2.ResourceClass{resourceClass}, want: want{ reserve: result{ - status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to provide information`), + status: framework.NewStatus(framework.Pending, `waiting for resource driver to provide information`), added: []metav1.Object{schedulingPotential}, }, }, @@ -374,7 +374,7 @@ func TestPlugin(t *testing.T) { classes: []*resourcev1alpha2.ResourceClass{resourceClass}, want: want{ reserve: result{ - status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to allocate resource`), + status: framework.NewStatus(framework.Pending, `waiting for resource driver to allocate resource`), changes: change{ scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext { return st.FromPodSchedulingContexts(in). @@ -640,9 +640,7 @@ func (tc *testContext) verify(t *testing.T, expected result, initialObjects []me assert.Equal(t, expected.status, status) objects := tc.listAll(t) wantObjects := update(t, initialObjects, expected.changes) - for _, add := range expected.added { - wantObjects = append(wantObjects, add) - } + wantObjects = append(wantObjects, expected.added...) 
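The dynamicresources hunks above also migrate the event hints (isSchedulableAfterClaimChange, isSchedulableAfterPodSchedulingContextChange) to the newer convention: unexpected objects surface an error paired with framework.Queue instead of being logged and answered with QueueAfterBackoff, irrelevant events return framework.QueueSkip, and potentially relevant ones return framework.Queue. A minimal sketch of that shape, assuming the imports already visible in these hunks; fooObject, eventIsRelevant, and examplePlugin are illustrative stand-ins rather than code from this change:

// Sketch only - an event hint under the (QueueingHint, error) convention.
func (pl *examplePlugin) isSchedulableAfterFooChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
	_, modified, err := schedutil.As[*fooObject](oldObj, newObj)
	if err != nil {
		// Unexpected object: return the error so the framework decides how to requeue.
		return framework.Queue, fmt.Errorf("unexpected object in isSchedulableAfterFooChange: %w", err)
	}
	if !eventIsRelevant(pod, modified) {
		// Nothing about this event can make the pod schedulable.
		return framework.QueueSkip, nil
	}
	// The event may make the pod schedulable; let the scheduler retry it.
	logger.V(4).Info("relevant object changed", "pod", klog.KObj(pod))
	return framework.Queue, nil
}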
for _, remove := range expected.removed { for i, obj := range wantObjects { // This is a bit relaxed (no GVR comparison, no UID @@ -780,7 +778,7 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceCl t.Fatal(err) } - pl, err := New(nil, fh, feature.Features{EnableDynamicResourceAllocation: true}) + pl, err := New(ctx, nil, fh, feature.Features{EnableDynamicResourceAllocation: true}) if err != nil { t.Fatal(err) } @@ -889,6 +887,7 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) { claims []*resourcev1alpha2.ResourceClaim oldObj, newObj interface{} expectedHint framework.QueueingHint + expectedErr bool }{ "skip-deletes": { pod: podWithClaimTemplate, @@ -897,9 +896,9 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) { expectedHint: framework.QueueSkip, }, "backoff-wrong-new-object": { - pod: podWithClaimTemplate, - newObj: "not-a-claim", - expectedHint: framework.QueueAfterBackoff, + pod: podWithClaimTemplate, + newObj: "not-a-claim", + expectedErr: true, }, "skip-wrong-claim": { pod: podWithClaimTemplate, @@ -924,13 +923,13 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) { "queue-on-add": { pod: podWithClaimName, newObj: pendingImmediateClaim, - expectedHint: framework.QueueImmediately, + expectedHint: framework.Queue, }, "backoff-wrong-old-object": { - pod: podWithClaimName, - oldObj: "not-a-claim", - newObj: pendingImmediateClaim, - expectedHint: framework.QueueAfterBackoff, + pod: podWithClaimName, + oldObj: "not-a-claim", + newObj: pendingImmediateClaim, + expectedErr: true, }, "skip-adding-finalizer": { pod: podWithClaimName, @@ -952,7 +951,7 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) { claim.Status.Allocation = &resourcev1alpha2.AllocationResult{} return claim }(), - expectedHint: framework.QueueImmediately, + expectedHint: framework.Queue, }, } @@ -969,7 +968,13 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) { require.NoError(t, store.Update(claim)) } } - actualHint := testCtx.p.isSchedulableAfterClaimChange(logger, tc.pod, tc.oldObj, tc.newObj) + actualHint, err := testCtx.p.isSchedulableAfterClaimChange(logger, tc.pod, tc.oldObj, tc.newObj) + if tc.expectedErr { + require.Error(t, err) + return + } + + require.NoError(t, err) require.Equal(t, tc.expectedHint, actualHint) }) } @@ -982,6 +987,7 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) { claims []*resourcev1alpha2.ResourceClaim oldObj, newObj interface{} expectedHint framework.QueueingHint + expectedErr bool }{ "skip-deleted": { pod: podWithClaimTemplate, @@ -996,18 +1002,18 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) { expectedHint: framework.QueueSkip, }, "backoff-wrong-old-object": { - pod: podWithClaimTemplate, - oldObj: "not-a-scheduling-context", - newObj: scheduling, - expectedHint: framework.QueueAfterBackoff, + pod: podWithClaimTemplate, + oldObj: "not-a-scheduling-context", + newObj: scheduling, + expectedErr: true, }, "backoff-missed-wrong-old-object": { pod: podWithClaimTemplate, oldObj: cache.DeletedFinalStateUnknown{ Obj: "not-a-scheduling-context", }, - newObj: scheduling, - expectedHint: framework.QueueAfterBackoff, + newObj: scheduling, + expectedErr: true, }, "skip-unrelated-object": { pod: podWithClaimTemplate, @@ -1020,10 +1026,10 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) { expectedHint: framework.QueueSkip, }, "backoff-wrong-new-object": { - pod: podWithClaimTemplate, - oldObj: scheduling, - newObj: "not-a-scheduling-context", - expectedHint: 
framework.QueueAfterBackoff, + pod: podWithClaimTemplate, + oldObj: scheduling, + newObj: "not-a-scheduling-context", + expectedErr: true, }, "skip-missing-claim": { pod: podWithClaimTemplate, @@ -1043,7 +1049,7 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) { claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim}, oldObj: scheduling, newObj: schedulingInfo, - expectedHint: framework.QueueImmediately, + expectedHint: framework.Queue, }, "queue-bad-selected-node": { pod: podWithClaimTemplateInStatus, @@ -1059,7 +1065,7 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) { scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(scheduling.Status.ResourceClaims[0].UnsuitableNodes, scheduling.Spec.SelectedNode) return scheduling }(), - expectedHint: framework.QueueImmediately, + expectedHint: framework.Queue, }, "skip-spec-changes": { pod: podWithClaimTemplateInStatus, @@ -1081,7 +1087,7 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) { scheduling.Finalizers = append(scheduling.Finalizers, "foo") return scheduling }(), - expectedHint: framework.QueueAfterBackoff, + expectedHint: framework.Queue, }, } @@ -1091,7 +1097,13 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) { t.Parallel() logger, _ := ktesting.NewTestContext(t) testCtx := setup(t, nil, tc.claims, nil, tc.schedulings) - actualHint := testCtx.p.isSchedulableAfterPodSchedulingContextChange(logger, tc.pod, tc.oldObj, tc.newObj) + actualHint, err := testCtx.p.isSchedulableAfterPodSchedulingContextChange(logger, tc.pod, tc.oldObj, tc.newObj) + if tc.expectedErr { + require.Error(t, err) + return + } + + require.NoError(t, err) require.Equal(t, tc.expectedHint, actualHint) }) } diff --git a/pkg/scheduler/framework/plugins/examples/multipoint/multipoint.go b/pkg/scheduler/framework/plugins/examples/multipoint/multipoint.go index f7e9bf9f4ec37..9edbbcf1803df 100644 --- a/pkg/scheduler/framework/plugins/examples/multipoint/multipoint.go +++ b/pkg/scheduler/framework/plugins/examples/multipoint/multipoint.go @@ -86,6 +86,6 @@ func (mc CommunicatingPlugin) PreBind(ctx context.Context, state *framework.Cycl } // New initializes a new plugin and returns it. -func New(_ *runtime.Unknown, _ framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ *runtime.Unknown, _ framework.Handle) (framework.Plugin, error) { return &CommunicatingPlugin{}, nil } diff --git a/pkg/scheduler/framework/plugins/examples/prebind/prebind.go b/pkg/scheduler/framework/plugins/examples/prebind/prebind.go index 2365fdc21bb50..b76892cb5c3fd 100644 --- a/pkg/scheduler/framework/plugins/examples/prebind/prebind.go +++ b/pkg/scheduler/framework/plugins/examples/prebind/prebind.go @@ -49,6 +49,6 @@ func (sr StatelessPreBindExample) PreBind(ctx context.Context, state *framework. } // New initializes a new plugin and returns it. 
-func New(_ *runtime.Unknown, _ framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ *runtime.Unknown, _ framework.Handle) (framework.Plugin, error) { return &StatelessPreBindExample{}, nil } diff --git a/pkg/scheduler/framework/plugins/examples/stateful/stateful.go b/pkg/scheduler/framework/plugins/examples/stateful/stateful.go index 2eb22468da82d..967c6fcf5fc6a 100644 --- a/pkg/scheduler/framework/plugins/examples/stateful/stateful.go +++ b/pkg/scheduler/framework/plugins/examples/stateful/stateful.go @@ -82,9 +82,9 @@ func (mp *MultipointExample) PreBind(ctx context.Context, state *framework.Cycle } // New initializes a new plugin and returns it. -func New(config *runtime.Unknown, _ framework.Handle) (framework.Plugin, error) { +func New(ctx context.Context, config *runtime.Unknown, _ framework.Handle) (framework.Plugin, error) { if config == nil { - klog.ErrorS(nil, "MultipointExample configuration cannot be empty") + klog.FromContext(ctx).Error(nil, "MultipointExample configuration cannot be empty") return nil, fmt.Errorf("MultipointExample configuration cannot be empty") } mp := MultipointExample{} diff --git a/pkg/scheduler/framework/plugins/feature/feature.go b/pkg/scheduler/framework/plugins/feature/feature.go index 4d1ee444cf766..1a21769aad0eb 100644 --- a/pkg/scheduler/framework/plugins/feature/feature.go +++ b/pkg/scheduler/framework/plugins/feature/feature.go @@ -21,7 +21,6 @@ package feature // the internal k8s features pkg. type Features struct { EnableDynamicResourceAllocation bool - EnableReadWriteOncePod bool EnableVolumeCapacityPriority bool EnableMinDomainsInPodTopologySpread bool EnableNodeInclusionPolicyInPodTopologySpread bool diff --git a/pkg/scheduler/framework/plugins/imagelocality/image_locality.go b/pkg/scheduler/framework/plugins/imagelocality/image_locality.go index 9eeab05b260b8..bdf3c6c62134e 100644 --- a/pkg/scheduler/framework/plugins/imagelocality/image_locality.go +++ b/pkg/scheduler/framework/plugins/imagelocality/image_locality.go @@ -74,7 +74,7 @@ func (pl *ImageLocality) ScoreExtensions() framework.ScoreExtensions { } // New initializes a new plugin and returns it. -func New(_ runtime.Object, h framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, h framework.Handle) (framework.Plugin, error) { return &ImageLocality{handle: h}, nil } diff --git a/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go b/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go index 26ee547f59593..8e16df35f0ead 100644 --- a/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go +++ b/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go @@ -340,7 +340,10 @@ func TestImageLocalityPriority(t *testing.T) { state := framework.NewCycleState() fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot)) - p, _ := New(nil, fh) + p, err := New(ctx, nil, fh) + if err != nil { + t.Fatalf("creating plugin: %v", err) + } var gotList framework.NodeScoreList for _, n := range test.nodes { nodeName := n.ObjectMeta.Name diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go b/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go index b7131eaf48fed..6f6c3c8feb6dc 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go @@ -17,6 +17,7 @@ limitations under the License. 
package interpodaffinity import ( + "context" "fmt" "k8s.io/apimachinery/pkg/labels" @@ -69,7 +70,7 @@ func (pl *InterPodAffinity) EventsToRegister() []framework.ClusterEventWithHint } // New initializes a new plugin and returns it. -func New(plArgs runtime.Object, h framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, plArgs runtime.Object, h framework.Handle) (framework.Plugin, error) { if h.SnapshotSharedLister() == nil { return nil, fmt.Errorf("SnapshotSharedlister is nil") } diff --git a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go index d9d431f9aebe0..9dcc65683c9f2 100644 --- a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go +++ b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go @@ -25,11 +25,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" + "k8s.io/kubernetes/pkg/scheduler/util" ) // NodeAffinity is a plugin that checks if a pod node selector matches the node label. @@ -83,10 +85,51 @@ func (s *preFilterState) Clone() framework.StateData { // failed by this plugin schedulable. func (pl *NodeAffinity) EventsToRegister() []framework.ClusterEventWithHint { return []framework.ClusterEventWithHint{ - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.Update}}, + {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterNodeChange}, } } +// isSchedulableAfterNodeChange is invoked whenever a node changed. It checks whether +// that change made a previously unschedulable pod schedulable. +func (pl *NodeAffinity) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { + originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj) + if err != nil { + return framework.Queue, err + } + + if pl.addedNodeSelector != nil && !pl.addedNodeSelector.Match(modifiedNode) { + logger.V(4).Info("added or modified node didn't match scheduler-enforced node affinity and this event won't make the Pod schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) + return framework.QueueSkip, nil + } + + requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod) + isMatched, err := requiredNodeAffinity.Match(modifiedNode) + if err != nil { + return framework.Queue, err + } + if !isMatched { + logger.V(4).Info("node was created or updated, but doesn't matches with the pod's NodeAffinity", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) + return framework.QueueSkip, nil + } + + wasMatched := false + if originalNode != nil { + wasMatched, err = requiredNodeAffinity.Match(originalNode) + if err != nil { + return framework.Queue, err + } + } + + if !wasMatched { + // This modification makes this Node match with Pod's NodeAffinity. 
+ logger.V(4).Info("node was created or updated, and matches with the pod's NodeAffinity", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) + return framework.Queue, nil + } + + logger.V(4).Info("node was created or updated, but it doesn't make this pod schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) + return framework.QueueSkip, nil +} + // PreFilter builds and writes cycle state used by Filter. func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { affinity := pod.Spec.Affinity @@ -243,7 +286,7 @@ func (pl *NodeAffinity) ScoreExtensions() framework.ScoreExtensions { } // New initializes a new plugin and returns it. -func New(plArgs runtime.Object, h framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, plArgs runtime.Object, h framework.Handle) (framework.Plugin, error) { args, err := getArgs(plArgs) if err != nil { return nil, err diff --git a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go index 69778c27a092a..9de8397df9158 100644 --- a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go +++ b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -896,6 +897,7 @@ func TestNodeAffinity(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) node := v1.Node{ObjectMeta: metav1.ObjectMeta{ Name: test.nodeName, Labels: test.labels, @@ -903,7 +905,7 @@ func TestNodeAffinity(t *testing.T) { nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(&node) - p, err := New(&test.args, nil) + p, err := New(ctx, &test.args, nil) if err != nil { t.Fatalf("Creating plugin: %v", err) } @@ -1141,7 +1143,7 @@ func TestNodeAffinityPriority(t *testing.T) { state := framework.NewCycleState() fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(cache.NewSnapshot(nil, test.nodes))) - p, err := New(&test.args, fh) + p, err := New(ctx, &test.args, fh) if err != nil { t.Fatalf("Creating plugin: %v", err) } @@ -1173,3 +1175,139 @@ func TestNodeAffinityPriority(t *testing.T) { }) } } + +func Test_isSchedulableAfterNodeChange(t *testing.T) { + podWithNodeAffinity := st.MakePod().NodeAffinityIn("foo", []string{"bar"}) + testcases := map[string]struct { + args *config.NodeAffinityArgs + pod *v1.Pod + oldObj, newObj interface{} + expectedHint framework.QueueingHint + expectedErr bool + }{ + "backoff-wrong-new-object": { + args: &config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.Obj(), + newObj: "not-a-node", + expectedHint: framework.Queue, + expectedErr: true, + }, + "backoff-wrong-old-object": { + args: &config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.Obj(), + oldObj: "not-a-node", + newObj: st.MakeNode().Obj(), + expectedHint: framework.Queue, + expectedErr: true, + }, + "skip-queue-on-add": { + args: &config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.Obj(), + newObj: st.MakeNode().Obj(), + expectedHint: framework.QueueSkip, + }, + "queue-on-add": { + args: &config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.Obj(), + newObj: st.MakeNode().Label("foo", "bar").Obj(), + expectedHint: framework.Queue, + }, + "skip-unrelated-changes": { + args: 
&config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.Obj(), + oldObj: st.MakeNode().Obj(), + newObj: st.MakeNode().Capacity(nil).Obj(), + expectedHint: framework.QueueSkip, + }, + "skip-unrelated-changes-on-labels": { + args: &config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.DeepCopy(), + oldObj: st.MakeNode().Obj(), + newObj: st.MakeNode().Label("k", "v").Obj(), + expectedHint: framework.QueueSkip, + }, + "skip-labels-changes-on-suitable-node": { + args: &config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.DeepCopy(), + oldObj: st.MakeNode().Label("foo", "bar").Obj(), + newObj: st.MakeNode().Label("foo", "bar").Label("k", "v").Obj(), + expectedHint: framework.QueueSkip, + }, + "skip-labels-changes-on-node-from-suitable-to-unsuitable": { + args: &config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.DeepCopy(), + oldObj: st.MakeNode().Label("foo", "bar").Obj(), + newObj: st.MakeNode().Label("k", "v").Obj(), + expectedHint: framework.QueueSkip, + }, + "queue-on-labels-change-makes-pod-schedulable": { + args: &config.NodeAffinityArgs{}, + pod: podWithNodeAffinity.Obj(), + oldObj: st.MakeNode().Obj(), + newObj: st.MakeNode().Label("foo", "bar").Obj(), + expectedHint: framework.Queue, + }, + "skip-queue-on-add-scheduler-enforced-node-affinity": { + args: &config.NodeAffinityArgs{ + AddedAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: v1.NodeSelectorOpIn, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }, + }, + pod: podWithNodeAffinity.Obj(), + newObj: st.MakeNode().Obj(), + expectedHint: framework.QueueSkip, + }, + "queue-on-add-scheduler-enforced-node-affinity": { + args: &config.NodeAffinityArgs{ + AddedAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: v1.NodeSelectorOpIn, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }, + }, + pod: podWithNodeAffinity.Obj(), + newObj: st.MakeNode().Label("foo", "bar").Obj(), + expectedHint: framework.Queue, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + logger, ctx := ktesting.NewTestContext(t) + p, err := New(ctx, tc.args, nil) + if err != nil { + t.Fatalf("Creating plugin: %v", err) + } + + actualHint, err := p.(*NodeAffinity).isSchedulableAfterNodeChange(logger, tc.pod, tc.oldObj, tc.newObj) + if tc.expectedErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tc.expectedHint, actualHint) + }) + } +} diff --git a/pkg/scheduler/framework/plugins/nodename/node_name.go b/pkg/scheduler/framework/plugins/nodename/node_name.go index 7adea806cb784..ad222e4cf0465 100644 --- a/pkg/scheduler/framework/plugins/nodename/node_name.go +++ b/pkg/scheduler/framework/plugins/nodename/node_name.go @@ -67,6 +67,6 @@ func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool { } // New initializes a new plugin and returns it. 
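// Illustrative sketch, not part of this diff: the shape shared by the plugin
// constructors being migrated in this change. Every New-style factory gains a
// leading context.Context so initialization can observe cancellation and derive
// a logger, as newNonCSILimits does later in this diff. "ExamplePlugin" and this
// standalone package are hypothetical.
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// ExamplePlugin is a minimal no-op plugin used only to show the factory shape.
type ExamplePlugin struct{}

// Name returns the plugin name used in logs and configuration.
func (pl *ExamplePlugin) Name() string { return "ExamplePlugin" }

// New matches the post-migration factory signature: the context comes first,
// followed by the plugin args and the framework handle.
func New(ctx context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
	logger := klog.FromContext(ctx)
	logger.V(4).Info("initializing ExamplePlugin")
	return &ExamplePlugin{}, nil
}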
-func New(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &NodeName{}, nil } diff --git a/pkg/scheduler/framework/plugins/nodename/node_name_test.go b/pkg/scheduler/framework/plugins/nodename/node_name_test.go index 90461ea5676a0..0b92da9213178 100644 --- a/pkg/scheduler/framework/plugins/nodename/node_name_test.go +++ b/pkg/scheduler/framework/plugins/nodename/node_name_test.go @@ -17,13 +17,13 @@ limitations under the License. package nodename import ( - "context" "reflect" "testing" v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/scheduler/framework" st "k8s.io/kubernetes/pkg/scheduler/testing" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestNodeName(t *testing.T) { @@ -55,9 +55,12 @@ func TestNodeName(t *testing.T) { t.Run(test.name, func(t *testing.T) { nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(test.node) - - p, _ := New(nil, nil) - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, nodeInfo) + _, ctx := ktesting.NewTestContext(t) + p, err := New(ctx, nil, nil) + if err != nil { + t.Fatalf("creating plugin: %v", err) + } + gotStatus := p.(framework.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo) if !reflect.DeepEqual(gotStatus, test.wantStatus) { t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) } diff --git a/pkg/scheduler/framework/plugins/nodeports/node_ports.go b/pkg/scheduler/framework/plugins/nodeports/node_ports.go index 515aab09eebc8..79a80ce3804e8 100644 --- a/pkg/scheduler/framework/plugins/nodeports/node_ports.go +++ b/pkg/scheduler/framework/plugins/nodeports/node_ports.go @@ -22,8 +22,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" + "k8s.io/kubernetes/pkg/scheduler/util" ) // NodePorts is a plugin that checks if a node has free ports for the requested pod ports. @@ -112,11 +114,59 @@ func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error) func (pl *NodePorts) EventsToRegister() []framework.ClusterEventWithHint { return []framework.ClusterEventWithHint{ // Due to immutable fields `spec.containers[*].ports`, pod update events are ignored. - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}}, + {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}, QueueingHintFn: pl.isSchedulableAfterPodDeleted}, + // TODO(#110175): Ideally, it's supposed to register only NodeCreated, because NodeUpdated event never means to have any free ports for the Pod. + // But, we may miss NodeCreated event due to preCheck. + // See: https://github.com/kubernetes/kubernetes/issues/109437 + // And, we can remove NodeUpdated event once https://github.com/kubernetes/kubernetes/issues/110175 is solved. + // We don't need the QueueingHintFn here because the scheduling of Pods will be always retried with backoff when this Event happens. + // (the same as Queue) {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.Update}}, } } +// isSchedulableAfterPodDeleted is invoked whenever a pod deleted. It checks whether +// that change made a previously unschedulable pod schedulable. 
+func (pl *NodePorts) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { + deletedPod, _, err := util.As[*v1.Pod](oldObj, nil) + if err != nil { + return framework.Queue, err + } + + // If the deleted pod is unscheduled, it doesn't make the target pod schedulable. + if deletedPod.Spec.NodeName == "" { + logger.V(4).Info("the deleted pod is unscheduled and it doesn't make the target pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod)) + return framework.QueueSkip, nil + } + + // Get the used host ports of the deleted pod. + usedPorts := make(framework.HostPortInfo) + for _, container := range deletedPod.Spec.Containers { + for _, podPort := range container.Ports { + if podPort.HostPort > 0 { + usedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort) + } + } + } + + // If the deleted pod doesn't use any host ports, it doesn't make the target pod schedulable. + if len(usedPorts) == 0 { + return framework.QueueSkip, nil + } + + // Construct a fake NodeInfo that only has the deleted Pod. + // If we can schedule `pod` to this fake node, it means that `pod` and the deleted pod don't have any common port(s). + // So, deleting that pod couldn't make `pod` schedulable. + nodeInfo := framework.NodeInfo{UsedPorts: usedPorts} + if Fits(pod, &nodeInfo) { + logger.V(4).Info("the deleted pod and the target pod don't have any common port(s), returning QueueSkip as deleting this Pod won't make the Pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod)) + return framework.QueueSkip, nil + } + + logger.V(4).Info("the deleted pod and the target pod have common port(s), returning Queue as deleting this Pod may make the Pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod)) + return framework.Queue, nil +} + // Filter invoked at the filter extension point. func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { wantPorts, err := getPreFilterState(cycleState) @@ -149,6 +199,6 @@ func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *framework.NodeInfo) bool } // New initializes a new plugin and returns it. -func New(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &NodePorts{}, nil } diff --git a/pkg/scheduler/framework/plugins/nodeports/node_ports_test.go b/pkg/scheduler/framework/plugins/nodeports/node_ports_test.go index fa9c419c00b9a..e3649de039bef 100644 --- a/pkg/scheduler/framework/plugins/nodeports/node_ports_test.go +++ b/pkg/scheduler/framework/plugins/nodeports/node_ports_test.go @@ -17,7 +17,6 @@ limitations under the License.
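// Orientation only, not part of this diff: the queueing-hint callbacks added
// here (for NodePorts above, and for NodeAffinity and NodeUnschedulable
// elsewhere in this change) convert the untyped oldObj/newObj pair from a
// cluster event with util.As. The helper's definition is not shown in this
// diff; the sketch below is one plausible minimal version under that
// assumption, not necessarily the upstream implementation.
package util

import "fmt"

// As casts oldObj and newObj to T. A nil input is left as the zero value; a
// non-nil input of the wrong type returns an error, which the hint functions
// translate into a conservative framework.Queue result.
func As[T any](oldObj, newObj interface{}) (T, T, error) {
	var oldTyped, newTyped T
	if newObj != nil {
		t, ok := newObj.(T)
		if !ok {
			return oldTyped, newTyped, fmt.Errorf("expected %T, but got %T", newTyped, newObj)
		}
		newTyped = t
	}
	if oldObj != nil {
		t, ok := oldObj.(T)
		if !ok {
			return oldTyped, newTyped, fmt.Errorf("expected %T, but got %T", oldTyped, oldObj)
		}
		oldTyped = t
	}
	return oldTyped, newTyped, nil
}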
package nodeports import ( - "context" "fmt" "reflect" "strconv" @@ -25,7 +24,10 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2/ktesting" + _ "k8s.io/klog/v2/ktesting/init" "k8s.io/kubernetes/pkg/scheduler/framework" st "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -143,9 +145,13 @@ func TestNodePorts(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - p, _ := New(nil, nil) + _, ctx := ktesting.NewTestContext(t) + p, err := New(ctx, nil, nil) + if err != nil { + t.Fatalf("creating plugin: %v", err) + } cycleState := framework.NewCycleState() - _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod) + _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod) if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" { t.Errorf("preFilter status does not match (-want,+got): %s", diff) } @@ -155,7 +161,7 @@ func TestNodePorts(t *testing.T) { if !preFilterStatus.IsSuccess() { t.Errorf("prefilter failed with status: %v", preFilterStatus) } - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo) + gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo) if diff := cmp.Diff(test.wantFilterStatus, gotStatus); diff != "" { t.Errorf("filter status does not match (-want, +got): %s", diff) } @@ -164,13 +170,17 @@ func TestNodePorts(t *testing.T) { } func TestPreFilterDisabled(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) pod := &v1.Pod{} nodeInfo := framework.NewNodeInfo() node := v1.Node{} nodeInfo.SetNode(&node) - p, _ := New(nil, nil) + p, err := New(ctx, nil, nil) + if err != nil { + t.Fatalf("creating plugin: %v", err) + } cycleState := framework.NewCycleState() - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, pod, nodeInfo) + gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo) wantStatus := framework.AsStatus(fmt.Errorf(`reading "PreFilterNodePorts" from cycleState: %w`, framework.ErrNotFound)) if !reflect.DeepEqual(gotStatus, wantStatus) { t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus) @@ -290,3 +300,58 @@ func TestGetContainerPorts(t *testing.T) { }) } } + +func Test_isSchedulableAfterPodDeleted(t *testing.T) { + podWithHostPort := st.MakePod().HostPort(8080) + + testcases := map[string]struct { + pod *v1.Pod + oldObj interface{} + expectedHint framework.QueueingHint + expectedErr bool + }{ + "backoff-wrong-old-object": { + pod: podWithHostPort.Obj(), + oldObj: "not-a-pod", + expectedHint: framework.Queue, + expectedErr: true, + }, + "skip-queue-on-unscheduled": { + pod: podWithHostPort.Obj(), + oldObj: st.MakePod().Obj(), + expectedHint: framework.QueueSkip, + }, + "skip-queue-on-non-hostport": { + pod: podWithHostPort.Obj(), + oldObj: st.MakePod().Node("fake-node").Obj(), + expectedHint: framework.QueueSkip, + }, + "skip-queue-on-unrelated-hostport": { + pod: podWithHostPort.Obj(), + oldObj: st.MakePod().Node("fake-node").HostPort(8081).Obj(), + expectedHint: framework.QueueSkip, + }, + "queue-on-released-hostport": { + pod: podWithHostPort.Obj(), + oldObj: st.MakePod().Node("fake-node").HostPort(8080).Obj(), + expectedHint: framework.Queue, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + logger, ctx := ktesting.NewTestContext(t) + p, err := New(ctx, nil, nil) + if 
err != nil { + t.Fatalf("Creating plugin: %v", err) + } + actualHint, err := p.(*NodePorts).isSchedulableAfterPodDeleted(logger, tc.pod, tc.oldObj, nil) + if tc.expectedErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tc.expectedHint, actualHint) + }) + } +} diff --git a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go index f375be2d42f6f..288fa0d7ff33c 100644 --- a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go +++ b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go @@ -114,7 +114,7 @@ func (ba *BalancedAllocation) ScoreExtensions() framework.ScoreExtensions { } // NewBalancedAllocation initializes a new plugin and returns it. -func NewBalancedAllocation(baArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { +func NewBalancedAllocation(_ context.Context, baArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { args, ok := baArgs.(*config.NodeResourcesBalancedAllocationArgs) if !ok { return nil, fmt.Errorf("want args to be of type NodeResourcesBalancedAllocationArgs, got %T", baArgs) diff --git a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go index f461a3d6d55f8..26d17b6af501a 100644 --- a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go @@ -389,7 +389,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot)) - p, _ := NewBalancedAllocation(&test.args, fh, feature.Features{}) + p, _ := NewBalancedAllocation(ctx, &test.args, fh, feature.Features{}) state := framework.NewCycleState() for i := range test.nodes { if test.runPreScore { diff --git a/pkg/scheduler/framework/plugins/noderesources/fit.go b/pkg/scheduler/framework/plugins/noderesources/fit.go index 04e9bcbf75784..ba09fa8628ad5 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -145,7 +145,7 @@ func (f *Fit) Name() string { } // NewFit initializes a new plugin and returns it. 
-func NewFit(plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { +func NewFit(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { args, ok := plArgs.(*config.NodeResourcesFitArgs) if !ok { return nil, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", plArgs) diff --git a/pkg/scheduler/framework/plugins/noderesources/fit_test.go b/pkg/scheduler/framework/plugins/noderesources/fit_test.go index 50b27ceee9486..fb454fe675743 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit_test.go @@ -496,17 +496,20 @@ func TestEnoughRequests(t *testing.T) { test.args.ScoringStrategy = defaultScoringStrategy } - p, err := NewFit(&test.args, nil, plfeature.Features{}) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + p, err := NewFit(ctx, &test.args, nil, plfeature.Features{}) if err != nil { t.Fatal(err) } cycleState := framework.NewCycleState() - _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod) + _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod) if !preFilterStatus.IsSuccess() { t.Errorf("prefilter failed with status: %v", preFilterStatus) } - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo) + gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo) if !reflect.DeepEqual(gotStatus, test.wantStatus) { t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) } @@ -520,16 +523,19 @@ func TestEnoughRequests(t *testing.T) { } func TestPreFilterDisabled(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() pod := &v1.Pod{} nodeInfo := framework.NewNodeInfo() node := v1.Node{} nodeInfo.SetNode(&node) - p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{}) + p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{}) if err != nil { t.Fatal(err) } cycleState := framework.NewCycleState() - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, pod, nodeInfo) + gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo) wantStatus := framework.AsStatus(fmt.Errorf(`error reading "PreFilterNodeResourcesFit" from cycleState: %w`, framework.ErrNotFound)) if !reflect.DeepEqual(gotStatus, wantStatus) { t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus) @@ -571,20 +577,23 @@ func TestNotEnoughRequests(t *testing.T) { } for _, test := range notEnoughPodsTests { t.Run(test.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}} test.nodeInfo.SetNode(&node) - p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{}) + p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{}) if err != nil { t.Fatal(err) } cycleState := framework.NewCycleState() - _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, 
test.pod) + _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod) if !preFilterStatus.IsSuccess() { t.Errorf("prefilter failed with status: %v", preFilterStatus) } - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo) + gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo) if !reflect.DeepEqual(gotStatus, test.wantStatus) { t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) } @@ -629,20 +638,23 @@ func TestStorageRequests(t *testing.T) { for _, test := range storagePodsTests { t.Run(test.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}} test.nodeInfo.SetNode(&node) - p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{}) + p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{}) if err != nil { t.Fatal(err) } cycleState := framework.NewCycleState() - _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod) + _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod) if !preFilterStatus.IsSuccess() { t.Errorf("prefilter failed with status: %v", preFilterStatus) } - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo) + gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo) if !reflect.DeepEqual(gotStatus, test.wantStatus) { t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) } @@ -707,11 +719,14 @@ func TestRestartableInitContainers(t *testing.T) { for _, test := range testCases { t.Run(test.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(0, 0, 1, 0, 0, 0)}} nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(&node) - p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{EnableSidecarContainers: test.enableSidecarContainers}) + p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{EnableSidecarContainers: test.enableSidecarContainers}) if err != nil { t.Fatal(err) } @@ -924,7 +939,7 @@ func TestFitScore(t *testing.T) { snapshot := cache.NewSnapshot(test.existingPods, test.nodes) fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot)) args := test.nodeResourcesFitArgs - p, err := NewFit(&args, fh, plfeature.Features{}) + p, err := NewFit(ctx, &args, fh, plfeature.Features{}) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go b/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go index 3c1c1ffc0a393..d3f3cc4e9a5af 100644 --- a/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go @@ -395,6 +395,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) { fh, _ := runtime.NewFramework(ctx, 
nil, nil, runtime.WithSnapshotSharedLister(snapshot)) p, err := NewFit( + ctx, &config.NodeResourcesFitArgs{ ScoringStrategy: &config.ScoringStrategy{ Type: config.LeastAllocated, diff --git a/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go b/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go index 03e56848066e8..d61037171d2eb 100644 --- a/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go @@ -351,7 +351,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) { snapshot := cache.NewSnapshot(test.existingPods, test.nodes) fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot)) - p, err := NewFit( + p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ ScoringStrategy: &config.ScoringStrategy{ Type: config.MostAllocated, diff --git a/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go b/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go index 3d32999671fee..9950acd45b4a7 100644 --- a/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go @@ -111,7 +111,7 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) { snapshot := cache.NewSnapshot(test.existingPods, test.nodes) fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot)) - p, err := NewFit(&config.NodeResourcesFitArgs{ + p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ ScoringStrategy: &config.ScoringStrategy{ Type: config.RequestedToCapacityRatio, Resources: test.resources, @@ -320,7 +320,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) { }, }, } - p, err := NewFit(&args, fh, plfeature.Features{}) + p, err := NewFit(ctx, &args, fh, plfeature.Features{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -548,7 +548,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) { }, } - p, err := NewFit(&args, fh, plfeature.Features{}) + p, err := NewFit(ctx, &args, fh, plfeature.Features{}) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go b/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go index 8953fb731f665..674c9390b488e 100644 --- a/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go +++ b/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go @@ -22,8 +22,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" v1helper "k8s.io/component-helpers/scheduling/corev1" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" + "k8s.io/kubernetes/pkg/scheduler/util" ) // NodeUnschedulable plugin filters nodes that set node.Spec.Unschedulable=true unless @@ -48,10 +50,34 @@ const ( // failed by this plugin schedulable. func (pl *NodeUnschedulable) EventsToRegister() []framework.ClusterEventWithHint { return []framework.ClusterEventWithHint{ - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeTaint}}, + {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeTaint}, QueueingHintFn: pl.isSchedulableAfterNodeChange}, } } +// isSchedulableAfterNodeChange is invoked for all node events reported by +// an informer. 
It checks whether that change made a previously unschedulable +// pod schedulable. +func (pl *NodeUnschedulable) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { + originalNode, modifiedNode, err := util.As[*v1.Node](oldObj, newObj) + if err != nil { + logger.Error(err, "unexpected objects in isSchedulableAfterNodeChange", "oldObj", oldObj, "newObj", newObj) + return framework.Queue, err + } + + originalNodeSchedulable, modifiedNodeSchedulable := false, !modifiedNode.Spec.Unschedulable + if originalNode != nil { + originalNodeSchedulable = !originalNode.Spec.Unschedulable + } + + if !originalNodeSchedulable && modifiedNodeSchedulable { + logger.V(4).Info("node was created or updated, pod may be schedulable now", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) + return framework.Queue, nil + } + + logger.V(4).Info("node was created or updated, but it doesn't make this pod schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode)) + return framework.QueueSkip, nil +} + // Name returns name of the plugin. It is used in logs, etc. func (pl *NodeUnschedulable) Name() string { return Name @@ -78,6 +104,6 @@ func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState } // New initializes a new plugin and returns it. -func New(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &NodeUnschedulable{}, nil } diff --git a/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable_test.go b/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable_test.go index 339a77953b4b6..f66bb5611d639 100644 --- a/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable_test.go +++ b/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable_test.go @@ -17,12 +17,12 @@ limitations under the License. 
package nodeunschedulable import ( - "context" "reflect" "testing" v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/scheduler/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestNodeUnschedulable(t *testing.T) { @@ -74,11 +74,114 @@ func TestNodeUnschedulable(t *testing.T) { for _, test := range testCases { nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(test.node) - - p, _ := New(nil, nil) - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, nodeInfo) + _, ctx := ktesting.NewTestContext(t) + p, err := New(ctx, nil, nil) + if err != nil { + t.Fatalf("creating plugin: %v", err) + } + gotStatus := p.(framework.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo) if !reflect.DeepEqual(gotStatus, test.wantStatus) { t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) } } } + +func TestIsSchedulableAfterNodeChange(t *testing.T) { + testCases := []struct { + name string + pod *v1.Pod + oldObj, newObj interface{} + expectedHint framework.QueueingHint + expectedErr bool + }{ + { + name: "backoff-wrong-new-object", + pod: &v1.Pod{}, + newObj: "not-a-node", + expectedHint: framework.Queue, + expectedErr: true, + }, + { + name: "backoff-wrong-old-object", + pod: &v1.Pod{}, + newObj: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: true, + }, + }, + oldObj: "not-a-node", + expectedHint: framework.Queue, + expectedErr: true, + }, + { + name: "skip-queue-on-unschedulable-node-added", + pod: &v1.Pod{}, + newObj: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: true, + }, + }, + expectedHint: framework.QueueSkip, + }, + { + name: "queue-on-schedulable-node-added", + pod: &v1.Pod{}, + newObj: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: false, + }, + }, + expectedHint: framework.Queue, + }, + { + name: "skip-unrelated-change", + pod: &v1.Pod{}, + newObj: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: true, + Taints: []v1.Taint{ + { + Key: v1.TaintNodeNotReady, + Effect: v1.TaintEffectNoExecute, + }, + }, + }, + }, + oldObj: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: true, + }, + }, + expectedHint: framework.QueueSkip, + }, + { + name: "queue-on-unschedulable-field-change", + pod: &v1.Pod{}, + newObj: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: false, + }, + }, + oldObj: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: true, + }, + }, + expectedHint: framework.Queue, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + logger, _ := ktesting.NewTestContext(t) + pl := &NodeUnschedulable{} + got, err := pl.isSchedulableAfterNodeChange(logger, testCase.pod, testCase.oldObj, testCase.newObj) + if err != nil && !testCase.expectedErr { + t.Errorf("unexpected error: %v", err) + } + if got != testCase.expectedHint { + t.Errorf("isSchedulableAfterNodeChange() = %v, want %v", got, testCase.expectedHint) + } + }) + } +} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go index e26401d39c2be..5db408b33ce7e 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go @@ -110,15 +110,17 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v node := nodeInfo.Node() + logger := klog.FromContext(ctx) + // If CSINode doesn't exist, the predicate may read the limits from Node object csiNode, err := pl.csiNodeLister.Get(node.Name) if err != nil { // TODO: return the error once CSINode is created by default (2 releases) - 
klog.V(5).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) + logger.V(5).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) } newVolumes := make(map[string]string) - if err := pl.filterAttachableVolumes(pod, csiNode, true /* new pod */, newVolumes); err != nil { + if err := pl.filterAttachableVolumes(logger, pod, csiNode, true /* new pod */, newVolumes); err != nil { return framework.AsStatus(err) } @@ -135,7 +137,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v attachedVolumes := make(map[string]string) for _, existingPod := range nodeInfo.Pods { - if err := pl.filterAttachableVolumes(existingPod.Pod, csiNode, false /* existing pod */, attachedVolumes); err != nil { + if err := pl.filterAttachableVolumes(logger, existingPod.Pod, csiNode, false /* existing pod */, attachedVolumes); err != nil { return framework.AsStatus(err) } } @@ -156,7 +158,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v maxVolumeLimit, ok := nodeVolumeLimits[v1.ResourceName(volumeLimitKey)] if ok { currentVolumeCount := attachedVolumeCount[volumeLimitKey] - klog.V(5).InfoS("Found plugin volume limits", "node", node.Name, "volumeLimitKey", volumeLimitKey, + logger.V(5).Info("Found plugin volume limits", "node", node.Name, "volumeLimitKey", volumeLimitKey, "maxLimits", maxVolumeLimit, "currentVolumeCount", currentVolumeCount, "newVolumeCount", count, "pod", klog.KObj(pod)) if currentVolumeCount+count > int(maxVolumeLimit) { @@ -169,7 +171,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v } func (pl *CSILimits) filterAttachableVolumes( - pod *v1.Pod, csiNode *storagev1.CSINode, newPod bool, result map[string]string) error { + logger klog.Logger, pod *v1.Pod, csiNode *storagev1.CSINode, newPod bool, result map[string]string) error { for _, vol := range pod.Spec.Volumes { pvcName := "" isEphemeral := false @@ -190,7 +192,7 @@ func (pl *CSILimits) filterAttachableVolumes( // - If the volume is migratable and CSI migration is enabled, need to count it // as well. // - If the volume is not migratable, it will be count in non_csi filter. - if err := pl.checkAttachableInlineVolume(&vol, csiNode, pod, result); err != nil { + if err := pl.checkAttachableInlineVolume(logger, &vol, csiNode, pod, result); err != nil { return err } @@ -212,7 +214,7 @@ func (pl *CSILimits) filterAttachableVolumes( } // If the PVC is invalid, we don't count the volume because // there's no guarantee that it belongs to the running predicate. - klog.V(5).InfoS("Unable to look up PVC info", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName)) + logger.V(5).Info("Unable to look up PVC info", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName)) continue } @@ -223,9 +225,9 @@ func (pl *CSILimits) filterAttachableVolumes( } } - driverName, volumeHandle := pl.getCSIDriverInfo(csiNode, pvc) + driverName, volumeHandle := pl.getCSIDriverInfo(logger, csiNode, pvc) if driverName == "" || volumeHandle == "" { - klog.V(5).InfoS("Could not find a CSI driver name or volume handle, not counting volume") + logger.V(5).Info("Could not find a CSI driver name or volume handle, not counting volume") continue } @@ -238,7 +240,7 @@ func (pl *CSILimits) filterAttachableVolumes( // checkAttachableInlineVolume takes an inline volume and add to the result map if the // volume is migratable and CSI migration for this plugin has been enabled. 
-func (pl *CSILimits) checkAttachableInlineVolume(vol *v1.Volume, csiNode *storagev1.CSINode, +func (pl *CSILimits) checkAttachableInlineVolume(logger klog.Logger, vol *v1.Volume, csiNode *storagev1.CSINode, pod *v1.Pod, result map[string]string) error { if !pl.translator.IsInlineMigratable(vol) { return nil @@ -253,7 +255,7 @@ func (pl *CSILimits) checkAttachableInlineVolume(vol *v1.Volume, csiNode *storag if csiNode != nil { csiNodeName = csiNode.Name } - klog.V(5).InfoS("CSI Migration is not enabled for provisioner", "provisioner", inTreeProvisionerName, + logger.V(5).Info("CSI Migration is not enabled for provisioner", "provisioner", inTreeProvisionerName, "pod", klog.KObj(pod), "csiNode", csiNodeName) return nil } @@ -280,21 +282,21 @@ func (pl *CSILimits) checkAttachableInlineVolume(vol *v1.Volume, csiNode *storag // getCSIDriverInfo returns the CSI driver name and volume ID of a given PVC. // If the PVC is from a migrated in-tree plugin, this function will return // the information of the CSI driver that the plugin has been migrated to. -func (pl *CSILimits) getCSIDriverInfo(csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) { +func (pl *CSILimits) getCSIDriverInfo(logger klog.Logger, csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) { pvName := pvc.Spec.VolumeName if pvName == "" { - klog.V(5).InfoS("Persistent volume had no name for claim", "PVC", klog.KObj(pvc)) - return pl.getCSIDriverInfoFromSC(csiNode, pvc) + logger.V(5).Info("Persistent volume had no name for claim", "PVC", klog.KObj(pvc)) + return pl.getCSIDriverInfoFromSC(logger, csiNode, pvc) } pv, err := pl.pvLister.Get(pvName) if err != nil { - klog.V(5).InfoS("Unable to look up PV info for PVC and PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvName)) + logger.V(5).Info("Unable to look up PV info for PVC and PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvName)) // If we can't fetch PV associated with PVC, may be it got deleted // or PVC was prebound to a PVC that hasn't been created yet. // fallback to using StorageClass for volume counting - return pl.getCSIDriverInfoFromSC(csiNode, pvc) + return pl.getCSIDriverInfoFromSC(logger, csiNode, pvc) } csiSource := pv.Spec.PersistentVolumeSource.CSI @@ -306,23 +308,23 @@ func (pl *CSILimits) getCSIDriverInfo(csiNode *storagev1.CSINode, pvc *v1.Persis pluginName, err := pl.translator.GetInTreePluginNameFromSpec(pv, nil) if err != nil { - klog.V(5).InfoS("Unable to look up plugin name from PV spec", "err", err) + logger.V(5).Info("Unable to look up plugin name from PV spec", "err", err) return "", "" } if !isCSIMigrationOn(csiNode, pluginName) { - klog.V(5).InfoS("CSI Migration of plugin is not enabled", "plugin", pluginName) + logger.V(5).Info("CSI Migration of plugin is not enabled", "plugin", pluginName) return "", "" } csiPV, err := pl.translator.TranslateInTreePVToCSI(pv) if err != nil { - klog.V(5).InfoS("Unable to translate in-tree volume to CSI", "err", err) + logger.V(5).Info("Unable to translate in-tree volume to CSI", "err", err) return "", "" } if csiPV.Spec.PersistentVolumeSource.CSI == nil { - klog.V(5).InfoS("Unable to get a valid volume source for translated PV", "PV", pvName) + logger.V(5).Info("Unable to get a valid volume source for translated PV", "PV", pvName) return "", "" } @@ -333,7 +335,7 @@ func (pl *CSILimits) getCSIDriverInfo(csiNode *storagev1.CSINode, pvc *v1.Persis } // getCSIDriverInfoFromSC returns the CSI driver name and a random volume ID of a given PVC's StorageClass. 
-func (pl *CSILimits) getCSIDriverInfoFromSC(csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) { +func (pl *CSILimits) getCSIDriverInfoFromSC(logger klog.Logger, csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) { namespace := pvc.Namespace pvcName := pvc.Name scName := storagehelpers.GetPersistentVolumeClaimClass(pvc) @@ -341,13 +343,13 @@ func (pl *CSILimits) getCSIDriverInfoFromSC(csiNode *storagev1.CSINode, pvc *v1. // If StorageClass is not set or not found, then PVC must be using immediate binding mode // and hence it must be bound before scheduling. So it is safe to not count it. if scName == "" { - klog.V(5).InfoS("PVC has no StorageClass", "PVC", klog.KObj(pvc)) + logger.V(5).Info("PVC has no StorageClass", "PVC", klog.KObj(pvc)) return "", "" } storageClass, err := pl.scLister.Get(scName) if err != nil { - klog.V(5).InfoS("Could not get StorageClass for PVC", "PVC", klog.KObj(pvc), "err", err) + logger.V(5).Info("Could not get StorageClass for PVC", "PVC", klog.KObj(pvc), "err", err) return "", "" } @@ -359,13 +361,13 @@ func (pl *CSILimits) getCSIDriverInfoFromSC(csiNode *storagev1.CSINode, pvc *v1. provisioner := storageClass.Provisioner if pl.translator.IsMigratableIntreePluginByName(provisioner) { if !isCSIMigrationOn(csiNode, provisioner) { - klog.V(5).InfoS("CSI Migration of provisioner is not enabled", "provisioner", provisioner) + logger.V(5).Info("CSI Migration of provisioner is not enabled", "provisioner", provisioner) return "", "" } driverName, err := pl.translator.GetCSINameFromInTreeName(provisioner) if err != nil { - klog.V(5).InfoS("Unable to look up driver name from provisioner name", "provisioner", provisioner, "err", err) + logger.V(5).Info("Unable to look up driver name from provisioner name", "provisioner", provisioner, "err", err) return "", "" } return driverName, volumeHandle @@ -375,7 +377,7 @@ func (pl *CSILimits) getCSIDriverInfoFromSC(csiNode *storagev1.CSINode, pvc *v1. } // NewCSI initializes a new plugin and returns it. 
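// Aside, illustrative only (not part of this change): the contextual-logging
// pattern the CSILimits edits above follow. The extension point derives a
// logger from its context once via klog.FromContext and hands it to helpers,
// so the helpers no longer call the global klog.V(...).InfoS functions. The
// names in this standalone sketch are hypothetical.
package example

import (
	"context"

	"k8s.io/klog/v2"
)

// countVolumes stands in for helpers like filterAttachableVolumes: it receives
// the logger as an explicit parameter instead of using the klog globals.
func countVolumes(logger klog.Logger, claims []string) int {
	for _, c := range claims {
		logger.V(5).Info("Counting volume for claim", "PVC", c)
	}
	return len(claims)
}

// filter stands in for an extension point such as Filter: the logger is
// derived from the incoming context exactly once and passed down.
func filter(ctx context.Context, claims []string) int {
	logger := klog.FromContext(ctx)
	return countVolumes(logger, claims)
}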
-func NewCSI(_ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { +func NewCSI(_ context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { informerFactory := handle.SharedInformerFactory() pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go index 289a16c12525c..1207176abc5fd 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go @@ -33,11 +33,11 @@ import ( csitrans "k8s.io/csi-translation-lib" csilibplugins "k8s.io/csi-translation-lib/plugins" "k8s.io/kubernetes/pkg/scheduler/framework" - fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake" st "k8s.io/kubernetes/pkg/scheduler/testing" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/utils/ktesting" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -281,7 +281,7 @@ func TestCSILimits(t *testing.T) { existingPods []*v1.Pod extraClaims []v1.PersistentVolumeClaim filterName string - maxVols int + maxVols int32 driverNames []string test string migrationEnabled bool @@ -613,7 +613,7 @@ func TestCSILimits(t *testing.T) { // running attachable predicate tests with feature gate and limit present on nodes for _, test := range tests { t.Run(test.test, func(t *testing.T) { - node, csiNode := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, int64(test.maxVols), test.driverNames...) + node, csiNode := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, test.maxVols, test.driverNames...) if csiNode != nil { enableMigrationOnNode(csiNode, csilibplugins.AWSEBSInTreePluginName) } @@ -641,8 +641,8 @@ func TestCSILimits(t *testing.T) { } } -func getFakeCSIPVLister(volumeName string, driverNames ...string) fakeframework.PersistentVolumeLister { - pvLister := fakeframework.PersistentVolumeLister{} +func getFakeCSIPVLister(volumeName string, driverNames ...string) tf.PersistentVolumeLister { + pvLister := tf.PersistentVolumeLister{} for _, driver := range driverNames { for j := 0; j < 4; j++ { volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j) @@ -686,8 +686,8 @@ func getFakeCSIPVLister(volumeName string, driverNames ...string) fakeframework. 
return pvLister } -func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakeframework.PersistentVolumeClaimLister { - pvcLister := fakeframework.PersistentVolumeClaimLister{} +func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) tf.PersistentVolumeClaimLister { + pvcLister := tf.PersistentVolumeClaimLister{} for _, driver := range driverNames { for j := 0; j < 4; j++ { v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j) @@ -729,8 +729,8 @@ func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) { csiNode.Annotations = nodeInfoAnnotations } -func getFakeCSIStorageClassLister(scName, provisionerName string) fakeframework.StorageClassLister { - return fakeframework.StorageClassLister{ +func getFakeCSIStorageClassLister(scName, provisionerName string) tf.StorageClassLister { + return tf.StorageClassLister{ { ObjectMeta: metav1.ObjectMeta{Name: scName}, Provisioner: provisionerName, @@ -738,15 +738,15 @@ func getFakeCSIStorageClassLister(scName, provisionerName string) fakeframework. } } -func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakeframework.CSINodeLister { - csiNodeLister := fakeframework.CSINodeLister{} +func getFakeCSINodeLister(csiNode *storagev1.CSINode) tf.CSINodeLister { + csiNodeLister := tf.CSINodeLister{} if csiNode != nil { csiNodeLister = append(csiNodeLister, *csiNode.DeepCopy()) } return csiNodeLister } -func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*framework.NodeInfo, *storagev1.CSINode) { +func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int32, driverNames ...string) (*framework.NodeInfo, *storagev1.CSINode) { nodeInfo := framework.NewNodeInfo(pods...) node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"}, @@ -758,7 +758,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int addLimitToNode := func() { for _, driver := range driverNames { - node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI) + node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(int64(limit), resource.DecimalSI) } } @@ -780,7 +780,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int } if addLimits { driver.Allocatable = &storagev1.VolumeNodeResources{ - Count: pointer.Int32(int32(limit)), + Count: ptr.To(limit), } } csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver) diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go index 763c6f45ddd7f..150b21dbe3eb0 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go @@ -70,36 +70,36 @@ const ( const AzureDiskName = names.AzureDiskLimits // NewAzureDisk returns function that initializes a new plugin and returns it. 
-func NewAzureDisk(_ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { +func NewAzureDisk(ctx context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { informerFactory := handle.SharedInformerFactory() - return newNonCSILimitsWithInformerFactory(azureDiskVolumeFilterType, informerFactory, fts), nil + return newNonCSILimitsWithInformerFactory(ctx, azureDiskVolumeFilterType, informerFactory, fts), nil } // CinderName is the name of the plugin used in the plugin registry and configurations. const CinderName = names.CinderLimits // NewCinder returns function that initializes a new plugin and returns it. -func NewCinder(_ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { +func NewCinder(ctx context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { informerFactory := handle.SharedInformerFactory() - return newNonCSILimitsWithInformerFactory(cinderVolumeFilterType, informerFactory, fts), nil + return newNonCSILimitsWithInformerFactory(ctx, cinderVolumeFilterType, informerFactory, fts), nil } // EBSName is the name of the plugin used in the plugin registry and configurations. const EBSName = names.EBSLimits // NewEBS returns function that initializes a new plugin and returns it. -func NewEBS(_ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { +func NewEBS(ctx context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { informerFactory := handle.SharedInformerFactory() - return newNonCSILimitsWithInformerFactory(ebsVolumeFilterType, informerFactory, fts), nil + return newNonCSILimitsWithInformerFactory(ctx, ebsVolumeFilterType, informerFactory, fts), nil } // GCEPDName is the name of the plugin used in the plugin registry and configurations. const GCEPDName = names.GCEPDLimits // NewGCEPD returns function that initializes a new plugin and returns it. -func NewGCEPD(_ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { +func NewGCEPD(ctx context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { informerFactory := handle.SharedInformerFactory() - return newNonCSILimitsWithInformerFactory(gcePDVolumeFilterType, informerFactory, fts), nil + return newNonCSILimitsWithInformerFactory(ctx, gcePDVolumeFilterType, informerFactory, fts), nil } // nonCSILimits contains information to check the max number of volumes for a plugin. @@ -125,6 +125,7 @@ var _ framework.EnqueueExtensions = &nonCSILimits{} // newNonCSILimitsWithInformerFactory returns a plugin with filter name and informer factory. 
func newNonCSILimitsWithInformerFactory( + ctx context.Context, filterName string, informerFactory informers.SharedInformerFactory, fts feature.Features, @@ -134,7 +135,7 @@ func newNonCSILimitsWithInformerFactory( csiNodesLister := informerFactory.Storage().V1().CSINodes().Lister() scLister := informerFactory.Storage().V1().StorageClasses().Lister() - return newNonCSILimits(filterName, csiNodesLister, scLister, pvLister, pvcLister, fts) + return newNonCSILimits(ctx, filterName, csiNodesLister, scLister, pvLister, pvcLister, fts) } // newNonCSILimits creates a plugin which evaluates whether a pod can fit based on the @@ -148,6 +149,7 @@ func newNonCSILimitsWithInformerFactory( // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over // the maximum. func newNonCSILimits( + ctx context.Context, filterName string, csiNodeLister storagelisters.CSINodeLister, scLister storagelisters.StorageClassLister, @@ -155,6 +157,7 @@ func newNonCSILimits( pvcLister corelisters.PersistentVolumeClaimLister, fts feature.Features, ) framework.Plugin { + logger := klog.FromContext(ctx) var filter VolumeFilter var volumeLimitKey v1.ResourceName var name string @@ -177,14 +180,14 @@ func newNonCSILimits( filter = cinderVolumeFilter volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey) default: - klog.ErrorS(errors.New("wrong filterName"), "Cannot create nonCSILimits plugin") + logger.Error(errors.New("wrong filterName"), "Cannot create nonCSILimits plugin") return nil } pl := &nonCSILimits{ name: name, filter: filter, volumeLimitKey: volumeLimitKey, - maxVolumeFunc: getMaxVolumeFunc(filterName), + maxVolumeFunc: getMaxVolumeFunc(logger, filterName), csiNodeLister: csiNodeLister, pvLister: pvLister, pvcLister: pvcLister, @@ -238,8 +241,9 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod return nil } + logger := klog.FromContext(ctx) newVolumes := sets.New[string]() - if err := pl.filterVolumes(pod, true /* new pod */, newVolumes); err != nil { + if err := pl.filterVolumes(logger, pod, true /* new pod */, newVolumes); err != nil { return framework.AsStatus(err) } @@ -257,7 +261,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod if err != nil { // we don't fail here because the CSINode object is only necessary // for determining whether the migration is enabled or not - klog.V(5).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) + logger.V(5).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) } } @@ -269,7 +273,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod // count unique volumes existingVolumes := sets.New[string]() for _, existingPod := range nodeInfo.Pods { - if err := pl.filterVolumes(existingPod.Pod, false /* existing pod */, existingVolumes); err != nil { + if err := pl.filterVolumes(logger, existingPod.Pod, false /* existing pod */, existingVolumes); err != nil { return framework.AsStatus(err) } } @@ -293,7 +297,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod return nil } -func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes sets.Set[string]) error { +func (pl *nonCSILimits) filterVolumes(logger klog.Logger, pod *v1.Pod, newPod bool, filteredVolumes sets.Set[string]) error { volumes := pod.Spec.Volumes for i := range volumes { vol := &volumes[i] @@ -336,7 +340,7 @@ func (pl *nonCSILimits) 
filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes } // If the PVC is invalid, we don't count the volume because // there's no guarantee that it belongs to the running predicate. - klog.V(4).InfoS("Unable to look up PVC info, assuming PVC doesn't match predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName), "err", err) + logger.V(4).Info("Unable to look up PVC info, assuming PVC doesn't match predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName), "err", err) continue } @@ -354,7 +358,7 @@ func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes // original PV where it was bound to, so we count the volume if // it belongs to the running predicate. if pl.matchProvisioner(pvc) { - klog.V(4).InfoS("PVC is not bound, assuming PVC matches predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName)) + logger.V(4).Info("PVC is not bound, assuming PVC matches predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName)) filteredVolumes.Insert(pvID) } continue @@ -365,7 +369,7 @@ func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes // If the PV is invalid and PVC belongs to the running predicate, // log the error and count the PV towards the PV limit. if pl.matchProvisioner(pvc) { - klog.V(4).InfoS("Unable to look up PV, assuming PV matches predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName), "PV", klog.KRef("", pvName), "err", err) + logger.V(4).Info("Unable to look up PV, assuming PV matches predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName), "PV", klog.KRef("", pvName), "err", err) filteredVolumes.Insert(pvID) } continue @@ -394,12 +398,12 @@ func (pl *nonCSILimits) matchProvisioner(pvc *v1.PersistentVolumeClaim) bool { } // getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value. 
-func getMaxVolLimitFromEnv() int { +func getMaxVolLimitFromEnv(logger klog.Logger) int { if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" { if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil { - klog.ErrorS(err, "Unable to parse maximum PD volumes value, using default") + logger.Error(err, "Unable to parse maximum PD volumes value, using default") } else if parsedMaxVols <= 0 { - klog.ErrorS(errors.New("maximum PD volumes is negative"), "Unable to parse maximum PD volumes value, using default") + logger.Error(errors.New("maximum PD volumes is negative"), "Unable to parse maximum PD volumes value, using default") } else { return parsedMaxVols } @@ -520,9 +524,9 @@ var cinderVolumeFilter = VolumeFilter{ }, } -func getMaxVolumeFunc(filterName string) func(node *v1.Node) int { +func getMaxVolumeFunc(logger klog.Logger, filterName string) func(node *v1.Node) int { return func(node *v1.Node) int { - maxVolumesFromEnv := getMaxVolLimitFromEnv() + maxVolumesFromEnv := getMaxVolLimitFromEnv(logger) if maxVolumesFromEnv > 0 { return maxVolumesFromEnv } diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go index 5b0b71d562b67..c34dd9630af56 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go @@ -28,11 +28,12 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" csilibplugins "k8s.io/csi-translation-lib/plugins" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/scheduler/framework" - fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" st "k8s.io/kubernetes/pkg/scheduler/testing" - "k8s.io/utils/pointer" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" + "k8s.io/utils/ptr" ) var ( @@ -129,7 +130,7 @@ func TestEphemeralLimits(t *testing.T) { existingPods []*v1.Pod extraClaims []v1.PersistentVolumeClaim ephemeralEnabled bool - maxVols int + maxVols int32 test string wantStatus *framework.Status wantPreFilterStatus *framework.Status @@ -181,16 +182,17 @@ func TestEphemeralLimits(t *testing.T) { for _, test := range tests { t.Run(test.test, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) fts := feature.Features{} - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), filterName) - p := newNonCSILimits(filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(filterName, driverName), getFakePVLister(filterName), append(getFakePVCLister(filterName), test.extraClaims...), fts).(framework.FilterPlugin) - _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod) + node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, test.maxVols, filterName) + p := newNonCSILimits(ctx, filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(filterName, driverName), getFakePVLister(filterName), append(getFakePVCLister(filterName), test.extraClaims...), fts).(framework.FilterPlugin) + _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(ctx, nil, test.newPod) if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" { t.Errorf("PreFilter status does not match (-want, +got): %s", diff) } if gotPreFilterStatus.Code() != framework.Skip { - gotStatus := p.Filter(context.Background(), nil, test.newPod, node) + gotStatus := p.Filter(ctx, nil, test.newPod, node) if 
!reflect.DeepEqual(gotStatus, test.wantStatus) { t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus) } @@ -241,7 +243,7 @@ func TestAzureDiskLimits(t *testing.T) { existingPods []*v1.Pod filterName string driverName string - maxVols int + maxVols int32 test string wantStatus *framework.Status wantPreFilterStatus *framework.Status @@ -412,8 +414,9 @@ func TestAzureDiskLimits(t *testing.T) { for _, test := range tests { t.Run(test.test, func(t *testing.T) { - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) - p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) + _, ctx := ktesting.NewTestContext(t) + node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, test.maxVols, test.filterName) + p := newNonCSILimits(ctx, test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod) if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" { t.Errorf("PreFilter status does not match (-want, +got): %s", diff) @@ -476,7 +479,7 @@ func TestEBSLimits(t *testing.T) { existingPods []*v1.Pod filterName string driverName string - maxVols int + maxVols int32 test string wantStatus *framework.Status wantPreFilterStatus *framework.Status @@ -693,15 +696,16 @@ func TestEBSLimits(t *testing.T) { for _, test := range tests { t.Run(test.test, func(t *testing.T) { - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) - p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) - _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod) + _, ctx := ktesting.NewTestContext(t) + node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, test.maxVols, test.filterName) + p := newNonCSILimits(ctx, test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) + _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(ctx, nil, test.newPod) if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" { t.Errorf("PreFilter status does not match (-want, +got): %s", diff) } if gotPreFilterStatus.Code() != framework.Skip { - gotStatus := p.Filter(context.Background(), nil, test.newPod, node) + gotStatus := p.Filter(ctx, nil, test.newPod, node) if !reflect.DeepEqual(gotStatus, test.wantStatus) { t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus) } @@ -752,7 +756,7 @@ func TestGCEPDLimits(t *testing.T) { existingPods []*v1.Pod filterName string driverName string - maxVols int + maxVols int32 test string wantStatus *framework.Status wantPreFilterStatus *framework.Status @@ -923,8 +927,9 @@ func TestGCEPDLimits(t *testing.T) { for _, test := range tests { 
t.Run(test.test, func(t *testing.T) { - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) - p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) + _, ctx := ktesting.NewTestContext(t) + node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, test.maxVols, test.filterName) + p := newNonCSILimits(ctx, test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod) if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" { t.Errorf("PreFilter status does not match (-want, +got): %s", diff) @@ -965,8 +970,9 @@ func TestGetMaxVols(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + logger, _ := ktesting.NewTestContext(t) t.Setenv(KubeMaxPDVols, test.rawMaxVols) - result := getMaxVolLimitFromEnv() + result := getMaxVolLimitFromEnv(logger) if result != test.expected { t.Errorf("expected %v got %v", test.expected, result) } @@ -974,8 +980,8 @@ func TestGetMaxVols(t *testing.T) { } } -func getFakePVCLister(filterName string) fakeframework.PersistentVolumeClaimLister { - return fakeframework.PersistentVolumeClaimLister{ +func getFakePVCLister(filterName string) tf.PersistentVolumeClaimLister { + return tf.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"}, Spec: v1.PersistentVolumeClaimSpec{ @@ -1022,21 +1028,21 @@ func getFakePVCLister(filterName string) fakeframework.PersistentVolumeClaimList ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCWithDefaultSCPod"}, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "", - StorageClassName: pointer.String("standard-sc"), + StorageClassName: ptr.To("standard-sc"), }, }, { ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCWithInvalidSCPod"}, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "", - StorageClassName: pointer.String("invalid-sc"), + StorageClassName: ptr.To("invalid-sc"), }, }, } } -func getFakePVLister(filterName string) fakeframework.PersistentVolumeLister { - return fakeframework.PersistentVolumeLister{ +func getFakePVLister(filterName string) tf.PersistentVolumeLister { + return tf.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"}, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go index b822b400b0394..4643605182c1a 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go @@ -36,7 +36,7 @@ import ( frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime" "k8s.io/kubernetes/pkg/scheduler/internal/cache" st "k8s.io/kubernetes/pkg/scheduler/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var cmpOpts = []cmp.Option{ @@ -2731,7 +2731,7 @@ func TestSingleConstraint(t *testing.T) { "node", v1.DoNotSchedule, fooSelector, - pointer.Int32(4), // larger than the number of domains(3) + ptr.To[int32](4), // larger than the number of domains(3) nil, nil, nil, @@ 
-2762,7 +2762,7 @@ func TestSingleConstraint(t *testing.T) { "node", v1.DoNotSchedule, fooSelector, - pointer.Int32(2), // smaller than the number of domains(3) + ptr.To[int32](2), // smaller than the number of domains(3) nil, nil, nil, @@ -2793,7 +2793,7 @@ func TestSingleConstraint(t *testing.T) { "zone", v1.DoNotSchedule, fooSelector, - pointer.Int32(3), // larger than the number of domains(2) + ptr.To[int32](3), // larger than the number of domains(2) nil, nil, nil, @@ -2824,7 +2824,7 @@ func TestSingleConstraint(t *testing.T) { "zone", v1.DoNotSchedule, fooSelector, - pointer.Int32(1), // smaller than the number of domains(2) + ptr.To[int32](1), // smaller than the number of domains(2) nil, nil, nil, diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go b/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go index 17803e4ee0f57..982da6875a20c 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go @@ -17,6 +17,7 @@ limitations under the License. package podtopologyspread import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -82,7 +83,7 @@ func (pl *PodTopologySpread) Name() string { } // New initializes a new plugin and returns it. -func New(plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { +func New(_ context.Context, plArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) { if h.SnapshotSharedLister() == nil { return nil, fmt.Errorf("SnapshotSharedlister is nil") } diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go index af3c906e51bc0..ed3d3d15c27c6 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go @@ -37,7 +37,7 @@ import ( frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime" "k8s.io/kubernetes/pkg/scheduler/internal/cache" st "k8s.io/kubernetes/pkg/scheduler/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var podTopologySpreadFunc = frameworkruntime.FactoryAdapter(feature.Features{}, New) @@ -95,7 +95,7 @@ func TestPreScoreSkip(t *testing.T) { if err != nil { t.Fatalf("Failed creating framework runtime: %v", err) } - pl, err := New(&tt.config, f, feature.Features{}) + pl, err := New(ctx, &tt.config, f, feature.Features{}) if err != nil { t.Fatalf("Failed creating plugin: %v", err) } @@ -103,7 +103,7 @@ func TestPreScoreSkip(t *testing.T) { informerFactory.WaitForCacheSync(ctx.Done()) p := pl.(*PodTopologySpread) cs := framework.NewCycleState() - if s := p.PreScore(context.Background(), cs, tt.pod, tt.nodes); !s.IsSkip() { + if s := p.PreScore(ctx, cs, tt.pod, tt.nodes); !s.IsSkip() { t.Fatalf("Expected skip but got %v", s.AsError()) } }) @@ -155,8 +155,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: pointer.Int64(0), - {key: "zone", value: "zone2"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), + {key: "zone", value: "zone2"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2), topologyNormalizingWeight(3)}, }, @@ -187,8 +187,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: 
pointer.Int64(0), - {key: "zone", value: "zone2"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), + {key: "zone", value: "zone2"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, }, @@ -228,7 +228,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New("node-x"), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(2)}, }, @@ -270,8 +270,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: v1.LabelTopologyZone, value: "mars"}: pointer.Int64(0), - {key: v1.LabelTopologyZone, value: ""}: pointer.Int64(0), + {key: v1.LabelTopologyZone, value: "mars"}: ptr.To[int64](0), + {key: v1.LabelTopologyZone, value: ""}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(4), topologyNormalizingWeight(2)}, }, @@ -321,7 +321,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "planet", value: "mars"}: pointer.Int64(0), + {key: "planet", value: "mars"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(1)}, }, @@ -362,7 +362,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {"planet", "mars"}: pointer.Int64(0), + {"planet", "mars"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1)}, }, @@ -394,8 +394,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: pointer.Int64(0), - {key: "zone", value: "zone2"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), + {key: "zone", value: "zone2"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, }, @@ -428,8 +428,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: pointer.Int64(0), - {key: "zone", value: "zone2"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), + {key: "zone", value: "zone2"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, }, @@ -462,8 +462,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: pointer.Int64(0), - {key: "zone", value: "zone2"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), + {key: "zone", value: "zone2"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, }, @@ -496,8 +496,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: pointer.Int64(0), - {key: "zone", value: "zone2"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), + {key: "zone", value: "zone2"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, }, @@ -529,8 +529,8 @@ func 
TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: pointer.Int64(0), - {key: "zone", value: "zone2"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), + {key: "zone", value: "zone2"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, }, @@ -562,8 +562,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, IgnoredNodes: sets.New[string](), TopologyPairToPodCounts: map[topologyPair]*int64{ - {key: "zone", value: "zone1"}: pointer.Int64(0), - {key: "zone", value: "zone2"}: pointer.Int64(0), + {key: "zone", value: "zone1"}: ptr.To[int64](0), + {key: "zone", value: "zone2"}: ptr.To[int64](0), }, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, }, @@ -582,7 +582,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { if err != nil { t.Fatalf("Failed creating framework runtime: %v", err) } - pl, err := New(&tt.config, f, feature.Features{EnableNodeInclusionPolicyInPodTopologySpread: tt.enableNodeInclusionPolicy}) + pl, err := New(ctx, &tt.config, f, feature.Features{EnableNodeInclusionPolicyInPodTopologySpread: tt.enableNodeInclusionPolicy}) if err != nil { t.Fatalf("Failed creating plugin: %v", err) } @@ -1089,7 +1089,7 @@ func TestPodTopologySpreadScore(t *testing.T) { "node", v1.ScheduleAnyway, fooSelector, - pointer.Int32(10), // larger than the number of domains(3) + ptr.To[int32](10), // larger than the number of domains(3) nil, nil, nil, @@ -1336,7 +1336,8 @@ func TestPodTopologySpreadScore(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) t.Cleanup(cancel) allNodes := append([]*v1.Node{}, tt.nodes...) allNodes = append(allNodes, tt.failedNodes...) @@ -1346,7 +1347,7 @@ func TestPodTopologySpreadScore(t *testing.T) { p.enableNodeInclusionPolicyInPodTopologySpread = tt.enableNodeInclusionPolicy p.enableMatchLabelKeysInPodTopologySpread = tt.enableMatchLabelKeys - status := p.PreScore(context.Background(), state, tt.pod, tt.nodes) + status := p.PreScore(ctx, state, tt.pod, tt.nodes) if !status.IsSuccess() { t.Errorf("unexpected error: %v", status) } diff --git a/pkg/scheduler/framework/plugins/queuesort/priority_sort.go b/pkg/scheduler/framework/plugins/queuesort/priority_sort.go index 43eaa74bd2a59..1a6cfde414023 100644 --- a/pkg/scheduler/framework/plugins/queuesort/priority_sort.go +++ b/pkg/scheduler/framework/plugins/queuesort/priority_sort.go @@ -17,6 +17,7 @@ limitations under the License. package queuesort import ( + "context" "k8s.io/apimachinery/pkg/runtime" corev1helpers "k8s.io/component-helpers/scheduling/corev1" "k8s.io/kubernetes/pkg/scheduler/framework" @@ -46,6 +47,6 @@ func (pl *PrioritySort) Less(pInfo1, pInfo2 *framework.QueuedPodInfo) bool { } // New initializes a new plugin and returns it. 
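The queuesort factory below is one of several plugin constructors in this diff that gain a leading context.Context argument; a short sketch (assuming a *testing.T named t and a framework.Handle named fh) of how callers such as the updated tests construct a plugin now:

_, ctx := ktesting.NewTestContext(t) // per-test logger wired into ctx
p, err := New(ctx, nil, fh)          // nil runtime.Object where the plugin takes no args
if err != nil {
	t.Fatalf("creating plugin: %v", err)
}
_ = p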
-func New(_ runtime.Object, handle framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, handle framework.Handle) (framework.Plugin, error) { return &PrioritySort{}, nil } diff --git a/pkg/scheduler/framework/plugins/registry.go b/pkg/scheduler/framework/plugins/registry.go index c5b8cc680882f..be74711e4a2c5 100644 --- a/pkg/scheduler/framework/plugins/registry.go +++ b/pkg/scheduler/framework/plugins/registry.go @@ -47,7 +47,6 @@ import ( func NewInTreeRegistry() runtime.Registry { fts := plfeature.Features{ EnableDynamicResourceAllocation: feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation), - EnableReadWriteOncePod: feature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod), EnableVolumeCapacityPriority: feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority), EnableMinDomainsInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.MinDomainsInPodTopologySpread), EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread), diff --git a/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates.go b/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates.go index 5c0678cb0d84d..f7f5d35a17ac7 100644 --- a/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates.go +++ b/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates.go @@ -46,7 +46,7 @@ func (pl *SchedulingGates) PreEnqueue(ctx context.Context, p *v1.Pod) *framework if !pl.enablePodSchedulingReadiness || len(p.Spec.SchedulingGates) == 0 { return nil } - var gates []string + gates := make([]string, 0, len(p.Spec.SchedulingGates)) for _, gate := range p.Spec.SchedulingGates { gates = append(gates, gate.Name) } @@ -62,6 +62,6 @@ func (pl *SchedulingGates) EventsToRegister() []framework.ClusterEventWithHint { } // New initializes a new plugin and returns it. -func New(_ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) { return &SchedulingGates{enablePodSchedulingReadiness: fts.EnablePodSchedulingReadiness}, nil } diff --git a/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates_test.go b/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates_test.go index 670989a1eb9ac..b4bbc2a72f479 100644 --- a/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates_test.go +++ b/pkg/scheduler/framework/plugins/schedulinggates/scheduling_gates_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package schedulinggates import ( - "context" "testing" "github.com/google/go-cmp/cmp" @@ -26,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" st "k8s.io/kubernetes/pkg/scheduler/testing" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestPreEnqueue(t *testing.T) { @@ -63,12 +63,13 @@ func TestPreEnqueue(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - p, err := New(nil, nil, feature.Features{EnablePodSchedulingReadiness: tt.enablePodSchedulingReadiness}) + _, ctx := ktesting.NewTestContext(t) + p, err := New(ctx, nil, nil, feature.Features{EnablePodSchedulingReadiness: tt.enablePodSchedulingReadiness}) if err != nil { t.Fatalf("Creating plugin: %v", err) } - got := p.(framework.PreEnqueuePlugin).PreEnqueue(context.Background(), tt.pod) + got := p.(framework.PreEnqueuePlugin).PreEnqueue(ctx, tt.pod) if diff := cmp.Diff(tt.want, got); diff != "" { t.Errorf("unexpected status (-want, +got):\n%s", diff) } diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go index 9d1bbe85caf6d..edec5bd1b0a82 100644 --- a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go +++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go @@ -164,6 +164,6 @@ func (pl *TaintToleration) ScoreExtensions() framework.ScoreExtensions { } // New initializes a new plugin and returns it. -func New(_ runtime.Object, h framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, h framework.Handle) (framework.Plugin, error) { return &TaintToleration{handle: h}, nil } diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go index fca4262d81559..bdeb23cf75970 100644 --- a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go +++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go @@ -237,7 +237,10 @@ func TestTaintTolerationScore(t *testing.T) { snapshot := cache.NewSnapshot(nil, test.nodes) fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot)) - p, _ := New(nil, fh) + p, err := New(ctx, nil, fh) + if err != nil { + t.Fatalf("creating plugin: %v", err) + } status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, test.nodes) if !status.IsSuccess() { t.Errorf("unexpected error: %v", status) @@ -335,10 +338,14 @@ func TestTaintTolerationFilter(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(test.node) - p, _ := New(nil, nil) - gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, nodeInfo) + p, err := New(ctx, nil, nil) + if err != nil { + t.Fatalf("creating plugin: %v", err) + } + gotStatus := p.(framework.FilterPlugin).Filter(ctx, nil, test.pod, nodeInfo) if !reflect.DeepEqual(gotStatus, test.wantStatus) { t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) } diff --git a/pkg/scheduler/framework/plugins/testing/testing.go b/pkg/scheduler/framework/plugins/testing/testing.go index 2a525deb09ffe..1e7d4b3485633 100644 --- a/pkg/scheduler/framework/plugins/testing/testing.go +++ b/pkg/scheduler/framework/plugins/testing/testing.go @@ -49,7 +49,7 @@ func SetupPluginWithInformers( if err != nil { 
tb.Fatalf("Failed creating framework runtime: %v", err) } - p, err := pf(config, fh) + p, err := pf(ctx, config, fh) if err != nil { tb.Fatal(err) } @@ -72,7 +72,7 @@ func SetupPlugin( if err != nil { tb.Fatalf("Failed creating framework runtime: %v", err) } - p, err := pf(config, fh) + p, err := pf(ctx, config, fh) if err != nil { tb.Fatal(err) } diff --git a/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go b/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go index 5b51277622186..77b78d172b1b4 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go +++ b/pkg/scheduler/framework/plugins/volumebinding/assume_cache.go @@ -87,6 +87,10 @@ func (e *errObjectName) Error() string { // Restore() sets the latest object pointer back to the informer object. // Get/List() always returns the latest object pointer. type assumeCache struct { + // The logger that was chosen when setting up the cache. + // Will be used for all operations. + logger klog.Logger + // Synchronizes updates to store rwMutex sync.RWMutex @@ -129,8 +133,9 @@ func (c *assumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) { } // NewAssumeCache creates an assume cache for general objects. -func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) AssumeCache { +func NewAssumeCache(logger klog.Logger, informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) AssumeCache { c := &assumeCache{ + logger: logger, description: description, indexFunc: indexFunc, indexName: indexName, @@ -161,7 +166,7 @@ func (c *assumeCache) add(obj interface{}) { name, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { - klog.ErrorS(&errObjectName{err}, "Add failed") + c.logger.Error(&errObjectName{err}, "Add failed") return } @@ -171,29 +176,29 @@ func (c *assumeCache) add(obj interface{}) { if objInfo, _ := c.getObjInfo(name); objInfo != nil { newVersion, err := c.getObjVersion(name, obj) if err != nil { - klog.ErrorS(err, "Add failed: couldn't get object version") + c.logger.Error(err, "Add failed: couldn't get object version") return } storedVersion, err := c.getObjVersion(name, objInfo.latestObj) if err != nil { - klog.ErrorS(err, "Add failed: couldn't get stored object version") + c.logger.Error(err, "Add failed: couldn't get stored object version") return } // Only update object if version is newer. // This is so we don't override assumed objects due to informer resync. 
if newVersion <= storedVersion { - klog.V(10).InfoS("Skip adding object to assume cache because version is not newer than storedVersion", "description", c.description, "cacheKey", name, "newVersion", newVersion, "storedVersion", storedVersion) + c.logger.V(10).Info("Skip adding object to assume cache because version is not newer than storedVersion", "description", c.description, "cacheKey", name, "newVersion", newVersion, "storedVersion", storedVersion) return } } objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj} if err = c.store.Update(objInfo); err != nil { - klog.InfoS("Error occurred while updating stored object", "err", err) + c.logger.Info("Error occurred while updating stored object", "err", err) } else { - klog.V(10).InfoS("Adding object to assume cache", "description", c.description, "cacheKey", name, "assumeCache", obj) + c.logger.V(10).Info("Adding object to assume cache", "description", c.description, "cacheKey", name, "assumeCache", obj) } } @@ -208,7 +213,7 @@ func (c *assumeCache) delete(obj interface{}) { name, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - klog.ErrorS(&errObjectName{err}, "Failed to delete") + c.logger.Error(&errObjectName{err}, "Failed to delete") return } @@ -218,7 +223,7 @@ func (c *assumeCache) delete(obj interface{}) { objInfo := &objInfo{name: name} err = c.store.Delete(objInfo) if err != nil { - klog.ErrorS(err, "Failed to delete", "description", c.description, "cacheKey", name) + c.logger.Error(err, "Failed to delete", "description", c.description, "cacheKey", name) } } @@ -280,14 +285,14 @@ func (c *assumeCache) List(indexObj interface{}) []interface{} { allObjs := []interface{}{} objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj}) if err != nil { - klog.ErrorS(err, "List index error") + c.logger.Error(err, "List index error") return nil } for _, obj := range objs { objInfo, ok := obj.(*objInfo) if !ok { - klog.ErrorS(&errWrongType{"objInfo", obj}, "List error") + c.logger.Error(&errWrongType{"objInfo", obj}, "List error") continue } allObjs = append(allObjs, objInfo.latestObj) @@ -325,7 +330,7 @@ func (c *assumeCache) Assume(obj interface{}) error { // Only update the cached object objInfo.latestObj = obj - klog.V(4).InfoS("Assumed object", "description", c.description, "cacheKey", name, "version", newVersion) + c.logger.V(4).Info("Assumed object", "description", c.description, "cacheKey", name, "version", newVersion) return nil } @@ -336,10 +341,10 @@ func (c *assumeCache) Restore(objName string) { objInfo, err := c.getObjInfo(objName) if err != nil { // This could be expected if object got deleted - klog.V(5).InfoS("Restore object", "description", c.description, "cacheKey", objName, "err", err) + c.logger.V(5).Info("Restore object", "description", c.description, "cacheKey", objName, "err", err) } else { objInfo.latestObj = objInfo.apiObj - klog.V(4).InfoS("Restored object", "description", c.description, "cacheKey", objName) + c.logger.V(4).Info("Restored object", "description", c.description, "cacheKey", objName) } } @@ -354,6 +359,7 @@ type PVAssumeCache interface { type pvAssumeCache struct { AssumeCache + logger klog.Logger } func pvStorageClassIndexFunc(obj interface{}) ([]string, error) { @@ -364,8 +370,12 @@ func pvStorageClassIndexFunc(obj interface{}) ([]string, error) { } // NewPVAssumeCache creates a PV assume cache. 
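A sketch of how the logger-aware cache constructors are wired (the informer variables mirror the NewVolumeBinder change further down; in unit tests the logger comes from ktesting instead):

// Production wiring: the caller supplies the logger, and the caches
// tag it with their own names ("PV Cache" / "PVC Cache").
pvCache := NewPVAssumeCache(logger, pvInformer.Informer())
pvcCache := NewPVCAssumeCache(logger, pvcInformer.Informer())

// Test wiring, as in assume_cache_test.go:
// logger, _ := ktesting.NewTestContext(t)
// cache := NewPVAssumeCache(logger, nil)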
-func NewPVAssumeCache(informer cache.SharedIndexInformer) PVAssumeCache { - return &pvAssumeCache{NewAssumeCache(informer, "v1.PersistentVolume", "storageclass", pvStorageClassIndexFunc)} +func NewPVAssumeCache(logger klog.Logger, informer cache.SharedIndexInformer) PVAssumeCache { + logger = klog.LoggerWithName(logger, "PV Cache") + return &pvAssumeCache{ + AssumeCache: NewAssumeCache(logger, informer, "v1.PersistentVolume", "storageclass", pvStorageClassIndexFunc), + logger: logger, + } } func (c *pvAssumeCache) GetPV(pvName string) (*v1.PersistentVolume, error) { @@ -403,7 +413,7 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume for _, obj := range objs { pv, ok := obj.(*v1.PersistentVolume) if !ok { - klog.ErrorS(&errWrongType{"v1.PersistentVolume", obj}, "ListPVs") + c.logger.Error(&errWrongType{"v1.PersistentVolume", obj}, "ListPVs") continue } pvs = append(pvs, pv) @@ -423,11 +433,16 @@ type PVCAssumeCache interface { type pvcAssumeCache struct { AssumeCache + logger klog.Logger } // NewPVCAssumeCache creates a PVC assume cache. -func NewPVCAssumeCache(informer cache.SharedIndexInformer) PVCAssumeCache { - return &pvcAssumeCache{NewAssumeCache(informer, "v1.PersistentVolumeClaim", "", nil)} +func NewPVCAssumeCache(logger klog.Logger, informer cache.SharedIndexInformer) PVCAssumeCache { + logger = klog.LoggerWithName(logger, "PVC Cache") + return &pvcAssumeCache{ + AssumeCache: NewAssumeCache(logger, informer, "v1.PersistentVolumeClaim", "", nil), + logger: logger, + } } func (c *pvcAssumeCache) GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) { diff --git a/pkg/scheduler/framework/plugins/volumebinding/assume_cache_test.go b/pkg/scheduler/framework/plugins/volumebinding/assume_cache_test.go index 49fcb1cee852f..7391a412f8905 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/assume_cache_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/assume_cache_test.go @@ -23,6 +23,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/component-helpers/storage/volume" + "k8s.io/klog/v2/ktesting" ) func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) { @@ -53,6 +54,7 @@ func verifyPV(cache PVAssumeCache, name string, expectedPV *v1.PersistentVolume) } func TestAssumePV(t *testing.T) { + logger, _ := ktesting.NewTestContext(t) scenarios := map[string]struct { oldPV *v1.PersistentVolume newPV *v1.PersistentVolume @@ -96,7 +98,7 @@ func TestAssumePV(t *testing.T) { } for name, scenario := range scenarios { - cache := NewPVAssumeCache(nil) + cache := NewPVAssumeCache(logger, nil) internalCache, ok := cache.(*pvAssumeCache).AssumeCache.(*assumeCache) if !ok { t.Fatalf("Failed to get internal cache") @@ -130,7 +132,8 @@ func TestAssumePV(t *testing.T) { } func TestRestorePV(t *testing.T) { - cache := NewPVAssumeCache(nil) + logger, _ := ktesting.NewTestContext(t) + cache := NewPVAssumeCache(logger, nil) internalCache, ok := cache.(*pvAssumeCache).AssumeCache.(*assumeCache) if !ok { t.Fatalf("Failed to get internal cache") @@ -170,7 +173,8 @@ func TestRestorePV(t *testing.T) { } func TestBasicPVCache(t *testing.T) { - cache := NewPVAssumeCache(nil) + logger, _ := ktesting.NewTestContext(t) + cache := NewPVAssumeCache(logger, nil) internalCache, ok := cache.(*pvAssumeCache).AssumeCache.(*assumeCache) if !ok { t.Fatalf("Failed to get internal cache") @@ -214,7 +218,8 @@ func TestBasicPVCache(t *testing.T) { } func 
TestPVCacheWithStorageClasses(t *testing.T) { - cache := NewPVAssumeCache(nil) + logger, _ := ktesting.NewTestContext(t) + cache := NewPVAssumeCache(logger, nil) internalCache, ok := cache.(*pvAssumeCache).AssumeCache.(*assumeCache) if !ok { t.Fatalf("Failed to get internal cache") @@ -260,7 +265,8 @@ func TestPVCacheWithStorageClasses(t *testing.T) { } func TestAssumeUpdatePVCache(t *testing.T) { - cache := NewPVAssumeCache(nil) + logger, _ := ktesting.NewTestContext(t) + cache := NewPVAssumeCache(logger, nil) internalCache, ok := cache.(*pvAssumeCache).AssumeCache.(*assumeCache) if !ok { t.Fatalf("Failed to get internal cache") @@ -315,6 +321,7 @@ func verifyPVC(cache PVCAssumeCache, pvcKey string, expectedPVC *v1.PersistentVo } func TestAssumePVC(t *testing.T) { + logger, _ := ktesting.NewTestContext(t) scenarios := map[string]struct { oldPVC *v1.PersistentVolumeClaim newPVC *v1.PersistentVolumeClaim @@ -353,7 +360,7 @@ func TestAssumePVC(t *testing.T) { } for name, scenario := range scenarios { - cache := NewPVCAssumeCache(nil) + cache := NewPVCAssumeCache(logger, nil) internalCache, ok := cache.(*pvcAssumeCache).AssumeCache.(*assumeCache) if !ok { t.Fatalf("Failed to get internal cache") @@ -387,7 +394,8 @@ func TestAssumePVC(t *testing.T) { } func TestRestorePVC(t *testing.T) { - cache := NewPVCAssumeCache(nil) + logger, _ := ktesting.NewTestContext(t) + cache := NewPVCAssumeCache(logger, nil) internalCache, ok := cache.(*pvcAssumeCache).AssumeCache.(*assumeCache) if !ok { t.Fatalf("Failed to get internal cache") @@ -427,7 +435,8 @@ func TestRestorePVC(t *testing.T) { } func TestAssumeUpdatePVCCache(t *testing.T) { - cache := NewPVCAssumeCache(nil) + logger, _ := ktesting.NewTestContext(t) + cache := NewPVCAssumeCache(logger, nil) internalCache, ok := cache.(*pvcAssumeCache).AssumeCache.(*assumeCache) if !ok { t.Fatalf("Failed to get internal cache") diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go index d035b16721beb..f6ce916c6bfe1 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/binder.go +++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go @@ -149,7 +149,7 @@ type InTreeToCSITranslator interface { type SchedulerVolumeBinder interface { // GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning), // unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding. - GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) + GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) // GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be // potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used @@ -157,7 +157,7 @@ type SchedulerVolumeBinder interface { // // If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made // and all nodes should be considered. - GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) + GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the // node and returns pod's volumes information. @@ -172,7 +172,7 @@ type SchedulerVolumeBinder interface { // for volumes that still need to be created. 
// // This function is called by the scheduler VolumeBinding plugin and can be called in parallel - FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) + FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) // AssumePodVolumes will: // 1. Take the PV matches for unbound PVCs and update the PV cache assuming @@ -183,7 +183,7 @@ type SchedulerVolumeBinder interface { // It returns true if all volumes are fully bound // // This function is called serially. - AssumePodVolumes(assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) + AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) // RevertAssumedPodVolumes will revert assumed PV and PVC cache. RevertAssumedPodVolumes(podVolumes *PodVolumes) @@ -244,6 +244,7 @@ type CapacityCheck struct { // // capacityCheck determines how storage capacity is checked (CSIStorageCapacity feature). func NewVolumeBinder( + logger klog.Logger, kubeClient clientset.Interface, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, @@ -259,8 +260,8 @@ func NewVolumeBinder( classLister: storageClassInformer.Lister(), nodeLister: nodeInformer.Lister(), csiNodeLister: csiNodeInformer.Lister(), - pvcCache: NewPVCAssumeCache(pvcInformer.Informer()), - pvCache: NewPVAssumeCache(pvInformer.Informer()), + pvcCache: NewPVCAssumeCache(logger, pvcInformer.Informer()), + pvCache: NewPVAssumeCache(logger, pvInformer.Informer()), bindTimeout: bindTimeout, translator: csitrans.New(), } @@ -274,11 +275,11 @@ func NewVolumeBinder( // FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs // for the given pod and node. If the node does not fit, conflict reasons are // returned. -func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { +func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { podVolumes = &PodVolumes{} // Warning: Below log needs high verbosity as it can be printed several times (#60933). - klog.V(5).InfoS("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node)) + logger.V(5).Info("FindPodVolumes", "pod", klog.KObj(pod), "node", klog.KObj(node)) // Initialize to true for pods that don't have volumes. 
These // booleans get translated into reason strings when the function @@ -330,7 +331,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeCla // Check PV node affinity on bound volumes if len(podVolumeClaims.boundClaims) > 0 { - boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(podVolumeClaims.boundClaims, node, pod) + boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(logger, podVolumeClaims.boundClaims, node, pod) if err != nil { return } @@ -360,7 +361,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeCla // Find matching volumes if len(claimsToFindMatching) > 0 { var unboundClaims []*v1.PersistentVolumeClaim - unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, podVolumeClaims.unboundVolumesDelayBinding, node) + unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(logger, pod, claimsToFindMatching, podVolumeClaims.unboundVolumesDelayBinding, node) if err != nil { return } @@ -370,7 +371,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeCla // Check for claims to provision. This is the first time where we potentially // find out that storage is not sufficient for the node. if len(claimsToProvision) > 0 { - unboundVolumesSatisfied, sufficientStorage, dynamicProvisions, err = b.checkVolumeProvisions(pod, claimsToProvision, node) + unboundVolumesSatisfied, sufficientStorage, dynamicProvisions, err = b.checkVolumeProvisions(logger, pod, claimsToProvision, node) if err != nil { return } @@ -386,7 +387,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeCla // // Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes // should be considered. -func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { +func (b *volumeBinder) GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { if len(boundClaims) == 0 { return } @@ -419,12 +420,12 @@ func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) } if len(errs) > 0 { - klog.V(4).InfoS("GetEligibleNodes: one or more error occurred finding eligible nodes", "error", errs) + logger.V(4).Info("GetEligibleNodes: one or more error occurred finding eligible nodes", "error", errs) return nil } if eligibleNodes != nil { - klog.V(4).InfoS("GetEligibleNodes: reduced down eligible nodes", "nodes", eligibleNodes) + logger.V(4).Info("GetEligibleNodes: reduced down eligible nodes", "nodes", eligibleNodes) } return } @@ -434,16 +435,16 @@ func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) // 1. Update the pvCache with the new prebound PV. // 2. Update the pvcCache with the new PVCs with annotations set // 3. Update PodVolumes again with cached API updates for PVs and PVCs. 
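Putting the SchedulerVolumeBinder interface changes together, a sketch of the scheduler-side sequence with the new logger parameters (error handling trimmed; variable names illustrative):

logger := klog.FromContext(ctx)

claims, err := binder.GetPodVolumeClaims(logger, pod)
if err != nil {
	return err
}
podVolumes, reasons, err := binder.FindPodVolumes(logger, pod, claims, node)
if err != nil {
	return err
}
if len(reasons) > 0 {
	return fmt.Errorf("node %s rejected: %v", node.Name, reasons)
}
allBound, err := binder.AssumePodVolumes(logger, pod, node.Name, podVolumes)
if err != nil {
	return err
}
if !allBound {
	// Remaining API updates and waiting happen in BindPodVolumes(ctx, pod, podVolumes);
	// RevertAssumedPodVolumes(podVolumes) undoes the assumption if scheduling is aborted.
}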
-func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) { - klog.V(4).InfoS("AssumePodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName)) +func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (allFullyBound bool, err error) { + logger.V(4).Info("AssumePodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName)) defer func() { if err != nil { metrics.VolumeSchedulingStageFailed.WithLabelValues("assume").Inc() } }() - if allBound := b.arePodVolumesBound(assumedPod); allBound { - klog.V(4).InfoS("AssumePodVolumes: all PVCs bound and nothing to do", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName)) + if allBound := b.arePodVolumesBound(logger, assumedPod); allBound { + logger.V(4).Info("AssumePodVolumes: all PVCs bound and nothing to do", "pod", klog.KObj(assumedPod), "node", klog.KRef("", nodeName)) return true, nil } @@ -451,7 +452,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, pod newBindings := []*BindingInfo{} for _, binding := range podVolumes.StaticBindings { newPV, dirty, err := volume.GetBindVolumeToClaim(binding.pv, binding.pvc) - klog.V(5).InfoS("AssumePodVolumes: GetBindVolumeToClaim", + logger.V(5).Info("AssumePodVolumes: GetBindVolumeToClaim", "pod", klog.KObj(assumedPod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), @@ -459,7 +460,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, pod "dirty", dirty, ) if err != nil { - klog.ErrorS(err, "AssumePodVolumes: fail to GetBindVolumeToClaim") + logger.Error(err, "AssumePodVolumes: fail to GetBindVolumeToClaim") b.revertAssumedPVs(newBindings) return false, err } @@ -506,7 +507,8 @@ func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) { // makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound // by the PV controller. func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) (err error) { - klog.V(4).InfoS("BindPodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", assumedPod.Spec.NodeName)) + logger := klog.FromContext(ctx) + logger.V(4).Info("BindPodVolumes", "pod", klog.KObj(assumedPod), "node", klog.KRef("", assumedPod.Spec.NodeName)) defer func() { if err != nil { @@ -524,7 +526,7 @@ func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, p } err = wait.PollUntilContextTimeout(ctx, time.Second, b.bindTimeout, false, func(ctx context.Context) (bool, error) { - b, err := b.checkBindings(assumedPod, bindings, claimsToProvision) + b, err := b.checkBindings(logger, assumedPod, bindings, claimsToProvision) return b, err }) if err != nil { @@ -543,6 +545,7 @@ func getPVCName(pvc *v1.PersistentVolumeClaim) string { // bindAPIUpdate makes the API update for those PVs/PVCs. func (b *volumeBinder) bindAPIUpdate(ctx context.Context, pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error { + logger := klog.FromContext(ctx) podName := getPodName(pod) if bindings == nil { return fmt.Errorf("failed to get cached bindings for pod %q", podName) @@ -574,14 +577,14 @@ func (b *volumeBinder) bindAPIUpdate(ctx context.Context, pod *v1.Pod, bindings // There is no API rollback if the actual binding fails for _, binding = range bindings { // TODO: does it hurt if we make an api call and nothing needs to be updated? 
- klog.V(5).InfoS("Updating PersistentVolume: binding to claim", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc)) + logger.V(5).Info("Updating PersistentVolume: binding to claim", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc)) newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(ctx, binding.pv, metav1.UpdateOptions{}) if err != nil { - klog.V(4).InfoS("Updating PersistentVolume: binding to claim failed", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), "err", err) + logger.V(4).Info("Updating PersistentVolume: binding to claim failed", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc), "err", err) return err } - klog.V(2).InfoS("Updated PersistentVolume with claim. Waiting for binding to complete", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc)) + logger.V(2).Info("Updated PersistentVolume with claim. Waiting for binding to complete", "pod", klog.KObj(pod), "PV", klog.KObj(binding.pv), "PVC", klog.KObj(binding.pvc)) // Save updated object from apiserver for later checking. binding.pv = newPV lastProcessedBinding++ @@ -590,10 +593,10 @@ func (b *volumeBinder) bindAPIUpdate(ctx context.Context, pod *v1.Pod, bindings // Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest // PV controller is expected to signal back by removing related annotations if actual provisioning fails for i, claim = range claimsToProvision { - klog.V(5).InfoS("Updating claims objects to trigger volume provisioning", "pod", klog.KObj(pod), "PVC", klog.KObj(claim)) + logger.V(5).Info("Updating claims objects to trigger volume provisioning", "pod", klog.KObj(pod), "PVC", klog.KObj(claim)) newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) if err != nil { - klog.V(4).InfoS("Updating PersistentVolumeClaim: binding to volume failed", "PVC", klog.KObj(claim), "err", err) + logger.V(4).Info("Updating PersistentVolumeClaim: binding to volume failed", "PVC", klog.KObj(claim), "err", err) return err } @@ -619,7 +622,7 @@ var ( // PV/PVC cache can be assumed again in main scheduler loop, we must check // latest state in API server which are shared with PV controller and // provisioners -func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) { +func (b *volumeBinder) checkBindings(logger klog.Logger, pod *v1.Pod, bindings []*BindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) { podName := getPodName(pod) if bindings == nil { return false, fmt.Errorf("failed to get cached bindings for pod %q", podName) @@ -636,7 +639,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim csiNode, err := b.csiNodeLister.Get(node.Name) if err != nil { // TODO: return the error once CSINode is created by default - klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) + logger.V(4).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) } // Check for any conditions that might require scheduling retry @@ -648,7 +651,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim if apierrors.IsNotFound(err) { return false, fmt.Errorf("pod does not exist any more: %w", err) } - klog.ErrorS(err, "Failed to get pod from the lister", 
"pod", klog.KObj(pod)) + logger.Error(err, "Failed to get pod from the lister", "pod", klog.KObj(pod)) } for _, binding := range bindings { @@ -744,11 +747,11 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim } // All pvs and pvcs that we operated on are bound - klog.V(2).InfoS("All PVCs for pod are bound", "pod", klog.KObj(pod)) + logger.V(2).Info("All PVCs for pod are bound", "pod", klog.KObj(pod)) return true, nil } -func (b *volumeBinder) isVolumeBound(pod *v1.Pod, vol *v1.Volume) (bound bool, pvc *v1.PersistentVolumeClaim, err error) { +func (b *volumeBinder) isVolumeBound(logger klog.Logger, pod *v1.Pod, vol *v1.Volume) (bound bool, pvc *v1.PersistentVolumeClaim, err error) { pvcName := "" isEphemeral := false switch { @@ -763,7 +766,7 @@ func (b *volumeBinder) isVolumeBound(pod *v1.Pod, vol *v1.Volume) (bound bool, p return true, nil, nil } - bound, pvc, err = b.isPVCBound(pod.Namespace, pvcName) + bound, pvc, err = b.isPVCBound(logger, pod.Namespace, pvcName) // ... the PVC must be owned by the pod. if isEphemeral && err == nil && pvc != nil { if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil { @@ -773,7 +776,7 @@ func (b *volumeBinder) isVolumeBound(pod *v1.Pod, vol *v1.Volume) (bound bool, p return } -func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) { +func (b *volumeBinder) isPVCBound(logger klog.Logger, namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) { claim := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, @@ -788,12 +791,12 @@ func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.Persiste fullyBound := b.isPVCFullyBound(pvc) if fullyBound { - klog.V(5).InfoS("PVC is fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName)) + logger.V(5).Info("PVC is fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName)) } else { if pvc.Spec.VolumeName != "" { - klog.V(5).InfoS("PVC is not fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName)) + logger.V(5).Info("PVC is not fully bound to PV", "PVC", klog.KObj(pvc), "PV", klog.KRef("", pvc.Spec.VolumeName)) } else { - klog.V(5).InfoS("PVC is not bound", "PVC", klog.KObj(pvc)) + logger.V(5).Info("PVC is not bound", "PVC", klog.KObj(pvc)) } } return fullyBound, pvc, nil @@ -804,9 +807,9 @@ func (b *volumeBinder) isPVCFullyBound(pvc *v1.PersistentVolumeClaim) bool { } // arePodVolumesBound returns true if all volumes are fully bound -func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool { +func (b *volumeBinder) arePodVolumesBound(logger klog.Logger, pod *v1.Pod) bool { for _, vol := range pod.Spec.Volumes { - if isBound, _, _ := b.isVolumeBound(pod, &vol); !isBound { + if isBound, _, _ := b.isVolumeBound(logger, pod, &vol); !isBound { // Pod has at least one PVC that needs binding return false } @@ -816,7 +819,7 @@ func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool { // GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning), // unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding. 
-func (b *volumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) { +func (b *volumeBinder) GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) { podVolumeClaims = &PodVolumeClaims{ boundClaims: []*v1.PersistentVolumeClaim{}, unboundClaimsImmediate: []*v1.PersistentVolumeClaim{}, @@ -824,7 +827,7 @@ func (b *volumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolu } for _, vol := range pod.Spec.Volumes { - volumeBound, pvc, err := b.isVolumeBound(pod, &vol) + volumeBound, pvc, err := b.isVolumeBound(logger, pod, &vol) if err != nil { return podVolumeClaims, err } @@ -859,11 +862,11 @@ func (b *volumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolu return podVolumeClaims, nil } -func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) { +func (b *volumeBinder) checkBoundClaims(logger klog.Logger, claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) { csiNode, err := b.csiNodeLister.Get(node.Name) if err != nil { // TODO: return the error once CSINode is created by default - klog.V(4).InfoS("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) + logger.V(4).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) } for _, pvc := range claims { @@ -883,19 +886,19 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node err = volume.CheckNodeAffinity(pv, node.Labels) if err != nil { - klog.V(4).InfoS("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err) + logger.V(4).Info("PersistentVolume and node mismatch for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod), "err", err) return false, true, nil } - klog.V(5).InfoS("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod)) + logger.V(5).Info("PersistentVolume and node matches for pod", "PV", klog.KRef("", pvName), "node", klog.KObj(node), "pod", klog.KObj(pod)) } - klog.V(4).InfoS("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node)) + logger.V(4).Info("All bound volumes for pod match with node", "pod", klog.KObj(pod), "node", klog.KObj(node)) return true, true, nil } // findMatchingVolumes tries to find matching volumes for given claims, // and return unbound claims for further provision. 
-func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, unboundVolumesDelayBinding map[string][]*v1.PersistentVolume, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) { +func (b *volumeBinder) findMatchingVolumes(logger klog.Logger, pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, unboundVolumesDelayBinding map[string][]*v1.PersistentVolume, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) { // Sort all the claims by increasing size request to get the smallest fits sort.Sort(byPVCSize(claimsToBind)) @@ -914,7 +917,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi return false, nil, nil, err } if pv == nil { - klog.V(4).InfoS("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node)) + logger.V(4).Info("No matching volumes for pod", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc), "node", klog.KObj(node)) unboundClaims = append(unboundClaims, pvc) foundMatches = false continue @@ -923,11 +926,11 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi // matching PV needs to be excluded so we don't select it again chosenPVs[pv.Name] = pv bindings = append(bindings, &BindingInfo{pv: pv, pvc: pvc}) - klog.V(5).InfoS("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod)) + logger.V(5).Info("Found matching PV for PVC for pod", "PV", klog.KObj(pv), "PVC", klog.KObj(pvc), "node", klog.KObj(node), "pod", klog.KObj(pod)) } if foundMatches { - klog.V(4).InfoS("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node)) + logger.V(4).Info("Found matching volumes for pod", "pod", klog.KObj(pod), "node", klog.KObj(node)) } return @@ -936,7 +939,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi // checkVolumeProvisions checks given unbound claims (the claims have gone through func // findMatchingVolumes, and do not have matching volumes for binding), and return true // if all of the claims are eligible for dynamic provision. 
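Claims that reach the dynamic-provisioning path are additionally gated on published CSI storage capacity; a sketch with a hypothetical CSIStorageCapacity object (name, sizes, and labels illustrative) of what hasEnoughCapacity ends up comparing per candidate node:

capacity := &storagev1.CSIStorageCapacity{
	ObjectMeta:       metav1.ObjectMeta{Name: "fast-sc-node-1"},
	StorageClassName: "fast-sc",
	NodeTopology: &metav1.LabelSelector{
		MatchLabels: map[string]string{"kubernetes.io/hostname": "node-1"},
	},
	Capacity: resource.NewQuantity(100<<30, resource.BinarySI), // driver reports 100 GiB
}
requested := int64(10 << 30) // the PVC asks for 10 GiB

// Both checks appear in the loop inside hasEnoughCapacity:
fits := capacitySufficient(capacity, requested) && b.nodeHasAccess(logger, node, capacity)
_ = fits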
-func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) { +func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) { dynamicProvisions = []*v1.PersistentVolumeClaim{} // We return early with provisionedClaims == nil if a check @@ -954,18 +957,18 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v } provisioner := class.Provisioner if provisioner == "" || provisioner == volume.NotSupportedProvisioner { - klog.V(4).InfoS("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim)) + logger.V(4).Info("Storage class of claim does not support dynamic provisioning", "storageClassName", className, "PVC", klog.KObj(claim)) return false, true, nil, nil } // Check if the node can satisfy the topology requirement in the class if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) { - klog.V(4).InfoS("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim)) + logger.V(4).Info("Node cannot satisfy provisioning topology requirements of claim", "node", klog.KObj(node), "PVC", klog.KObj(claim)) return false, true, nil, nil } // Check storage capacity. - sufficient, err := b.hasEnoughCapacity(provisioner, claim, class, node) + sufficient, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node) if err != nil { return false, false, nil, err } @@ -977,7 +980,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v dynamicProvisions = append(dynamicProvisions, claim) } - klog.V(4).InfoS("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node)) + logger.V(4).Info("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node)) return true, true, dynamicProvisions, nil } @@ -996,7 +999,7 @@ func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) { // hasEnoughCapacity checks whether the provisioner has enough capacity left for a new volume of the given size // that is available from the node. -func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, error) { +func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, error) { quantity, ok := claim.Spec.Resources.Requests[v1.ResourceStorage] if !ok { // No capacity to check for. @@ -1029,7 +1032,7 @@ func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.Persisten for _, capacity := range capacities { if capacity.StorageClassName == storageClass.Name && capacitySufficient(capacity, sizeInBytes) && - b.nodeHasAccess(node, capacity) { + b.nodeHasAccess(logger, node, capacity) { // Enough capacity found. 
return true, nil } @@ -1037,7 +1040,7 @@ func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.Persisten // TODO (?): this doesn't give any information about which pools where considered and why // they had to be rejected. Log that above? But that might be a lot of log output... - klog.V(4).InfoS("Node has no accessible CSIStorageCapacity with enough capacity for PVC", + logger.V(4).Info("Node has no accessible CSIStorageCapacity with enough capacity for PVC", "node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass)) return false, nil } @@ -1051,7 +1054,7 @@ func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int6 return limit != nil && limit.Value() >= sizeInBytes } -func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1.CSIStorageCapacity) bool { +func (b *volumeBinder) nodeHasAccess(logger klog.Logger, node *v1.Node, capacity *storagev1.CSIStorageCapacity) bool { if capacity.NodeTopology == nil { // Unavailable return false @@ -1059,7 +1062,7 @@ func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1.CSIStora // Only matching by label is supported. selector, err := metav1.LabelSelectorAsSelector(capacity.NodeTopology) if err != nil { - klog.ErrorS(err, "Unexpected error converting to a label selector", "nodeTopology", capacity.NodeTopology) + logger.Error(err, "Unexpected error converting to a label selector", "nodeTopology", capacity.NodeTopology) return false } return selector.Matches(labels.Set(node.Labels)) diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go index f053900c75bac..1746780ce2ebc 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go @@ -44,6 +44,7 @@ import ( "k8s.io/component-helpers/storage/volume" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" + _ "k8s.io/klog/v2/ktesting/init" "k8s.io/kubernetes/pkg/controller" pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing" ) @@ -129,10 +130,6 @@ var ( zone1Labels = map[string]string{v1.LabelFailureDomainBetaZone: "us-east-1", v1.LabelFailureDomainBetaRegion: "us-east-1a"} ) -func init() { - klog.InitFlags(nil) -} - type testEnv struct { client clientset.Interface reactor *pvtesting.VolumeReactor @@ -149,9 +146,9 @@ type testEnv struct { internalCSIStorageCapacityInformer storageinformers.CSIStorageCapacityInformer } -func newTestBinder(t *testing.T, stopCh <-chan struct{}) *testEnv { +func newTestBinder(t *testing.T, ctx context.Context) *testEnv { client := &fake.Clientset{} - _, ctx := ktesting.NewTestContext(t) + logger := klog.FromContext(ctx) reactor := pvtesting.NewVolumeReactor(ctx, client, nil, nil, nil) // TODO refactor all tests to use real watch mechanism, see #72327 client.AddWatchReactor("*", func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) { @@ -177,6 +174,7 @@ func newTestBinder(t *testing.T, stopCh <-chan struct{}) *testEnv { CSIStorageCapacityInformer: csiStorageCapacityInformer, } binder := NewVolumeBinder( + logger, client, podInformer, nodeInformer, @@ -188,10 +186,10 @@ func newTestBinder(t *testing.T, stopCh <-chan struct{}) *testEnv { 10*time.Second) // Wait for informers cache sync - informerFactory.Start(stopCh) - for v, synced := range informerFactory.WaitForCacheSync(stopCh) { + informerFactory.Start(ctx.Done()) + for v, synced := range 
informerFactory.WaitForCacheSync(ctx.Done()) { if !synced { - klog.ErrorS(nil, "Error syncing informer", "informer", v) + logger.Error(nil, "Error syncing informer", "informer", v) os.Exit(1) } } @@ -846,15 +844,15 @@ func checkReasons(t *testing.T, actual, expected ConflictReasons) { } // findPodVolumes gets and finds volumes for given pod and node -func findPodVolumes(binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) (*PodVolumes, ConflictReasons, error) { - podVolumeClaims, err := binder.GetPodVolumeClaims(pod) +func findPodVolumes(logger klog.Logger, binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) (*PodVolumes, ConflictReasons, error) { + podVolumeClaims, err := binder.GetPodVolumeClaims(logger, pod) if err != nil { return nil, nil, err } if len(podVolumeClaims.unboundClaimsImmediate) > 0 { return nil, nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims") } - return binder.FindPodVolumes(pod, podVolumeClaims, node) + return binder.FindPodVolumes(logger, pod, podVolumeClaims, node) } func TestFindPodVolumesWithoutProvisioning(t *testing.T) { @@ -1006,11 +1004,12 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) { } run := func(t *testing.T, scenario scenarioType, csiDriver *storagev1.CSIDriver) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.initVolumes(scenario.pvs, scenario.pvs) if csiDriver != nil { testEnv.addCSIDriver(csiDriver) @@ -1031,7 +1030,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) { } // Execute - podVolumes, reasons, err := findPodVolumes(testEnv.binder, scenario.pod, testNode) + podVolumes, reasons, err := findPodVolumes(logger, testEnv.binder, scenario.pod, testNode) // Validate if !scenario.shouldFail && err != nil { @@ -1133,11 +1132,12 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) { } run := func(t *testing.T, scenario scenarioType, csiDriver *storagev1.CSIDriver) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.initVolumes(scenario.pvs, scenario.pvs) if csiDriver != nil { testEnv.addCSIDriver(csiDriver) @@ -1158,7 +1158,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) { } // Execute - podVolumes, reasons, err := findPodVolumes(testEnv.binder, scenario.pod, testNode) + podVolumes, reasons, err := findPodVolumes(logger, testEnv.binder, scenario.pod, testNode) // Validate if !scenario.shouldFail && err != nil { @@ -1240,11 +1240,12 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) { } run := func(t *testing.T, scenario scenarioType) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.initVolumes(scenario.pvs, scenario.pvs) var node *v1.Node @@ -1274,7 +1275,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) { } // Execute - _, reasons, err := findPodVolumes(testEnv.binder, scenario.pod, node) + _, reasons, err := findPodVolumes(logger, testEnv.binder, scenario.pod, node) // Validate if !scenario.shouldFail && err != nil { @@ -1357,11 +1358,12 @@ func TestAssumePodVolumes(t 
*testing.T) { } run := func(t *testing.T, scenario scenarioType) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.initClaims(scenario.podPVCs, scenario.podPVCs) pod := makePod("test-pod"). withNamespace("testns"). @@ -1374,7 +1376,7 @@ func TestAssumePodVolumes(t *testing.T) { testEnv.initVolumes(scenario.pvs, scenario.pvs) // Execute - allBound, err := testEnv.binder.AssumePodVolumes(pod, "node1", podVolumes) + allBound, err := testEnv.binder.AssumePodVolumes(logger, pod, "node1", podVolumes) // Validate if !scenario.shouldFail && err != nil { @@ -1406,7 +1408,8 @@ func TestAssumePodVolumes(t *testing.T) { } func TestRevertAssumedPodVolumes(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() podPVCs := []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC} @@ -1417,7 +1420,7 @@ func TestRevertAssumedPodVolumes(t *testing.T) { expectedProvisionings := []*v1.PersistentVolumeClaim{selectedNodePVC} // Setup - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.initClaims(podPVCs, podPVCs) pod := makePod("test-pod"). withNamespace("testns"). @@ -1429,7 +1432,7 @@ func TestRevertAssumedPodVolumes(t *testing.T) { } testEnv.initVolumes(pvs, pvs) - allbound, err := testEnv.binder.AssumePodVolumes(pod, "node1", podVolumes) + allbound, err := testEnv.binder.AssumePodVolumes(logger, pod, "node1", podVolumes) if allbound || err != nil { t.Errorf("No volumes are assumed") } @@ -1534,11 +1537,12 @@ func TestBindAPIUpdate(t *testing.T) { } run := func(t *testing.T, scenario scenarioType) { - ctx, cancel := context.WithCancel(context.Background()) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) pod := makePod("test-pod"). withNamespace("testns"). withNodeName("node1").Pod @@ -1732,13 +1736,14 @@ func TestCheckBindings(t *testing.T) { } run := func(t *testing.T, scenario scenarioType) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup pod := makePod("test-pod"). withNamespace("testns"). withNodeName("node1").Pod - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.internalPodInformer.Informer().GetIndexer().Add(pod) testEnv.initNodes([]*v1.Node{node1}) testEnv.initVolumes(scenario.initPVs, nil) @@ -1762,7 +1767,7 @@ func TestCheckBindings(t *testing.T) { } // Execute - allBound, err := testEnv.internalBinder.checkBindings(pod, scenario.bindings, scenario.provisionedPVCs) + allBound, err := testEnv.internalBinder.checkBindings(logger, pod, scenario.bindings, scenario.provisionedPVCs) // Validate if !scenario.shouldFail && err != nil { @@ -1857,14 +1862,15 @@ func TestCheckBindingsWithCSIMigration(t *testing.T) { } run := func(t *testing.T, scenario scenarioType) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup pod := makePod("test-pod"). withNamespace("testns"). 
withNodeName("node1").Pod - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.internalPodInformer.Informer().GetIndexer().Add(pod) testEnv.initNodes(scenario.initNodes) testEnv.initCSINodes(scenario.initCSINodes) @@ -1881,7 +1887,7 @@ func TestCheckBindingsWithCSIMigration(t *testing.T) { } // Execute - allBound, err := testEnv.internalBinder.checkBindings(pod, scenario.bindings, scenario.provisionedPVCs) + allBound, err := testEnv.internalBinder.checkBindings(logger, pod, scenario.bindings, scenario.provisionedPVCs) // Validate if !scenario.shouldFail && err != nil { @@ -2047,13 +2053,14 @@ func TestBindPodVolumes(t *testing.T) { } run := func(t *testing.T, scenario scenarioType) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup pod := makePod("test-pod"). withNamespace("testns"). withNodeName("node1").Pod - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.internalPodInformer.Informer().GetIndexer().Add(pod) if scenario.nodes == nil { scenario.nodes = []*v1.Node{node1} @@ -2091,7 +2098,7 @@ func TestBindPodVolumes(t *testing.T) { go func(scenario scenarioType) { time.Sleep(5 * time.Second) // Sleep a while to run after bindAPIUpdate in BindPodVolumes - klog.V(5).InfoS("Running delay function") + logger.V(5).Info("Running delay function") scenario.delayFunc(t, ctx, testEnv, pod, scenario.initPVs, scenario.initPVCs) }(scenario) } @@ -2127,9 +2134,10 @@ func TestFindAssumeVolumes(t *testing.T) { pvs := []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1c} // Setup - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.initVolumes(pvs, pvs) testEnv.initClaims(podPVCs, podPVCs) pod := makePod("test-pod"). @@ -2148,7 +2156,7 @@ func TestFindAssumeVolumes(t *testing.T) { // Execute // 1. Find matching PVs - podVolumes, reasons, err := findPodVolumes(testEnv.binder, pod, testNode) + podVolumes, reasons, err := findPodVolumes(logger, testEnv.binder, pod, testNode) if err != nil { t.Errorf("Test failed: FindPodVolumes returned error: %v", err) } @@ -2158,7 +2166,7 @@ func TestFindAssumeVolumes(t *testing.T) { expectedBindings := podVolumes.StaticBindings // 2. Assume matches - allBound, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name, podVolumes) + allBound, err := testEnv.binder.AssumePodVolumes(logger, pod, testNode.Name, podVolumes) if err != nil { t.Errorf("Test failed: AssumePodVolumes returned error: %v", err) } @@ -2174,7 +2182,7 @@ func TestFindAssumeVolumes(t *testing.T) { // This should always return the original chosen pv // Run this many times in case sorting returns different orders for the two PVs. for i := 0; i < 50; i++ { - podVolumes, reasons, err := findPodVolumes(testEnv.binder, pod, testNode) + podVolumes, reasons, err := findPodVolumes(logger, testEnv.binder, pod, testNode) if err != nil { t.Errorf("Test failed: FindPodVolumes returned error: %v", err) } @@ -2283,11 +2291,12 @@ func TestCapacity(t *testing.T) { } run := func(t *testing.T, scenario scenarioType, optIn bool) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup: the driver has the feature enabled, but the scheduler might not. 
- testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.addCSIDriver(makeCSIDriver(provisioner, optIn)) testEnv.addCSIStorageCapacities(scenario.capacities) @@ -2301,7 +2310,7 @@ func TestCapacity(t *testing.T) { withPVCSVolume(scenario.pvcs).Pod // Execute - podVolumes, reasons, err := findPodVolumes(testEnv.binder, pod, testNode) + podVolumes, reasons, err := findPodVolumes(logger, testEnv.binder, pod, testNode) // Validate shouldFail := scenario.shouldFail @@ -2431,18 +2440,19 @@ func TestGetEligibleNodes(t *testing.T) { } run := func(t *testing.T, scenario scenarioType) { - ctx, cancel := context.WithCancel(context.Background()) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() // Setup - testEnv := newTestBinder(t, ctx.Done()) + testEnv := newTestBinder(t, ctx) testEnv.initVolumes(scenario.pvs, scenario.pvs) testEnv.initNodes(scenario.nodes) testEnv.initClaims(scenario.pvcs, scenario.pvcs) // Execute - eligibleNodes := testEnv.binder.GetEligibleNodes(scenario.pvcs) + eligibleNodes := testEnv.binder.GetEligibleNodes(logger, scenario.pvcs) // Validate if reflect.DeepEqual(scenario.eligibleNodes, eligibleNodes) { diff --git a/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go b/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go index b8e78b8bea120..667669c65b44c 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go +++ b/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go @@ -21,6 +21,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" ) // FakeVolumeBinderConfig holds configurations for fake volume binder. @@ -50,22 +51,22 @@ type FakeVolumeBinder struct { var _ SchedulerVolumeBinder = &FakeVolumeBinder{} // GetPodVolumeClaims implements SchedulerVolumeBinder.GetPodVolumes. -func (b *FakeVolumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) { +func (b *FakeVolumeBinder) GetPodVolumeClaims(_ klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) { return &PodVolumeClaims{}, nil } // GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes. -func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { +func (b *FakeVolumeBinder) GetEligibleNodes(_ klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { return nil } // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes. -func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { +func (b *FakeVolumeBinder) FindPodVolumes(_ klog.Logger, pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { return nil, b.config.FindReasons, b.config.FindErr } // AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes. 
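The test changes above all follow the same shape: the binder tests no longer receive a bare stop channel but a context created via ktesting, which carries the per-test logger and supplies ctx.Done() wherever a stopCh used to go. Below is a minimal, self-contained sketch of that pattern under assumed names (startInformers and TestStartInformers are illustrative, not part of this patch):

package example

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
	_ "k8s.io/klog/v2/ktesting/init" // registers ktesting's command-line flags, replacing klog.InitFlags in an init()
)

// startInformers stands in for helpers like newTestBinder: it accepts a context
// instead of a stop channel, reads the logger from it, and uses ctx.Done()
// wherever a <-chan struct{} was needed before.
func startInformers(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.V(4).Info("waiting for informer caches to sync")
	_ = ctx.Done() // passed to e.g. informerFactory.Start / WaitForCacheSync
}

func TestStartInformers(t *testing.T) {
	// NewTestContext routes log output through t and returns a context that
	// already carries the per-test logger.
	logger, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	logger.Info("starting test")
	startInformers(ctx)
}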
-func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (bool, error) { +func (b *FakeVolumeBinder) AssumePodVolumes(_ klog.Logger, assumedPod *v1.Pod, nodeName string, podVolumes *PodVolumes) (bool, error) { b.AssumeCalled = true return b.config.AllBound, b.config.AssumeErr } diff --git a/pkg/scheduler/framework/plugins/volumebinding/test_utils.go b/pkg/scheduler/framework/plugins/volumebinding/test_utils.go index 4ec84825b7e17..a1c968d76fc6c 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/test_utils.go +++ b/pkg/scheduler/framework/plugins/volumebinding/test_utils.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/component-helpers/storage/volume" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) type nodeBuilder struct { @@ -115,7 +115,7 @@ func makePVC(name string, storageClassName string) pvcBuilder { Namespace: v1.NamespaceDefault, }, Spec: v1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String(storageClassName), + StorageClassName: ptr.To(storageClassName), }, }} } diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go index 66756b7af134d..a8f3a596d17c2 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go +++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go @@ -161,6 +161,7 @@ func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) (bool, error) { // immediate PVCs bound. If not all immediate PVCs are bound, an // UnschedulableAndUnresolvable is returned. func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { + logger := klog.FromContext(ctx) // If pod does not reference any PVC, we don't need to do anything. if hasPVC, err := pl.podHasPVCs(pod); err != nil { return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error()) @@ -168,7 +169,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt state.Write(stateKey, &stateData{}) return nil, framework.NewStatus(framework.Skip) } - podVolumeClaims, err := pl.Binder.GetPodVolumeClaims(pod) + podVolumeClaims, err := pl.Binder.GetPodVolumeClaims(logger, pod) if err != nil { return nil, framework.AsStatus(err) } @@ -182,7 +183,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt } // Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims. var result *framework.PreFilterResult - if eligibleNodes := pl.Binder.GetEligibleNodes(podVolumeClaims.boundClaims); eligibleNodes != nil { + if eligibleNodes := pl.Binder.GetEligibleNodes(logger, podVolumeClaims.boundClaims); eligibleNodes != nil { result = &framework.PreFilterResult{ NodeNames: eligibleNodes, } @@ -232,6 +233,7 @@ func getStateData(cs *framework.CycleState) (*stateData, error) { // The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound // PVCs can be matched with an available and node-compatible PV. 
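As a side note on the test_utils.go hunk above, k8s.io/utils/pointer is replaced by the generic k8s.io/utils/ptr helper. A tiny sketch of the difference (the values are made up):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To is generic, so one helper replaces pointer.String, pointer.Int32, and friends.
	storageClassName := ptr.To("standard") // *string, as used for StorageClassName above
	replicas := ptr.To[int32](3)           // *int32
	fmt.Println(*storageClassName, *replicas)
}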
func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { + logger := klog.FromContext(ctx) node := nodeInfo.Node() state, err := getStateData(cs) @@ -239,7 +241,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p return framework.AsStatus(err) } - podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.podVolumeClaims, node) + podVolumes, reasons, err := pl.Binder.FindPodVolumes(logger, pod, state.podVolumeClaims, node) if err != nil { return framework.AsStatus(err) @@ -304,7 +306,7 @@ func (pl *VolumeBinding) Reserve(ctx context.Context, cs *framework.CycleState, // we don't need to hold the lock as only one node will be reserved for the given pod podVolumes, ok := state.podVolumesByNode[nodeName] if ok { - allBound, err := pl.Binder.AssumePodVolumes(pod, nodeName, podVolumes) + allBound, err := pl.Binder.AssumePodVolumes(klog.FromContext(ctx), pod, nodeName, podVolumes) if err != nil { return framework.AsStatus(err) } @@ -335,13 +337,14 @@ func (pl *VolumeBinding) PreBind(ctx context.Context, cs *framework.CycleState, if !ok { return framework.AsStatus(fmt.Errorf("no pod volumes found for node %q", nodeName)) } - klog.V(5).InfoS("Trying to bind volumes for pod", "pod", klog.KObj(pod)) + logger := klog.FromContext(ctx) + logger.V(5).Info("Trying to bind volumes for pod", "pod", klog.KObj(pod)) err = pl.Binder.BindPodVolumes(ctx, pod, podVolumes) if err != nil { - klog.V(1).InfoS("Failed to bind volumes for pod", "pod", klog.KObj(pod), "err", err) + logger.V(5).Info("Failed to bind volumes for pod", "pod", klog.KObj(pod), "err", err) return framework.AsStatus(err) } - klog.V(5).InfoS("Success binding volumes for pod", "pod", klog.KObj(pod)) + logger.V(5).Info("Success binding volumes for pod", "pod", klog.KObj(pod)) return nil } @@ -361,7 +364,7 @@ func (pl *VolumeBinding) Unreserve(ctx context.Context, cs *framework.CycleState } // New initializes a new plugin and returns it. 
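The plugin-side pattern used in PreFilter, Filter, Reserve, and PreBind above is always the same: pull the logger out of the incoming context and log structured key/value pairs instead of calling the package-level klog functions. A minimal sketch, with filterNode as a hypothetical stand-in for such a callback:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

// filterNode stands in for a callback like VolumeBinding.Filter: the logger
// comes from the context (klog.FromContext) rather than from global klog
// calls, and objects are logged with klog.KObj for consistent keys.
func filterNode(ctx context.Context, pod *v1.Pod, node *v1.Node) error {
	logger := klog.FromContext(ctx)
	logger.V(5).Info("evaluating node for pod", "pod", klog.KObj(pod), "node", klog.KObj(node))
	return nil
}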
-func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) { +func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) { args, ok := plArgs.(*config.VolumeBindingArgs) if !ok { return nil, fmt.Errorf("want args to be of type VolumeBindingArgs, got %T", plArgs) @@ -381,7 +384,7 @@ func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (fram CSIDriverInformer: fh.SharedInformerFactory().Storage().V1().CSIDrivers(), CSIStorageCapacityInformer: fh.SharedInformerFactory().Storage().V1().CSIStorageCapacities(), } - binder := NewVolumeBinder(fh.ClientSet(), podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second) + binder := NewVolumeBinder(klog.FromContext(ctx), fh.ClientSet(), podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second) // build score function var scorer volumeCapacityScorer diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go index b17244a0491e2..96dc2096d2897 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go @@ -806,7 +806,7 @@ func TestVolumeBinding(t *testing.T) { } } - pl, err := New(args, fh, item.fts) + pl, err := New(ctx, args, fh, item.fts) if err != nil { t.Fatal(err) } diff --git a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go index cfe883bd83f5c..80c35f44f2835 100644 --- a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go +++ b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go @@ -33,9 +33,8 @@ import ( // VolumeRestrictions is a plugin that checks volume restrictions. type VolumeRestrictions struct { - pvcLister corelisters.PersistentVolumeClaimLister - sharedLister framework.SharedLister - enableReadWriteOncePod bool + pvcLister corelisters.PersistentVolumeClaimLister + sharedLister framework.SharedLister } var _ framework.PreFilterPlugin = &VolumeRestrictions{} @@ -169,13 +168,6 @@ func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState *framewo } } - if !pl.enableReadWriteOncePod { - if needsCheck { - return nil, nil - } - return nil, framework.NewStatus(framework.Skip) - } - pvcs, err := pl.readWriteOncePodPVCsForPod(ctx, pod) if err != nil { if apierrors.IsNotFound(err) { @@ -198,9 +190,6 @@ func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState *framewo // AddPod from pre-computed data in cycleState. func (pl *VolumeRestrictions) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - if !pl.enableReadWriteOncePod { - return nil - } state, err := getPreFilterState(cycleState) if err != nil { return framework.AsStatus(err) @@ -211,9 +200,6 @@ func (pl *VolumeRestrictions) AddPod(ctx context.Context, cycleState *framework. // RemovePod from pre-computed data in cycleState. 
func (pl *VolumeRestrictions) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - if !pl.enableReadWriteOncePod { - return nil - } state, err := getPreFilterState(cycleState) if err != nil { return framework.AsStatus(err) @@ -321,9 +307,6 @@ func (pl *VolumeRestrictions) Filter(ctx context.Context, cycleState *framework. if !satisfyVolumeConflicts(pod, nodeInfo) { return framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict) } - if !pl.enableReadWriteOncePod { - return nil - } state, err := getPreFilterState(cycleState) if err != nil { return framework.AsStatus(err) @@ -348,14 +331,13 @@ func (pl *VolumeRestrictions) EventsToRegister() []framework.ClusterEventWithHin } // New initializes a new plugin and returns it. -func New(_ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { informerFactory := handle.SharedInformerFactory() pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() sharedLister := handle.SnapshotSharedLister() return &VolumeRestrictions{ - pvcLister: pvcLister, - sharedLister: sharedLister, - enableReadWriteOncePod: fts.EnableReadWriteOncePod, + pvcLister: pvcLister, + sharedLister: sharedLister, }, nil } diff --git a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go index a78ab1e3b3ad9..61f699779e435 100644 --- a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go +++ b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go @@ -24,9 +24,6 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - utilfeature "k8s.io/apiserver/pkg/util/feature" - featuregatetesting "k8s.io/component-base/featuregate/testing" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" @@ -355,8 +352,6 @@ func TestISCSIDiskConflicts(t *testing.T) { } func TestAccessModeConflicts(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() - // Required for querying lister for PVCs in the same namespace. 
podWithOnePVC := st.MakePod().Name("pod-with-one-pvc").Namespace(metav1.NamespaceDefault).PVC("claim-with-rwop-1").Node("node-1").Obj() podWithTwoPVCs := st.MakePod().Name("pod-with-two-pvcs").Namespace(metav1.NamespaceDefault).PVC("claim-with-rwop-1").PVC("claim-with-rwop-2").Node("node-1").Obj() @@ -401,81 +396,64 @@ func TestAccessModeConflicts(t *testing.T) { } tests := []struct { - name string - pod *v1.Pod - nodeInfo *framework.NodeInfo - existingPods []*v1.Pod - existingNodes []*v1.Node - existingPVCs []*v1.PersistentVolumeClaim - enableReadWriteOncePod bool - preFilterWantStatus *framework.Status - wantStatus *framework.Status + name string + pod *v1.Pod + nodeInfo *framework.NodeInfo + existingPods []*v1.Pod + existingNodes []*v1.Node + existingPVCs []*v1.PersistentVolumeClaim + preFilterWantStatus *framework.Status + wantStatus *framework.Status }{ { - name: "nothing", - pod: &v1.Pod{}, - nodeInfo: framework.NewNodeInfo(), - existingPods: []*v1.Pod{}, - existingNodes: []*v1.Node{}, - existingPVCs: []*v1.PersistentVolumeClaim{}, - enableReadWriteOncePod: true, - preFilterWantStatus: framework.NewStatus(framework.Skip), - wantStatus: nil, - }, - { - name: "nothing, ReadWriteOncePod disabled", - pod: &v1.Pod{}, - nodeInfo: framework.NewNodeInfo(), - existingPods: []*v1.Pod{}, - existingNodes: []*v1.Node{}, - existingPVCs: []*v1.PersistentVolumeClaim{}, - enableReadWriteOncePod: false, - preFilterWantStatus: framework.NewStatus(framework.Skip), - wantStatus: nil, + name: "nothing", + pod: &v1.Pod{}, + nodeInfo: framework.NewNodeInfo(), + existingPods: []*v1.Pod{}, + existingNodes: []*v1.Node{}, + existingPVCs: []*v1.PersistentVolumeClaim{}, + preFilterWantStatus: framework.NewStatus(framework.Skip), + wantStatus: nil, }, { - name: "failed to get PVC", - pod: podWithOnePVC, - nodeInfo: framework.NewNodeInfo(), - existingPods: []*v1.Pod{}, - existingNodes: []*v1.Node{}, - existingPVCs: []*v1.PersistentVolumeClaim{}, - enableReadWriteOncePod: true, - preFilterWantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "persistentvolumeclaim \"claim-with-rwop-1\" not found"), - wantStatus: nil, + name: "failed to get PVC", + pod: podWithOnePVC, + nodeInfo: framework.NewNodeInfo(), + existingPods: []*v1.Pod{}, + existingNodes: []*v1.Node{}, + existingPVCs: []*v1.PersistentVolumeClaim{}, + preFilterWantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "persistentvolumeclaim \"claim-with-rwop-1\" not found"), + wantStatus: nil, }, { - name: "no access mode conflict", - pod: podWithOnePVC, - nodeInfo: framework.NewNodeInfo(podWithReadWriteManyPVC), - existingPods: []*v1.Pod{podWithReadWriteManyPVC}, - existingNodes: []*v1.Node{node}, - existingPVCs: []*v1.PersistentVolumeClaim{readWriteOncePodPVC1, readWriteManyPVC}, - enableReadWriteOncePod: true, - preFilterWantStatus: framework.NewStatus(framework.Skip), - wantStatus: nil, + name: "no access mode conflict", + pod: podWithOnePVC, + nodeInfo: framework.NewNodeInfo(podWithReadWriteManyPVC), + existingPods: []*v1.Pod{podWithReadWriteManyPVC}, + existingNodes: []*v1.Node{node}, + existingPVCs: []*v1.PersistentVolumeClaim{readWriteOncePodPVC1, readWriteManyPVC}, + preFilterWantStatus: framework.NewStatus(framework.Skip), + wantStatus: nil, }, { - name: "access mode conflict, unschedulable", - pod: podWithOneConflict, - nodeInfo: framework.NewNodeInfo(podWithOnePVC, podWithReadWriteManyPVC), - existingPods: []*v1.Pod{podWithOnePVC, podWithReadWriteManyPVC}, - existingNodes: []*v1.Node{node}, - existingPVCs: 
[]*v1.PersistentVolumeClaim{readWriteOncePodPVC1, readWriteManyPVC}, - enableReadWriteOncePod: true, - preFilterWantStatus: nil, - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonReadWriteOncePodConflict), + name: "access mode conflict, unschedulable", + pod: podWithOneConflict, + nodeInfo: framework.NewNodeInfo(podWithOnePVC, podWithReadWriteManyPVC), + existingPods: []*v1.Pod{podWithOnePVC, podWithReadWriteManyPVC}, + existingNodes: []*v1.Node{node}, + existingPVCs: []*v1.PersistentVolumeClaim{readWriteOncePodPVC1, readWriteManyPVC}, + preFilterWantStatus: nil, + wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonReadWriteOncePodConflict), }, { - name: "two conflicts, unschedulable", - pod: podWithTwoConflicts, - nodeInfo: framework.NewNodeInfo(podWithTwoPVCs, podWithReadWriteManyPVC), - existingPods: []*v1.Pod{podWithTwoPVCs, podWithReadWriteManyPVC}, - existingNodes: []*v1.Node{node}, - existingPVCs: []*v1.PersistentVolumeClaim{readWriteOncePodPVC1, readWriteOncePodPVC2, readWriteManyPVC}, - enableReadWriteOncePod: true, - preFilterWantStatus: nil, - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonReadWriteOncePodConflict), + name: "two conflicts, unschedulable", + pod: podWithTwoConflicts, + nodeInfo: framework.NewNodeInfo(podWithTwoPVCs, podWithReadWriteManyPVC), + existingPods: []*v1.Pod{podWithTwoPVCs, podWithReadWriteManyPVC}, + existingNodes: []*v1.Node{node}, + existingPVCs: []*v1.PersistentVolumeClaim{readWriteOncePodPVC1, readWriteOncePodPVC2, readWriteManyPVC}, + preFilterWantStatus: nil, + wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonReadWriteOncePodConflict), }, } @@ -483,7 +461,7 @@ func TestAccessModeConflicts(t *testing.T) { t.Run(test.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - p := newPluginWithListers(ctx, t, test.existingPods, test.existingNodes, test.existingPVCs, test.enableReadWriteOncePod) + p := newPluginWithListers(ctx, t, test.existingPods, test.existingNodes, test.existingPVCs) cycleState := framework.NewCycleState() _, preFilterGotStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod) if diff := cmp.Diff(test.preFilterWantStatus, preFilterGotStatus); diff != "" { @@ -501,14 +479,12 @@ func TestAccessModeConflicts(t *testing.T) { } func newPlugin(ctx context.Context, t *testing.T) framework.Plugin { - return newPluginWithListers(ctx, t, nil, nil, nil, true) + return newPluginWithListers(ctx, t, nil, nil, nil) } -func newPluginWithListers(ctx context.Context, t *testing.T, pods []*v1.Pod, nodes []*v1.Node, pvcs []*v1.PersistentVolumeClaim, enableReadWriteOncePod bool) framework.Plugin { - pluginFactory := func(plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) { - return New(plArgs, fh, feature.Features{ - EnableReadWriteOncePod: enableReadWriteOncePod, - }) +func newPluginWithListers(ctx context.Context, t *testing.T, pods []*v1.Pod, nodes []*v1.Node, pvcs []*v1.PersistentVolumeClaim) framework.Plugin { + pluginFactory := func(ctx context.Context, plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) { + return New(ctx, plArgs, fh, feature.Features{}) } snapshot := cache.NewSnapshot(pods, nodes) diff --git a/pkg/scheduler/framework/plugins/volumezone/volume_zone.go b/pkg/scheduler/framework/plugins/volumezone/volume_zone.go index b014130b64a99..c1893aa63cc73 100644 --- a/pkg/scheduler/framework/plugins/volumezone/volume_zone.go +++ 
b/pkg/scheduler/framework/plugins/volumezone/volume_zone.go @@ -290,7 +290,7 @@ func (pl *VolumeZone) EventsToRegister() []framework.ClusterEventWithHint { } // New initializes a new plugin and returns it. -func New(_ runtime.Object, handle framework.Handle) (framework.Plugin, error) { +func New(_ context.Context, _ runtime.Object, handle framework.Handle) (framework.Plugin, error) { informerFactory := handle.SharedInformerFactory() pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() diff --git a/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go b/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go index 2d9859c9eb2a6..dfeb024153ff9 100644 --- a/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go +++ b/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go @@ -29,10 +29,10 @@ import ( "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework" - fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake" plugintesting "k8s.io/kubernetes/pkg/scheduler/framework/plugins/testing" "k8s.io/kubernetes/pkg/scheduler/internal/cache" st "k8s.io/kubernetes/pkg/scheduler/testing" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" ) func createPodWithVolume(pod, pv, pvc string) *v1.Pod { @@ -40,7 +40,7 @@ func createPodWithVolume(pod, pv, pvc string) *v1.Pod { } func TestSingleZone(t *testing.T) { - pvLister := fakeframework.PersistentVolumeLister{ + pvLister := tf.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelFailureDomainBetaZone: "us-west1-a"}}, }, @@ -61,7 +61,7 @@ func TestSingleZone(t *testing.T) { }, } - pvcLister := fakeframework.PersistentVolumeClaimLister{ + pvcLister := tf.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"}, Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"}, @@ -283,7 +283,7 @@ func TestSingleZone(t *testing.T) { } func TestMultiZone(t *testing.T) { - pvLister := fakeframework.PersistentVolumeLister{ + pvLister := tf.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelFailureDomainBetaZone: "us-west1-a"}}, }, @@ -301,7 +301,7 @@ func TestMultiZone(t *testing.T) { }, } - pvcLister := fakeframework.PersistentVolumeClaimLister{ + pvcLister := tf.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"}, Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"}, @@ -423,7 +423,7 @@ func TestWithBinding(t *testing.T) { classImmediate = "Class_Immediate" ) - scLister := fakeframework.StorageClassLister{ + scLister := tf.StorageClassLister{ { ObjectMeta: metav1.ObjectMeta{Name: classImmediate}, }, @@ -433,13 +433,13 @@ func TestWithBinding(t *testing.T) { }, } - pvLister := fakeframework.PersistentVolumeLister{ + pvLister := tf.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelFailureDomainBetaZone: "us-west1-a"}}, }, } - pvcLister := fakeframework.PersistentVolumeClaimLister{ + pvcLister := tf.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"}, Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"}, diff --git a/pkg/scheduler/framework/preemption/preemption.go b/pkg/scheduler/framework/preemption/preemption.go index d374439645541..9ffb22199a5cb 100644 --- 
a/pkg/scheduler/framework/preemption/preemption.go +++ b/pkg/scheduler/framework/preemption/preemption.go @@ -26,16 +26,15 @@ import ( v1 "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/util/feature" - corev1apply "k8s.io/client-go/applyconfigurations/core/v1" corelisters "k8s.io/client-go/listers/core/v1" policylisters "k8s.io/client-go/listers/policy/v1" corev1helpers "k8s.io/component-helpers/scheduling/corev1" "k8s.io/klog/v2" extenderv1 "k8s.io/kube-scheduler/extender/v1" + apipod "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/parallelize" @@ -43,11 +42,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/util" ) -const ( - // fieldManager used to add pod disruption condition to the victim pods - fieldManager = "KubeScheduler" -) - // Candidate represents a nominated node on which the preemptor can be scheduled, // along with the list of victims that should be evicted for the preemptor to fit the node. type Candidate interface { @@ -182,7 +176,7 @@ func (ev *Evaluator) Preempt(ctx context.Context, pod *v1.Pod, m framework.NodeT NumAllNodes: len(nodeToStatusMap), Diagnosis: framework.Diagnosis{ NodeToStatusMap: nodeToStatusMap, - // Leave FailedPlugins as nil as it won't be used on moving Pods. + // Leave UnschedulablePlugins or PendingPlugins as nil as it won't be used on moving Pods. }, } // Specify nominatedNodeName to clear the pod's nominatedNodeName status, if applicable. @@ -362,19 +356,20 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. klog.V(2).InfoS("Preemptor pod rejected a waiting pod", "preemptor", klog.KObj(pod), "waitingPod", klog.KObj(victim), "node", c.Name()) } else { if feature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { - victimPodApply := corev1apply.Pod(victim.Name, victim.Namespace).WithStatus(corev1apply.PodStatus()) - victimPodApply.Status.WithConditions(corev1apply.PodCondition(). - WithType(v1.DisruptionTarget). - WithStatus(v1.ConditionTrue). - WithReason(v1.PodReasonPreemptionByScheduler). - WithMessage(fmt.Sprintf("%s: preempting to accommodate a higher priority pod", pod.Spec.SchedulerName)). 
- WithLastTransitionTime(metav1.Now()), - ) - - if _, err := cs.CoreV1().Pods(victim.Namespace).ApplyStatus(ctx, victimPodApply, metav1.ApplyOptions{FieldManager: fieldManager, Force: true}); err != nil { - logger.Error(err, "Could not add DisruptionTarget condition due to preemption", "pod", klog.KObj(victim), "preemptor", klog.KObj(pod)) - errCh.SendErrorWithCancel(err, cancel) - return + condition := &v1.PodCondition{ + Type: v1.DisruptionTarget, + Status: v1.ConditionTrue, + Reason: v1.PodReasonPreemptionByScheduler, + Message: fmt.Sprintf("%s: preempting to accommodate a higher priority pod", pod.Spec.SchedulerName), + } + newStatus := pod.Status.DeepCopy() + updated := apipod.UpdatePodCondition(newStatus, condition) + if updated { + if err := util.PatchPodStatus(ctx, cs, victim, newStatus); err != nil { + logger.Error(err, "Could not add DisruptionTarget condition due to preemption", "pod", klog.KObj(victim), "preemptor", klog.KObj(pod)) + errCh.SendErrorWithCancel(err, cancel) + return + } } } if err := util.DeletePod(ctx, cs, victim); err != nil { @@ -570,7 +565,7 @@ func (ev *Evaluator) DryRunPreemption(ctx context.Context, pod *v1.Pod, potentia var statusesLock sync.Mutex var errs []error checkNode := func(i int) { - nodeInfoCopy := potentialNodes[(int(offset)+i)%len(potentialNodes)].Clone() + nodeInfoCopy := potentialNodes[(int(offset)+i)%len(potentialNodes)].Snapshot() stateCopy := ev.State.Clone() pods, numPDBViolations, status := ev.SelectVictimsOnNode(ctx, stateCopy, pod, nodeInfoCopy, pdbs) if status.IsSuccess() && len(pods) != 0 { diff --git a/pkg/scheduler/framework/preemption/preemption_test.go b/pkg/scheduler/framework/preemption/preemption_test.go index ff68f2309a7f2..632f3dd9ea1b0 100644 --- a/pkg/scheduler/framework/preemption/preemption_test.go +++ b/pkg/scheduler/framework/preemption/preemption_test.go @@ -48,6 +48,7 @@ import ( internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" st "k8s.io/kubernetes/pkg/scheduler/testing" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" ) var ( @@ -268,9 +269,9 @@ func TestDryRunPreemption(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { logger, _ := ktesting.NewTestContext(t) - registeredPlugins := append([]st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New)}, - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registeredPlugins := append([]tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New)}, + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), ) var objs []runtime.Object for _, p := range append(tt.testPods, tt.initPods...) 
{ @@ -284,7 +285,7 @@ func TestDryRunPreemption(t *testing.T) { _, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, registeredPlugins, "", frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())), diff --git a/pkg/scheduler/framework/runtime/framework.go b/pkg/scheduler/framework/runtime/framework.go index 168f07024a7de..0b65ea8cd0ad4 100644 --- a/pkg/scheduler/framework/runtime/framework.go +++ b/pkg/scheduler/framework/runtime/framework.go @@ -302,7 +302,7 @@ func NewFramework(ctx context.Context, r Registry, profile *config.KubeScheduler Args: args, }) } - p, err := factory(args, f) + p, err := factory(ctx, args, f) if err != nil { return nil, fmt.Errorf("initializing plugin %q: %w", name, err) } @@ -654,11 +654,11 @@ func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framewor continue } if !s.IsSuccess() { - s.SetFailedPlugin(pl.Name()) - if s.IsUnschedulable() { + s.SetPlugin(pl.Name()) + if s.IsRejected() { return nil, s } - return nil, framework.AsStatus(fmt.Errorf("running PreFilter plugin %q: %w", pl.Name(), s.AsError())).WithFailedPlugin(pl.Name()) + return nil, framework.AsStatus(fmt.Errorf("running PreFilter plugin %q: %w", pl.Name(), s.AsError())).WithPlugin(pl.Name()) } if !r.AllNodes() { pluginsWithNodes = append(pluginsWithNodes, pl.Name()) @@ -795,12 +795,12 @@ func (f *frameworkImpl) RunFilterPlugins( continue } if status := f.runFilterPlugin(ctx, pl, state, pod, nodeInfo); !status.IsSuccess() { - if !status.IsUnschedulable() { + if !status.IsRejected() { // Filter plugins are not supposed to return any status other than // Success or Unschedulable. status = framework.AsStatus(fmt.Errorf("running %q filter plugin: %w", pl.Name(), status.AsError())) } - status.SetFailedPlugin(pl.Name()) + status.SetPlugin(pl.Name()) return status } } @@ -836,7 +836,7 @@ func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state *framewo // `result` records the last meaningful(non-noop) PostFilterResult. var result *framework.PostFilterResult var reasons []string - var failedPlugin string + var rejectorPlugin string for _, pl := range f.postFilterPlugins { logger := klog.LoggerWithName(logger, pl.Name()) ctx := klog.NewContext(ctx, logger) @@ -844,10 +844,10 @@ func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state *framewo if s.IsSuccess() { return r, s } else if s.Code() == framework.UnschedulableAndUnresolvable { - return r, s.WithFailedPlugin(pl.Name()) - } else if !s.IsUnschedulable() { + return r, s.WithPlugin(pl.Name()) + } else if !s.IsRejected() { // Any status other than Success, Unschedulable or UnschedulableAndUnresolvable is Error. - return nil, framework.AsStatus(s.AsError()).WithFailedPlugin(pl.Name()) + return nil, framework.AsStatus(s.AsError()).WithPlugin(pl.Name()) } else if r != nil && r.Mode() != framework.ModeNoop { result = r } @@ -855,12 +855,12 @@ func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state *framewo reasons = append(reasons, s.Reasons()...) // Record the first failed plugin unless we proved that // the latter is more relevant. 
- if len(failedPlugin) == 0 { - failedPlugin = pl.Name() + if len(rejectorPlugin) == 0 { + rejectorPlugin = pl.Name() } } - return result, framework.NewStatus(framework.Unschedulable, reasons...).WithFailedPlugin(failedPlugin) + return result, framework.NewStatus(framework.Unschedulable, reasons...).WithPlugin(rejectorPlugin) } func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl framework.PostFilterPlugin, state *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) { @@ -922,7 +922,7 @@ func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, s } status = f.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse) - if !status.IsSuccess() && !status.IsUnschedulable() { + if !status.IsSuccess() && !status.IsRejected() { return status } } @@ -942,7 +942,7 @@ func addNominatedPods(ctx context.Context, fh framework.Handle, pod *v1.Pod, sta if len(nominatedPodInfos) == 0 { return false, state, nodeInfo, nil } - nodeInfoOut := nodeInfo.Clone() + nodeInfoOut := nodeInfo.Snapshot() stateOut := state.Clone() podsAdded := false for _, pi := range nominatedPodInfos { @@ -1151,9 +1151,9 @@ func (f *frameworkImpl) RunPreBindPlugins(ctx context.Context, state *framework. ctx := klog.NewContext(ctx, logger) status = f.runPreBindPlugin(ctx, pl, state, pod, nodeName) if !status.IsSuccess() { - if status.IsUnschedulable() { + if status.IsRejected() { logger.V(4).Info("Pod rejected by PreBind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message()) - status.SetFailedPlugin(pl.Name()) + status.SetPlugin(pl.Name()) return status } err := status.AsError() @@ -1197,9 +1197,9 @@ func (f *frameworkImpl) RunBindPlugins(ctx context.Context, state *framework.Cyc continue } if !status.IsSuccess() { - if status.IsUnschedulable() { + if status.IsRejected() { logger.V(4).Info("Pod rejected by Bind plugin", "pod", klog.KObj(pod), "node", nodeName, "plugin", pl.Name(), "status", status.Message()) - status.SetFailedPlugin(pl.Name()) + status.SetPlugin(pl.Name()) return status } err := status.AsError() @@ -1271,9 +1271,9 @@ func (f *frameworkImpl) RunReservePluginsReserve(ctx context.Context, state *fra ctx := klog.NewContext(ctx, logger) status = f.runReservePluginReserve(ctx, pl, state, pod, nodeName) if !status.IsSuccess() { - if status.IsUnschedulable() { + if status.IsRejected() { logger.V(4).Info("Pod rejected by plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", status.Message()) - status.SetFailedPlugin(pl.Name()) + status.SetPlugin(pl.Name()) return status } err := status.AsError() @@ -1350,9 +1350,9 @@ func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state *framework.C ctx := klog.NewContext(ctx, logger) status, timeout := f.runPermitPlugin(ctx, pl, state, pod, nodeName) if !status.IsSuccess() { - if status.IsUnschedulable() { + if status.IsRejected() { logger.V(4).Info("Pod rejected by plugin", "pod", klog.KObj(pod), "plugin", pl.Name(), "status", status.Message()) - return status.WithFailedPlugin(pl.Name()) + return status.WithPlugin(pl.Name()) } if status.IsWait() { // Not allowed to be greater than maxTimeout. 
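The framework side of the same migration appears in the Run*Plugins helpers above: before each plugin is invoked, the logger is scoped with the plugin name and stored back into the context, so klog.FromContext inside the plugin picks up a named logger. A short sketch of that wrapping step (runPlugin is illustrative, not the framework's actual helper):

package example

import (
	"context"

	"k8s.io/klog/v2"
)

// runPlugin stands in for the per-plugin wrapping done in the Run*Plugins
// helpers: derive a logger named after the plugin, attach it to the context,
// and hand that context to the plugin callback.
func runPlugin(ctx context.Context, pluginName string, cb func(context.Context) error) error {
	logger := klog.FromContext(ctx)
	logger = klog.LoggerWithName(logger, pluginName)
	ctx = klog.NewContext(ctx, logger)
	return cb(ctx)
}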
@@ -1364,7 +1364,7 @@ func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state *framework.C } else { err := status.AsError() logger.Error(err, "Plugin failed", "plugin", pl.Name(), "pod", klog.KObj(pod)) - return framework.AsStatus(fmt.Errorf("running Permit plugin %q: %w", pl.Name(), err)).WithFailedPlugin(pl.Name()) + return framework.AsStatus(fmt.Errorf("running Permit plugin %q: %w", pl.Name(), err)).WithPlugin(pl.Name()) } } } @@ -1404,13 +1404,13 @@ func (f *frameworkImpl) WaitOnPermit(ctx context.Context, pod *v1.Pod) *framewor metrics.PermitWaitDuration.WithLabelValues(s.Code().String()).Observe(metrics.SinceInSeconds(startTime)) if !s.IsSuccess() { - if s.IsUnschedulable() { + if s.IsRejected() { logger.V(4).Info("Pod rejected while waiting on permit", "pod", klog.KObj(pod), "status", s.Message()) return s } err := s.AsError() logger.Error(err, "Failed waiting on permit for pod", "pod", klog.KObj(pod)) - return framework.AsStatus(fmt.Errorf("waiting on permit for pod: %w", err)).WithFailedPlugin(s.FailedPlugin()) + return framework.AsStatus(fmt.Errorf("waiting on permit for pod: %w", err)).WithPlugin(s.Plugin()) } return nil } diff --git a/pkg/scheduler/framework/runtime/framework_test.go b/pkg/scheduler/framework/runtime/framework_test.go index 115411dc0079f..75fb31c5ecb19 100644 --- a/pkg/scheduler/framework/runtime/framework_test.go +++ b/pkg/scheduler/framework/runtime/framework_test.go @@ -37,7 +37,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/metrics" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -72,11 +72,11 @@ var cmpOpts = []cmp.Option{ if s1 == nil || s2 == nil { return s1.IsSuccess() && s2.IsSuccess() } - return s1.Code() == s2.Code() && s1.FailedPlugin() == s2.FailedPlugin() && s1.Message() == s2.Message() + return s1.Code() == s2.Code() && s1.Plugin() == s2.Plugin() && s1.Message() == s2.Message() }), } -func newScoreWithNormalizePlugin1(injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { +func newScoreWithNormalizePlugin1(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { var inj injectedResult if err := DecodeInto(injArgs, &inj); err != nil { return nil, err @@ -84,7 +84,7 @@ func newScoreWithNormalizePlugin1(injArgs runtime.Object, f framework.Handle) (f return &TestScoreWithNormalizePlugin{scoreWithNormalizePlugin1, inj}, nil } -func newScoreWithNormalizePlugin2(injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { +func newScoreWithNormalizePlugin2(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { var inj injectedResult if err := DecodeInto(injArgs, &inj); err != nil { return nil, err @@ -92,7 +92,7 @@ func newScoreWithNormalizePlugin2(injArgs runtime.Object, f framework.Handle) (f return &TestScoreWithNormalizePlugin{scoreWithNormalizePlugin2, inj}, nil } -func newScorePlugin1(injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { +func newScorePlugin1(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { var inj injectedResult if err := DecodeInto(injArgs, &inj); err != nil { return nil, err @@ -100,7 +100,7 @@ func newScorePlugin1(injArgs runtime.Object, f framework.Handle) (framework.Plug return &TestScorePlugin{scorePlugin1, inj}, nil } -func newPluginNotImplementingScore(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func 
newPluginNotImplementingScore(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &PluginNotImplementingScore{}, nil } @@ -154,7 +154,7 @@ func (pl *PluginNotImplementingScore) Name() string { return pluginNotImplementingScore } -func newTestPlugin(injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { +func newTestPlugin(_ context.Context, injArgs runtime.Object, f framework.Handle) (framework.Plugin, error) { return &TestPlugin{name: testPlugin}, nil } @@ -296,7 +296,7 @@ func (dp *TestDuplicatePlugin) PreFilterExtensions() framework.PreFilterExtensio var _ framework.PreFilterPlugin = &TestDuplicatePlugin{} -func newDuplicatePlugin(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func newDuplicatePlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &TestDuplicatePlugin{}, nil } @@ -326,7 +326,7 @@ func (pl *TestPreEnqueuePlugin) PreEnqueue(ctx context.Context, p *v1.Pod) *fram var _ framework.QueueSortPlugin = &TestQueueSortPlugin{} -func newQueueSortPlugin(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func newQueueSortPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &TestQueueSortPlugin{}, nil } @@ -343,7 +343,7 @@ func (pl *TestQueueSortPlugin) Less(_, _ *framework.QueuedPodInfo) bool { var _ framework.BindPlugin = &TestBindPlugin{} -func newBindPlugin(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func newBindPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &TestBindPlugin{}, nil } @@ -881,7 +881,7 @@ func TestPreEnqueuePlugins(t *testing.T) { // register all plugins tmpPl := pl if err := registry.Register(pl.Name(), - func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tmpPl, nil }); err != nil { t.Fatalf("fail to register preEnqueue plugin (%s)", pl.Name()) @@ -1004,9 +1004,11 @@ func TestRunPreScorePlugins(t *testing.T) { for i, p := range tt.plugins { p := p enabled[i].Name = p.name - r.Register(p.name, func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return p, nil - }) + }); err != nil { + t.Fatalf("fail to register PreScorePlugins plugin (%s)", p.Name()) + } } ctx, cancel := context.WithCancel(context.Background()) @@ -1425,11 +1427,11 @@ func TestPreFilterPlugins(t *testing.T) { preFilter2 := &TestPreFilterWithExtensionsPlugin{} r := make(Registry) r.Register(preFilterPluginName, - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return preFilter1, nil }) r.Register(preFilterWithExtensionsPluginName, - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return preFilter2, nil }) plugins := &config.Plugins{PreFilter: config.PluginSet{Enabled: []config.Plugin{{Name: preFilterWithExtensionsPluginName}, {Name: preFilterPluginName}}}} @@ -1563,9 +1565,11 @@ func TestRunPreFilterPlugins(t *testing.T) { for i, p := range tt.plugins { p := p enabled[i].Name = p.name - r.Register(p.name, func(_ runtime.Object, fh framework.Handle) 
(framework.Plugin, error) { + if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return p, nil - }) + }); err != nil { + t.Fatalf("fail to register PreFilter plugin (%s)", p.Name()) + } } ctx, cancel := context.WithCancel(context.Background()) @@ -1651,9 +1655,11 @@ func TestRunPreFilterExtensionRemovePod(t *testing.T) { for i, p := range tt.plugins { p := p enabled[i].Name = p.name - r.Register(p.name, func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return p, nil - }) + }); err != nil { + t.Fatalf("fail to register PreFilterExtension plugin (%s)", p.Name()) + } } ctx, cancel := context.WithCancel(context.Background()) @@ -1733,9 +1739,11 @@ func TestRunPreFilterExtensionAddPod(t *testing.T) { for i, p := range tt.plugins { p := p enabled[i].Name = p.name - r.Register(p.name, func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + if err := r.Register(p.name, func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return p, nil - }) + }); err != nil { + t.Fatalf("fail to register PreFilterExtension plugin (%s)", p.Name()) + } } ctx, cancel := context.WithCancel(context.Background()) @@ -1785,7 +1793,7 @@ func TestFilterPlugins(t *testing.T) { inj: injectedResult{FilterStatus: int(framework.Error)}, }, }, - wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin" filter plugin: %w`, errInjectedFilterStatus)).WithFailedPlugin("TestPlugin"), + wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin" filter plugin: %w`, errInjectedFilterStatus)).WithPlugin("TestPlugin"), }, { name: "UnschedulableFilter", @@ -1795,7 +1803,7 @@ func TestFilterPlugins(t *testing.T) { inj: injectedResult{FilterStatus: int(framework.Unschedulable)}, }, }, - wantStatus: framework.NewStatus(framework.Unschedulable, injectFilterReason).WithFailedPlugin("TestPlugin"), + wantStatus: framework.NewStatus(framework.Unschedulable, injectFilterReason).WithPlugin("TestPlugin"), }, { name: "UnschedulableAndUnresolvableFilter", @@ -1806,7 +1814,7 @@ func TestFilterPlugins(t *testing.T) { FilterStatus: int(framework.UnschedulableAndUnresolvable)}, }, }, - wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectFilterReason).WithFailedPlugin("TestPlugin"), + wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectFilterReason).WithPlugin("TestPlugin"), }, // following tests cover multiple-plugins scenarios { @@ -1821,7 +1829,7 @@ func TestFilterPlugins(t *testing.T) { inj: injectedResult{FilterStatus: int(framework.Error)}, }, }, - wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin1" filter plugin: %w`, errInjectedFilterStatus)).WithFailedPlugin("TestPlugin1"), + wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin1" filter plugin: %w`, errInjectedFilterStatus)).WithPlugin("TestPlugin1"), }, { name: "UnschedulableAndUnschedulableFilters", @@ -1835,7 +1843,7 @@ func TestFilterPlugins(t *testing.T) { inj: injectedResult{FilterStatus: int(framework.Unschedulable)}, }, }, - wantStatus: framework.NewStatus(framework.Unschedulable, injectFilterReason).WithFailedPlugin("TestPlugin1"), + wantStatus: framework.NewStatus(framework.Unschedulable, injectFilterReason).WithPlugin("TestPlugin1"), }, { name: "UnschedulableAndUnschedulableAndUnresolvableFilters", @@ -1849,7 +1857,7 @@ func 
TestFilterPlugins(t *testing.T) { inj: injectedResult{FilterStatus: int(framework.Unschedulable)}, }, }, - wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectFilterReason).WithFailedPlugin("TestPlugin1"), + wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectFilterReason).WithPlugin("TestPlugin1"), }, { name: "SuccessAndSuccessFilters", @@ -1893,7 +1901,7 @@ func TestFilterPlugins(t *testing.T) { inj: injectedResult{FilterStatus: int(framework.Success)}, }, }, - wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin1" filter plugin: %w`, errInjectedFilterStatus)).WithFailedPlugin("TestPlugin1"), + wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin1" filter plugin: %w`, errInjectedFilterStatus)).WithPlugin("TestPlugin1"), }, { name: "SuccessAndErrorFilters", @@ -1908,7 +1916,7 @@ func TestFilterPlugins(t *testing.T) { inj: injectedResult{FilterStatus: int(framework.Error)}, }, }, - wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin2" filter plugin: %w`, errInjectedFilterStatus)).WithFailedPlugin("TestPlugin2"), + wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin2" filter plugin: %w`, errInjectedFilterStatus)).WithPlugin("TestPlugin2"), }, { name: "SuccessAndUnschedulableFilters", @@ -1922,7 +1930,7 @@ func TestFilterPlugins(t *testing.T) { inj: injectedResult{FilterStatus: int(framework.Unschedulable)}, }, }, - wantStatus: framework.NewStatus(framework.Unschedulable, injectFilterReason).WithFailedPlugin("TestPlugin2"), + wantStatus: framework.NewStatus(framework.Unschedulable, injectFilterReason).WithPlugin("TestPlugin2"), }, } @@ -1934,7 +1942,7 @@ func TestFilterPlugins(t *testing.T) { // register all plugins tmpPl := pl if err := registry.Register(pl.name, - func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tmpPl, nil }); err != nil { t.Fatalf("fail to register filter plugin (%s)", pl.name) @@ -2018,7 +2026,7 @@ func TestPostFilterPlugins(t *testing.T) { inj: injectedResult{PostFilterStatus: int(framework.Success)}, }, }, - wantStatus: framework.AsStatus(fmt.Errorf(injectReason)).WithFailedPlugin("TestPlugin1"), + wantStatus: framework.AsStatus(fmt.Errorf(injectReason)).WithPlugin("TestPlugin1"), }, { name: "plugin1 failed to make a Pod schedulable, followed by plugin2 which makes the Pod unresolvable", @@ -2032,7 +2040,7 @@ func TestPostFilterPlugins(t *testing.T) { inj: injectedResult{PostFilterStatus: int(framework.UnschedulableAndUnresolvable)}, }, }, - wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectReason).WithFailedPlugin("TestPlugin2"), + wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectReason).WithPlugin("TestPlugin2"), }, { name: "both plugins failed to make a Pod schedulable", @@ -2046,7 +2054,7 @@ func TestPostFilterPlugins(t *testing.T) { inj: injectedResult{PostFilterStatus: int(framework.Unschedulable)}, }, }, - wantStatus: framework.NewStatus(framework.Unschedulable, []string{injectReason, injectReason}...).WithFailedPlugin("TestPlugin1"), + wantStatus: framework.NewStatus(framework.Unschedulable, []string{injectReason, injectReason}...).WithPlugin("TestPlugin1"), }, } @@ -2058,7 +2066,7 @@ func TestPostFilterPlugins(t *testing.T) { // register all plugins tmpPl := pl if err := registry.Register(pl.name, - func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + func(_ 
context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tmpPl, nil }); err != nil { t.Fatalf("fail to register postFilter plugin (%s)", pl.name) @@ -2158,7 +2166,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) { nominatedPod: highPriorityPod, node: node, nodeInfo: framework.NewNodeInfo(pod), - wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin2" filter plugin: %w`, errInjectedFilterStatus)).WithFailedPlugin("TestPlugin2"), + wantStatus: framework.AsStatus(fmt.Errorf(`running "TestPlugin2" filter plugin: %w`, errInjectedFilterStatus)).WithPlugin("TestPlugin2"), }, { name: "node has a low-priority nominated pod and pre filters return unschedulable", @@ -2190,7 +2198,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) { if tt.preFilterPlugin != nil { if err := registry.Register(tt.preFilterPlugin.name, - func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tt.preFilterPlugin, nil }); err != nil { t.Fatalf("fail to register preFilter plugin (%s)", tt.preFilterPlugin.name) @@ -2202,7 +2210,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) { } if tt.filterPlugin != nil { if err := registry.Register(tt.filterPlugin.name, - func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tt.filterPlugin, nil }); err != nil { t.Fatalf("fail to register filter plugin (%s)", tt.filterPlugin.name) @@ -2265,7 +2273,7 @@ func TestPreBindPlugins(t *testing.T) { inj: injectedResult{PreBindStatus: int(framework.Unschedulable)}, }, }, - wantStatus: framework.NewStatus(framework.Unschedulable, injectReason).WithFailedPlugin("TestPlugin"), + wantStatus: framework.NewStatus(framework.Unschedulable, injectReason).WithPlugin("TestPlugin"), }, { name: "ErrorPreBindPlugin", @@ -2285,7 +2293,7 @@ func TestPreBindPlugins(t *testing.T) { inj: injectedResult{PreBindStatus: int(framework.UnschedulableAndUnresolvable)}, }, }, - wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectReason).WithFailedPlugin("TestPlugin"), + wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectReason).WithPlugin("TestPlugin"), }, { name: "SuccessErrorPreBindPlugins", @@ -2355,7 +2363,7 @@ func TestPreBindPlugins(t *testing.T) { inj: injectedResult{PreBindStatus: int(framework.Success)}, }, }, - wantStatus: framework.NewStatus(framework.Unschedulable, injectReason).WithFailedPlugin("TestPlugin"), + wantStatus: framework.NewStatus(framework.Unschedulable, injectReason).WithPlugin("TestPlugin"), }, } @@ -2366,7 +2374,7 @@ func TestPreBindPlugins(t *testing.T) { for _, pl := range tt.plugins { tmpPl := pl - if err := registry.Register(pl.name, func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tmpPl, nil }); err != nil { t.Fatalf("Unable to register pre bind plugins: %s", pl.name) @@ -2423,7 +2431,7 @@ func TestReservePlugins(t *testing.T) { inj: injectedResult{ReserveStatus: int(framework.Unschedulable)}, }, }, - wantStatus: framework.NewStatus(framework.Unschedulable, injectReason).WithFailedPlugin("TestPlugin"), + wantStatus: framework.NewStatus(framework.Unschedulable, injectReason).WithPlugin("TestPlugin"), }, { name: "ErrorReservePlugin", @@ 
-2443,7 +2451,7 @@ func TestReservePlugins(t *testing.T) { inj: injectedResult{ReserveStatus: int(framework.UnschedulableAndUnresolvable)}, }, }, - wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectReason).WithFailedPlugin("TestPlugin"), + wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectReason).WithPlugin("TestPlugin"), }, { name: "SuccessSuccessReservePlugins", @@ -2513,7 +2521,7 @@ func TestReservePlugins(t *testing.T) { inj: injectedResult{ReserveStatus: int(framework.Success)}, }, }, - wantStatus: framework.NewStatus(framework.Unschedulable, injectReason).WithFailedPlugin("TestPlugin"), + wantStatus: framework.NewStatus(framework.Unschedulable, injectReason).WithPlugin("TestPlugin"), }, } @@ -2524,7 +2532,7 @@ func TestReservePlugins(t *testing.T) { for _, pl := range tt.plugins { tmpPl := pl - if err := registry.Register(pl.name, func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tmpPl, nil }); err != nil { t.Fatalf("Unable to register pre bind plugins: %s", pl.name) @@ -2581,7 +2589,7 @@ func TestPermitPlugins(t *testing.T) { inj: injectedResult{PermitStatus: int(framework.Unschedulable)}, }, }, - want: framework.NewStatus(framework.Unschedulable, injectReason).WithFailedPlugin("TestPlugin"), + want: framework.NewStatus(framework.Unschedulable, injectReason).WithPlugin("TestPlugin"), }, { name: "ErrorPermitPlugin", @@ -2591,7 +2599,7 @@ func TestPermitPlugins(t *testing.T) { inj: injectedResult{PermitStatus: int(framework.Error)}, }, }, - want: framework.AsStatus(fmt.Errorf(`running Permit plugin "TestPlugin": %w`, errInjectedStatus)).WithFailedPlugin("TestPlugin"), + want: framework.AsStatus(fmt.Errorf(`running Permit plugin "TestPlugin": %w`, errInjectedStatus)).WithPlugin("TestPlugin"), }, { name: "UnschedulableAndUnresolvablePermitPlugin", @@ -2601,7 +2609,7 @@ func TestPermitPlugins(t *testing.T) { inj: injectedResult{PermitStatus: int(framework.UnschedulableAndUnresolvable)}, }, }, - want: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectReason).WithFailedPlugin("TestPlugin"), + want: framework.NewStatus(framework.UnschedulableAndUnresolvable, injectReason).WithPlugin("TestPlugin"), }, { name: "WaitPermitPlugin", @@ -2639,7 +2647,7 @@ func TestPermitPlugins(t *testing.T) { inj: injectedResult{PermitStatus: int(framework.Error)}, }, }, - want: framework.AsStatus(fmt.Errorf(`running Permit plugin "TestPlugin": %w`, errInjectedStatus)).WithFailedPlugin("TestPlugin"), + want: framework.AsStatus(fmt.Errorf(`running Permit plugin "TestPlugin": %w`, errInjectedStatus)).WithPlugin("TestPlugin"), }, } @@ -2650,7 +2658,7 @@ func TestPermitPlugins(t *testing.T) { for _, pl := range tt.plugins { tmpPl := pl - if err := registry.Register(pl.name, func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + if err := registry.Register(pl.name, func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tmpPl, nil }); err != nil { t.Fatalf("Unable to register Permit plugin: %s", pl.name) @@ -2817,7 +2825,7 @@ func TestRecordingMetrics(t *testing.T) { plugin := &TestPlugin{name: testPlugin, inj: tt.inject} r := make(Registry) r.Register(testPlugin, - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return 
plugin, nil }) pluginSet := config.PluginSet{Enabled: []config.Plugin{{Name: testPlugin, Weight: 1}}} @@ -2838,7 +2846,7 @@ func TestRecordingMetrics(t *testing.T) { recorder := metrics.NewMetricsAsyncRecorder(100, time.Nanosecond, ctx.Done()) profile := config.KubeSchedulerProfile{ - PercentageOfNodesToScore: pointer.Int32(testPercentageOfNodesToScore), + PercentageOfNodesToScore: ptr.To[int32](testPercentageOfNodesToScore), SchedulerName: testProfileName, Plugins: plugins, } @@ -2941,7 +2949,7 @@ func TestRunBindPlugins(t *testing.T) { name := fmt.Sprintf("bind-%d", i) plugin := &TestPlugin{name: name, inj: injectedResult{BindStatus: int(inj)}} r.Register(name, - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return plugin, nil }) pluginSet.Enabled = append(pluginSet.Enabled, config.Plugin{Name: name}) @@ -2952,7 +2960,7 @@ func TestRunBindPlugins(t *testing.T) { recorder := metrics.NewMetricsAsyncRecorder(100, time.Nanosecond, ctx.Done()) profile := config.KubeSchedulerProfile{ SchedulerName: testProfileName, - PercentageOfNodesToScore: pointer.Int32(testPercentageOfNodesToScore), + PercentageOfNodesToScore: ptr.To[int32](testPercentageOfNodesToScore), Plugins: plugins, } fwk, err := newFrameworkWithQueueSortAndBind(ctx, r, profile, withMetricsRecorder(recorder)) @@ -3000,7 +3008,7 @@ func TestPermitWaitDurationMetric(t *testing.T) { plugin := &TestPlugin{name: testPlugin, inj: tt.inject} r := make(Registry) err := r.Register(testPlugin, - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return plugin, nil }) if err != nil { @@ -3043,7 +3051,7 @@ func TestWaitOnPermit(t *testing.T) { action: func(f framework.Framework) { f.GetWaitingPod(pod.UID).Reject(permitPlugin, "reject message") }, - want: framework.NewStatus(framework.Unschedulable, "reject message").WithFailedPlugin(permitPlugin), + want: framework.NewStatus(framework.Unschedulable, "reject message").WithPlugin(permitPlugin), }, { name: "Allow Waiting Pod", @@ -3059,7 +3067,7 @@ func TestWaitOnPermit(t *testing.T) { testPermitPlugin := &TestPermitPlugin{} r := make(Registry) r.Register(permitPlugin, - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return testPermitPlugin, nil }) plugins := &config.Plugins{ diff --git a/pkg/scheduler/framework/runtime/registry.go b/pkg/scheduler/framework/runtime/registry.go index b0b1d855751a7..b6fca3c29cd1e 100644 --- a/pkg/scheduler/framework/runtime/registry.go +++ b/pkg/scheduler/framework/runtime/registry.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "context" "fmt" "k8s.io/apimachinery/pkg/runtime" @@ -27,16 +28,16 @@ import ( ) // PluginFactory is a function that builds a plugin. -type PluginFactory = func(configuration runtime.Object, f framework.Handle) (framework.Plugin, error) +type PluginFactory = func(ctx context.Context, configuration runtime.Object, f framework.Handle) (framework.Plugin, error) // PluginFactoryWithFts is a function that builds a plugin with certain feature gates. 
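// Illustrative sketch (hypothetical plugin, not from this patch): a factory written
// against the updated PluginFactory signature above, which now threads a
// context.Context through plugin construction. Only the function shape and the
// Registry.Register call mirror this diff; the noopPlugin type is made up.
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)

type noopPlugin struct{}

func (p *noopPlugin) Name() string { return "NoopPlugin" }

// newNoopPlugin receives a context in addition to the plugin args and handle.
func newNoopPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
	return &noopPlugin{}, nil
}

func register(r frameworkruntime.Registry) error {
	// Register accepts the new factory shape directly.
	return r.Register("NoopPlugin", newNoopPlugin)
}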
-type PluginFactoryWithFts func(runtime.Object, framework.Handle, plfeature.Features) (framework.Plugin, error) +type PluginFactoryWithFts func(context.Context, runtime.Object, framework.Handle, plfeature.Features) (framework.Plugin, error) // FactoryAdapter can be used to inject feature gates for a plugin that needs // them when the caller expects the older PluginFactory method. func FactoryAdapter(fts plfeature.Features, withFts PluginFactoryWithFts) PluginFactory { - return func(plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) { - return withFts(plArgs, fh, fts) + return func(ctx context.Context, plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) { + return withFts(ctx, plArgs, fh, fts) } } diff --git a/pkg/scheduler/framework/runtime/registry_test.go b/pkg/scheduler/framework/runtime/registry_test.go index 9ace42394b124..f3182d668eb3c 100644 --- a/pkg/scheduler/framework/runtime/registry_test.go +++ b/pkg/scheduler/framework/runtime/registry_test.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "context" "reflect" "testing" @@ -78,8 +79,8 @@ func TestDecodeInto(t *testing.T) { func isRegistryEqual(registryX, registryY Registry) bool { for name, pluginFactory := range registryY { if val, ok := registryX[name]; ok { - p1, _ := pluginFactory(nil, nil) - p2, _ := val(nil, nil) + p1, _ := pluginFactory(nil, nil, nil) + p2, _ := val(nil, nil, nil) if p1.Name() != p2.Name() { // pluginFactory functions are not the same. return false @@ -110,7 +111,7 @@ func (p *mockNoopPlugin) Name() string { func NewMockNoopPluginFactory() PluginFactory { uuid := uuid.New().String() - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &mockNoopPlugin{uuid}, nil } } diff --git a/pkg/scheduler/framework/runtime/waiting_pods_map.go b/pkg/scheduler/framework/runtime/waiting_pods_map.go index d5df63974578f..0446657522981 100644 --- a/pkg/scheduler/framework/runtime/waiting_pods_map.go +++ b/pkg/scheduler/framework/runtime/waiting_pods_map.go @@ -21,7 +21,7 @@ import ( "sync" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/scheduler/framework" ) @@ -159,7 +159,7 @@ func (w *waitingPod) Reject(pluginName, msg string) { // The select clause works as a non-blocking send. // If there is no receiver, it's a no-op (default case). select { - case w.s <- framework.NewStatus(framework.Unschedulable, msg).WithFailedPlugin(pluginName): + case w.s <- framework.NewStatus(framework.Unschedulable, msg).WithPlugin(pluginName): default: } } diff --git a/pkg/scheduler/framework/types.go b/pkg/scheduler/framework/types.go index edce3a5acfb99..50566190d2b13 100644 --- a/pkg/scheduler/framework/types.go +++ b/pkg/scheduler/framework/types.go @@ -85,20 +85,22 @@ type ClusterEventWithHint struct { // and filters out events to reduce useless retry of Pod's scheduling. // It's an optional field. If not set, // the scheduling of Pods will be always retried with backoff when this Event happens. - // (the same as QueueAfterBackoff) + // (the same as Queue) QueueingHintFn QueueingHintFn } // QueueingHintFn returns a hint that signals whether the event can make a Pod, // which was rejected by this plugin in the past scheduling cycle, schedulable or not. // It's called before a Pod gets moved from unschedulableQ to backoffQ or activeQ. 
+// If it returns an error, we'll take the returned QueueingHint as `Queue` at the caller whatever we returned here so that +// we can prevent the Pod from being stuck in the unschedulable pod pool. // // - `pod`: the Pod to be enqueued, which is rejected by this plugin in the past. // - `oldObj` `newObj`: the object involved in that event. // - For example, the given event is "Node deleted", the `oldObj` will be that deleted Node. // - `oldObj` is nil if the event is add event. // - `newObj` is nil if the event is delete event. -type QueueingHintFn func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) QueueingHint +type QueueingHintFn func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (QueueingHint, error) type QueueingHint int @@ -107,29 +109,16 @@ const ( // scheduling of the pod. QueueSkip QueueingHint = iota - // QueueAfterBackoff implies that the Pod may be schedulable by the event, - // and worth retrying the scheduling again after backoff. - QueueAfterBackoff - - // QueueImmediately is returned only when it is highly possible that the Pod gets scheduled in the next scheduling. - // You should only return QueueImmediately when there is a high chance that the Pod gets scheduled in the next scheduling. - // Otherwise, it's detrimental to scheduling throughput. - // For example, when the Pod was rejected as waiting for an external resource to be provisioned, that is directly tied to the Pod, - // and the event is that the resource is provisioned, then you can return QueueImmediately. - // As a counterexample, when the Pod was rejected due to insufficient memory resource, - // and the event is that more memory on Node is available, then you should return QueueAfterBackoff instead of QueueImmediately - // because other Pods may be waiting for the same resources and only a few of them would schedule in the next scheduling cycle. - QueueImmediately + // Queue implies that the Pod may be schedulable by the event. + Queue ) func (s QueueingHint) String() string { switch s { case QueueSkip: return "QueueSkip" - case QueueAfterBackoff: - return "QueueAfterBackoff" - case QueueImmediately: - return "QueueImmediately" + case Queue: + return "Queue" } return "" } @@ -177,9 +166,11 @@ type QueuedPodInfo struct { // It shouldn't be updated once initialized. It's used to record the e2e scheduling // latency for a pod. InitialAttemptTimestamp *time.Time - // If a Pod failed in a scheduling cycle, record the plugin names it failed by. + // UnschedulablePlugins records the plugin names that the Pod failed with Unschedulable or UnschedulableAndUnresolvable status. // It's registered only when the Pod is rejected in PreFilter, Filter, Reserve, or Permit (WaitOnPermit). UnschedulablePlugins sets.Set[string] + // PendingPlugins records the plugin names that the Pod failed with Pending status. + PendingPlugins sets.Set[string] // Whether the Pod is scheduling gated (by PreEnqueuePlugins) or not. Gated bool } @@ -290,8 +281,11 @@ type WeightedAffinityTerm struct { // Diagnosis records the details to diagnose a scheduling failure. type Diagnosis struct { - NodeToStatusMap NodeToStatusMap + NodeToStatusMap NodeToStatusMap + // UnschedulablePlugins are plugins that returns Unschedulable or UnschedulableAndUnresolvable. UnschedulablePlugins sets.Set[string] + // UnschedulablePlugins are plugins that returns Pending. + PendingPlugins sets.Set[string] // PreFilterMsg records the messages returned from PreFilter plugins. 
PreFilterMsg string // PostFilterMsg records the messages returned from PostFilter plugins. @@ -310,6 +304,24 @@ const ( NoNodeAvailableMsg = "0/%v nodes are available" ) +func (d *Diagnosis) AddPluginStatus(sts *Status) { + if sts.Plugin() == "" { + return + } + if sts.IsRejected() { + if d.UnschedulablePlugins == nil { + d.UnschedulablePlugins = sets.New[string]() + } + d.UnschedulablePlugins.Insert(sts.Plugin()) + } + if sts.Code() == Pending { + if d.PendingPlugins == nil { + d.PendingPlugins = sets.New[string]() + } + d.PendingPlugins.Insert(sts.Plugin()) + } +} + // Error returns detailed information of why the pod failed to fit on each node. // A message format is "0/X nodes are available: . . ." func (f *FitError) Error() string { @@ -460,8 +472,20 @@ func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffini type ImageStateSummary struct { // Size of the image Size int64 - // Used to track how many nodes have this image + // Used to track how many nodes have this image, it is computed from the Nodes field below + // during the execution of Snapshot. NumNodes int + // A set of node names for nodes having this image present. This field is used for + // keeping track of the nodes during update/add/remove events. + Nodes sets.Set[string] +} + +// Snapshot returns a copy without Nodes field of ImageStateSummary +func (iss *ImageStateSummary) Snapshot() *ImageStateSummary { + return &ImageStateSummary{ + Size: iss.Size, + NumNodes: iss.Nodes.Len(), + } } // NodeInfo is node level aggregated information. @@ -638,15 +662,15 @@ func (n *NodeInfo) Node() *v1.Node { return n.node } -// Clone returns a copy of this node. -func (n *NodeInfo) Clone() *NodeInfo { +// Snapshot returns a copy of this node, Except that ImageStates is copied without the Nodes field. +func (n *NodeInfo) Snapshot() *NodeInfo { clone := &NodeInfo{ node: n.node, Requested: n.Requested.Clone(), NonZeroRequested: n.NonZeroRequested.Clone(), Allocatable: n.Allocatable.Clone(), UsedPorts: make(HostPortInfo), - ImageStates: n.ImageStates, + ImageStates: make(map[string]*ImageStateSummary), PVCRefCounts: make(map[string]int), Generation: n.Generation, } @@ -669,6 +693,13 @@ func (n *NodeInfo) Clone() *NodeInfo { if len(n.PodsWithRequiredAntiAffinity) > 0 { clone.PodsWithRequiredAntiAffinity = append([]*PodInfo(nil), n.PodsWithRequiredAntiAffinity...) 
} + if len(n.ImageStates) > 0 { + state := make(map[string]*ImageStateSummary, len(n.ImageStates)) + for imageName, imageState := range n.ImageStates { + state[imageName] = imageState.Snapshot() + } + clone.ImageStates = state + } for key, value := range n.PVCRefCounts { clone.PVCRefCounts[key] = value } diff --git a/pkg/scheduler/framework/types_test.go b/pkg/scheduler/framework/types_test.go index fd9abd792735d..74f9695b28fa2 100644 --- a/pkg/scheduler/framework/types_test.go +++ b/pkg/scheduler/framework/types_test.go @@ -19,7 +19,6 @@ package framework import ( "fmt" "reflect" - "strings" "testing" "github.com/google/go-cmp/cmp" @@ -31,6 +30,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/pkg/features" + st "k8s.io/kubernetes/pkg/scheduler/testing" ) func TestNewResource(t *testing.T) { @@ -210,49 +210,30 @@ func TestSetMaxResource(t *testing.T) { } } -type testingMode interface { - Fatalf(format string, args ...interface{}) -} - -func makeBasePod(t testingMode, nodeName, objName, cpu, mem, extended string, ports []v1.ContainerPort, volumes []v1.Volume) *v1.Pod { - req := v1.ResourceList{} - if cpu != "" { - req = v1.ResourceList{ - v1.ResourceCPU: resource.MustParse(cpu), - v1.ResourceMemory: resource.MustParse(mem), - } - if extended != "" { - parts := strings.Split(extended, ":") - if len(parts) != 2 { - t.Fatalf("Invalid extended resource string: \"%s\"", extended) - } - req[v1.ResourceName(parts[0])] = resource.MustParse(parts[1]) - } - } - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(objName), - Namespace: "node_info_cache_test", - Name: objName, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Resources: v1.ResourceRequirements{ - Requests: req, - }, - Ports: ports, - }}, - NodeName: nodeName, - Volumes: volumes, - }, - } -} - func TestNewNodeInfo(t *testing.T) { nodeName := "test-node" pods := []*v1.Pod{ - makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}, nil), - makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}, nil), + st.MakePod().UID("test-1").Namespace("node_info_cache_test").Name("test-1").Node(nodeName). + Containers([]v1.Container{st.MakeContainer().ResourceRequests(map[v1.ResourceName]string{ + v1.ResourceCPU: "100m", + v1.ResourceMemory: "500", + }).ContainerPort([]v1.ContainerPort{{ + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }}).Obj()}). + Obj(), + + st.MakePod().UID("test-2").Namespace("node_info_cache_test").Name("test-2").Node(nodeName). + Containers([]v1.Container{st.MakeContainer().ResourceRequests(map[v1.ResourceName]string{ + v1.ResourceCPU: "200m", + v1.ResourceMemory: "1Ki", + }).ContainerPort([]v1.ContainerPort{{ + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }}).Obj()}). + Obj(), } expected := &NodeInfo{ @@ -513,7 +494,7 @@ func TestNodeInfoClone(t *testing.T) { for i, test := range tests { t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) { - ni := test.nodeInfo.Clone() + ni := test.nodeInfo.Snapshot() // Modify the field to check if the result is a clone of the origin one. 
test.nodeInfo.Generation += 10 test.nodeInfo.UsedPorts.Remove("127.0.0.1", "TCP", 80) @@ -841,10 +822,28 @@ func TestNodeInfoAddPod(t *testing.T) { func TestNodeInfoRemovePod(t *testing.T) { nodeName := "test-node" pods := []*v1.Pod{ - makeBasePod(t, nodeName, "test-1", "100m", "500", "", - []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}, - []v1.Volume{{VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "pvc-1"}}}}), - makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}, nil), + st.MakePod().UID("test-1").Namespace("node_info_cache_test").Name("test-1").Node(nodeName). + Containers([]v1.Container{st.MakeContainer().ResourceRequests(map[v1.ResourceName]string{ + v1.ResourceCPU: "100m", + v1.ResourceMemory: "500", + }).ContainerPort([]v1.ContainerPort{{ + HostIP: "127.0.0.1", + HostPort: 80, + Protocol: "TCP", + }}).Obj()}). + Volumes([]v1.Volume{{VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "pvc-1"}}}}). + Obj(), + + st.MakePod().UID("test-2").Namespace("node_info_cache_test").Name("test-2").Node(nodeName). + Containers([]v1.Container{st.MakeContainer().ResourceRequests(map[v1.ResourceName]string{ + v1.ResourceCPU: "200m", + v1.ResourceMemory: "1Ki", + }).ContainerPort([]v1.ContainerPort{{ + HostIP: "127.0.0.1", + HostPort: 8080, + Protocol: "TCP", + }}).Obj()}). + Obj(), } // add pod Overhead @@ -861,7 +860,7 @@ func TestNodeInfoRemovePod(t *testing.T) { expectedNodeInfo *NodeInfo }{ { - pod: makeBasePod(t, nodeName, "non-exist", "0", "0", "", []v1.ContainerPort{{}}, []v1.Volume{}), + pod: st.MakePod().UID("non-exist").Namespace("node_info_cache_test").Node(nodeName).Obj(), errExpected: true, expectedNodeInfo: &NodeInfo{ node: &v1.Node{ diff --git a/pkg/scheduler/internal/cache/cache.go b/pkg/scheduler/internal/cache/cache.go index 4e94b4b3baaf0..83b57d30eb098 100644 --- a/pkg/scheduler/internal/cache/cache.go +++ b/pkg/scheduler/internal/cache/cache.go @@ -71,8 +71,8 @@ type cacheImpl struct { // head of the linked list. headNode *nodeInfoListItem nodeTree *nodeTree - // A map from image name to its imageState. - imageStates map[string]*imageState + // A map from image name to its ImageStateSummary. + imageStates map[string]*framework.ImageStateSummary } type podState struct { @@ -84,21 +84,6 @@ type podState struct { bindingFinished bool } -type imageState struct { - // Size of the image - size int64 - // A set of node names for nodes having this image present - nodes sets.Set[string] -} - -// createImageStateSummary returns a summarizing snapshot of the given image's state. 
-func (cache *cacheImpl) createImageStateSummary(state *imageState) *framework.ImageStateSummary { - return &framework.ImageStateSummary{ - Size: state.size, - NumNodes: len(state.nodes), - } -} - func newCache(ctx context.Context, ttl, period time.Duration) *cacheImpl { logger := klog.FromContext(ctx) return &cacheImpl{ @@ -110,7 +95,7 @@ func newCache(ctx context.Context, ttl, period time.Duration) *cacheImpl { nodeTree: newNodeTree(logger, nil), assumedPods: sets.New[string](), podStates: make(map[string]*podState), - imageStates: make(map[string]*imageState), + imageStates: make(map[string]*framework.ImageStateSummary), } } @@ -182,7 +167,7 @@ func (cache *cacheImpl) Dump() *Dump { nodes := make(map[string]*framework.NodeInfo, len(cache.nodes)) for k, v := range cache.nodes { - nodes[k] = v.info.Clone() + nodes[k] = v.info.Snapshot() } return &Dump{ @@ -233,7 +218,7 @@ func (cache *cacheImpl) UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapsho existing = &framework.NodeInfo{} nodeSnapshot.nodeInfoMap[np.Name] = existing } - clone := node.info.Clone() + clone := node.info.Snapshot() // We track nodes that have pods with affinity, here we check if this node changed its // status from having pods with affinity to NOT having pods with affinity or the other // way around. @@ -629,13 +614,12 @@ func (cache *cacheImpl) AddNode(logger klog.Logger, node *v1.Node) *framework.No cache.nodeTree.addNode(logger, node) cache.addNodeImageStates(node, n.info) n.info.SetNode(node) - return n.info.Clone() + return n.info.Snapshot() } func (cache *cacheImpl) UpdateNode(logger klog.Logger, oldNode, newNode *v1.Node) *framework.NodeInfo { cache.mu.Lock() defer cache.mu.Unlock() - n, ok := cache.nodes[newNode.Name] if !ok { n = newNodeInfoListItem(framework.NewNodeInfo()) @@ -649,7 +633,7 @@ func (cache *cacheImpl) UpdateNode(logger klog.Logger, oldNode, newNode *v1.Node cache.nodeTree.updateNode(logger, oldNode, newNode) cache.addNodeImageStates(newNode, n.info) n.info.SetNode(newNode) - return n.info.Clone() + return n.info.Snapshot() } // RemoveNode removes a node from the cache's tree. 
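// Illustrative sketch (values and node names are made up): how the reworked
// ImageStateSummary from this patch is kept up to date via its Nodes set and then
// snapshotted for a NodeInfo copy. The fields and Snapshot() come from the hunks
// in types.go above; everything else is hypothetical.
package example

import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

func imageStateExample() {
	// The cache now stores the summary itself and tracks owning nodes in Nodes.
	state := &framework.ImageStateSummary{
		Size:  600 * 1024 * 1024,
		Nodes: sets.New("node-a", "node-b"),
	}
	state.Nodes.Insert("node-c") // node add event
	state.Nodes.Delete("node-b") // node delete event

	// Snapshot() freezes NumNodes from Nodes.Len() and drops the Nodes set,
	// which is the form NodeInfo.Snapshot stores per image.
	summary := state.Snapshot()
	_ = summary.NumNodes // 2
}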
@@ -693,17 +677,17 @@ func (cache *cacheImpl) addNodeImageStates(node *v1.Node, nodeInfo *framework.No // update the entry in imageStates state, ok := cache.imageStates[name] if !ok { - state = &imageState{ - size: image.SizeBytes, - nodes: sets.New(node.Name), + state = &framework.ImageStateSummary{ + Size: image.SizeBytes, + Nodes: sets.New(node.Name), } cache.imageStates[name] = state } else { - state.nodes.Insert(node.Name) + state.Nodes.Insert(node.Name) } - // create the imageStateSummary for this image + // create the ImageStateSummary for this image if _, ok := newSum[name]; !ok { - newSum[name] = cache.createImageStateSummary(state) + newSum[name] = state } } } @@ -722,8 +706,8 @@ func (cache *cacheImpl) removeNodeImageStates(node *v1.Node) { for _, name := range image.Names { state, ok := cache.imageStates[name] if ok { - state.nodes.Delete(node.Name) - if len(state.nodes) == 0 { + state.Nodes.Delete(node.Name) + if state.Nodes.Len() == 0 { // Remove the unused image to make sure the length of // imageStates represents the total number of different // images on all nodes diff --git a/pkg/scheduler/internal/cache/cache_test.go b/pkg/scheduler/internal/cache/cache_test.go index 28e2a738ac5d2..ccc3d9b2b4994 100644 --- a/pkg/scheduler/internal/cache/cache_test.go +++ b/pkg/scheduler/internal/cache/cache_test.go @@ -1037,7 +1037,7 @@ func TestForgetPod(t *testing.T) { } // buildNodeInfo creates a NodeInfo by simulating node operations in cache. -func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *framework.NodeInfo { +func buildNodeInfo(node *v1.Node, pods []*v1.Pod, imageStates map[string]*framework.ImageStateSummary) *framework.NodeInfo { expected := framework.NewNodeInfo() expected.SetNode(node) expected.Allocatable = framework.NewResource(node.Status.Allocatable) @@ -1045,48 +1045,83 @@ func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *framework.NodeInfo { for _, pod := range pods { expected.AddPod(pod) } + for _, image := range node.Status.Images { + for _, name := range image.Names { + if state, ok := imageStates[name]; ok { + expected.ImageStates[name] = state + } + } + } return expected } +// buildImageStates creates ImageStateSummary of image from nodes that will be added in cache. +func buildImageStates(nodes []*v1.Node) map[string]*framework.ImageStateSummary { + imageStates := make(map[string]*framework.ImageStateSummary) + for _, item := range nodes { + for _, image := range item.Status.Images { + for _, name := range image.Names { + if state, ok := imageStates[name]; !ok { + state = &framework.ImageStateSummary{ + Size: image.SizeBytes, + Nodes: sets.New[string](item.Name), + } + imageStates[name] = state + } else { + state.Nodes.Insert(item.Name) + } + } + } + } + return imageStates +} + // TestNodeOperators tests node operations of cache, including add, update // and remove. 
func TestNodeOperators(t *testing.T) { // Test data - nodeName := "test-node" - cpu1 := resource.MustParse("1000m") - mem100m := resource.MustParse("100m") cpuHalf := resource.MustParse("500m") mem50m := resource.MustParse("50m") - resourceFooName := "example.com/foo" - resourceFoo := resource.MustParse("1") - + resourceList1 := map[v1.ResourceName]string{ + v1.ResourceCPU: "1000m", + v1.ResourceMemory: "100m", + v1.ResourceName("example.com/foo"): "1", + } + resourceList2 := map[v1.ResourceName]string{ + v1.ResourceCPU: "500m", + v1.ResourceMemory: "50m", + v1.ResourceName("example.com/foo"): "2", + } + taints := []v1.Taint{ + { + Key: "test-key", + Value: "test-value", + Effect: v1.TaintEffectPreferNoSchedule, + }, + } + imageStatus1 := map[string]int64{ + "gcr.io/80:latest": 80 * mb, + "gcr.io/80:v1": 80 * mb, + "gcr.io/300:latest": 300 * mb, + "gcr.io/300:v1": 300 * mb, + } + imageStatus2 := map[string]int64{ + "gcr.io/600:latest": 600 * mb, + "gcr.io/80:latest": 80 * mb, + "gcr.io/900:latest": 900 * mb, + } tests := []struct { - name string - node *v1.Node - pods []*v1.Pod + name string + nodes []*v1.Node + pods []*v1.Pod }{ { name: "operate the node with one pod", - node: &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - Status: v1.NodeStatus{ - Allocatable: v1.ResourceList{ - v1.ResourceCPU: cpu1, - v1.ResourceMemory: mem100m, - v1.ResourceName(resourceFooName): resourceFoo, - }, - }, - Spec: v1.NodeSpec{ - Taints: []v1.Taint{ - { - Key: "test-key", - Value: "test-value", - Effect: v1.TaintEffectPreferNoSchedule, - }, - }, - }, + nodes: []*v1.Node{ + &st.MakeNode().Name("test-node-1").Capacity(resourceList1).Taints(taints).Images(imageStatus1).Node, + &st.MakeNode().Name("test-node-2").Capacity(resourceList2).Taints(taints).Images(imageStatus2).Node, + &st.MakeNode().Name("test-node-3").Capacity(resourceList1).Taints(taints).Images(imageStatus1).Node, + &st.MakeNode().Name("test-node-4").Capacity(resourceList2).Taints(taints).Images(imageStatus2).Node, }, pods: []*v1.Pod{ { @@ -1095,7 +1130,7 @@ func TestNodeOperators(t *testing.T) { UID: types.UID("pod1"), }, Spec: v1.PodSpec{ - NodeName: nodeName, + NodeName: "test-node-1", Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ @@ -1119,26 +1154,10 @@ func TestNodeOperators(t *testing.T) { }, { name: "operate the node with two pods", - node: &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - Status: v1.NodeStatus{ - Allocatable: v1.ResourceList{ - v1.ResourceCPU: cpu1, - v1.ResourceMemory: mem100m, - v1.ResourceName(resourceFooName): resourceFoo, - }, - }, - Spec: v1.NodeSpec{ - Taints: []v1.Taint{ - { - Key: "test-key", - Value: "test-value", - Effect: v1.TaintEffectPreferNoSchedule, - }, - }, - }, + nodes: []*v1.Node{ + &st.MakeNode().Name("test-node-1").Capacity(resourceList1).Taints(taints).Images(imageStatus1).Node, + &st.MakeNode().Name("test-node-2").Capacity(resourceList2).Taints(taints).Images(imageStatus2).Node, + &st.MakeNode().Name("test-node-3").Capacity(resourceList1).Taints(taints).Images(imageStatus1).Node, }, pods: []*v1.Pod{ { @@ -1147,7 +1166,7 @@ func TestNodeOperators(t *testing.T) { UID: types.UID("pod1"), }, Spec: v1.PodSpec{ - NodeName: nodeName, + NodeName: "test-node-1", Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ @@ -1166,7 +1185,7 @@ func TestNodeOperators(t *testing.T) { UID: types.UID("pod2"), }, Spec: v1.PodSpec{ - NodeName: nodeName, + NodeName: "test-node-1", Containers: []v1.Container{ { Resources: v1.ResourceRequirements{ @@ 
-1188,16 +1207,24 @@ func TestNodeOperators(t *testing.T) { logger, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() - expected := buildNodeInfo(tc.node, tc.pods) - node := tc.node + node := tc.nodes[0] + + imageStates := buildImageStates(tc.nodes) + expected := buildNodeInfo(node, tc.pods, imageStates) cache := newCache(ctx, time.Second, time.Second) - cache.AddNode(logger, node) + for _, nodeItem := range tc.nodes { + cache.AddNode(logger, nodeItem) + } for _, pod := range tc.pods { if err := cache.AddPod(logger, pod); err != nil { t.Fatal(err) } } + nodes := map[string]*framework.NodeInfo{} + for nodeItem := cache.headNode; nodeItem != nil; nodeItem = nodeItem.next { + nodes[nodeItem.info.Node().Name] = nodeItem.info + } // Step 1: the node was added into cache successfully. got, found := cache.nodes[node.Name] @@ -1208,14 +1235,20 @@ func TestNodeOperators(t *testing.T) { if err != nil { t.Fatal(err) } - if cache.nodeTree.numNodes != 1 || nodesList[len(nodesList)-1] != node.Name { - t.Errorf("cache.nodeTree is not updated correctly after adding node: %v", node.Name) + if cache.nodeTree.numNodes != len(tc.nodes) || len(nodesList) != len(tc.nodes) { + t.Errorf("cache.nodeTree is not updated correctly after adding node got: %d, expected: %d", + cache.nodeTree.numNodes, len(tc.nodes)) } // Generations are globally unique. We check in our unit tests that they are incremented correctly. expected.Generation = got.info.Generation if diff := cmp.Diff(expected, got.info, cmp.AllowUnexported(framework.NodeInfo{})); diff != "" { - t.Errorf("Unexpected node info from cache (-want, +got):\n%s", diff) + t.Errorf("Failed to add node into scheduler cache (-want,+got):\n%s", diff) + } + + // check imageState of NodeInfo with specific image when node added + if !checkImageStateSummary(nodes, "gcr.io/80:latest", "gcr.io/300:latest") { + t.Error("image have different ImageStateSummary") } // Step 2: dump cached nodes successfully. @@ -1224,12 +1257,16 @@ func TestNodeOperators(t *testing.T) { t.Error(err) } newNode, found := cachedNodes.nodeInfoMap[node.Name] - if !found || len(cachedNodes.nodeInfoMap) != 1 { - t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes, cache.nodes) + if !found || len(cachedNodes.nodeInfoMap) != len(tc.nodes) { + t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes.nodeInfoMap, tc.nodes) } expected.Generation = newNode.Generation - if diff := cmp.Diff(expected, newNode, cmp.AllowUnexported(framework.NodeInfo{})); diff != "" { - t.Errorf("Unexpected clone node info (-want, +got):\n%s", diff) + if diff := cmp.Diff(newNode, expected.Snapshot(), cmp.AllowUnexported(framework.NodeInfo{})); diff != "" { + t.Errorf("Failed to clone node:\n%s", diff) + } + // check imageState of NodeInfo with specific image when update snapshot + if !checkImageStateSummary(cachedNodes.nodeInfoMap, "gcr.io/80:latest", "gcr.io/300:latest") { + t.Error("image have different ImageStateSummary") } // Step 3: update node attribute successfully. 
@@ -1249,13 +1286,17 @@ func TestNodeOperators(t *testing.T) { if diff := cmp.Diff(expected, got.info, cmp.AllowUnexported(framework.NodeInfo{})); diff != "" { t.Errorf("Unexpected schedulertypes after updating node (-want, +got):\n%s", diff) } + // check imageState of NodeInfo with specific image when update node + if !checkImageStateSummary(nodes, "gcr.io/80:latest", "gcr.io/300:latest") { + t.Error("image have different ImageStateSummary") + } // Check nodeTree after update nodesList, err = cache.nodeTree.list() if err != nil { t.Fatal(err) } - if cache.nodeTree.numNodes != 1 || nodesList[len(nodesList)-1] != node.Name { - t.Errorf("unexpected cache.nodeTree after updating node: %v", node.Name) + if cache.nodeTree.numNodes != len(tc.nodes) || len(nodesList) != len(tc.nodes) { + t.Errorf("unexpected cache.nodeTree after updating node") } // Step 4: the node can be removed even if it still has pods. @@ -1278,9 +1319,13 @@ func TestNodeOperators(t *testing.T) { if err != nil { t.Fatal(err) } - if cache.nodeTree.numNodes != 0 || len(nodesList) != 0 { + if cache.nodeTree.numNodes != len(tc.nodes)-1 || len(nodesList) != len(tc.nodes)-1 { t.Errorf("unexpected cache.nodeTree after removing node: %v", node.Name) } + // check imageState of NodeInfo with specific image when delete node + if !checkImageStateSummary(nodes, "gcr.io/80:latest", "gcr.io/300:latest") { + t.Error("image have different ImageStateSummary after removing node") + } // Pods are still in the pods cache. for _, p := range tc.pods { if _, err := cache.GetPod(p); err != nil { @@ -1976,6 +2021,28 @@ func makeBasePod(t testingMode, nodeName, objName, cpu, mem, extended string, po return podWrapper.Obj() } +// checkImageStateSummary collect ImageStateSummary of image traverse nodes, +// the collected ImageStateSummary should be equal +func checkImageStateSummary(nodes map[string]*framework.NodeInfo, imageNames ...string) bool { + for _, imageName := range imageNames { + var imageState *framework.ImageStateSummary + for _, node := range nodes { + state, ok := node.ImageStates[imageName] + if !ok { + continue + } + if imageState == nil { + imageState = state + continue + } + if diff := cmp.Diff(imageState, state); diff != "" { + return false + } + } + } + return true +} + func setupCacheOf1kNodes30kPods(b *testing.B) Cache { logger, ctx := ktesting.NewTestContext(b) ctx, cancel := context.WithCancel(ctx) diff --git a/pkg/scheduler/internal/cache/snapshot.go b/pkg/scheduler/internal/cache/snapshot.go index abd79312a2df9..164f1510c6d5e 100644 --- a/pkg/scheduler/internal/cache/snapshot.go +++ b/pkg/scheduler/internal/cache/snapshot.go @@ -130,7 +130,7 @@ func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.Set[str for _, name := range image.Names { imageStates[name] = &framework.ImageStateSummary{ Size: image.SizeBytes, - NumNodes: len(imageExistenceMap[name]), + NumNodes: imageExistenceMap[name].Len(), } } } diff --git a/pkg/scheduler/internal/heap/heap_test.go b/pkg/scheduler/internal/heap/heap_test.go index b337e3cc38799..7b853efc27d05 100644 --- a/pkg/scheduler/internal/heap/heap_test.go +++ b/pkg/scheduler/internal/heap/heap_test.go @@ -209,7 +209,7 @@ func TestHeap_Get(t *testing.T) { } // Get non-existing object. 
_, exists, err = h.Get(mkHeapObj("non-existing", 0)) - if err != nil || exists == true { + if err != nil || exists { t.Fatalf("didn't expect to get any object") } } @@ -223,12 +223,12 @@ func TestHeap_GetByKey(t *testing.T) { h.Add(mkHeapObj("baz", 11)) obj, exists, err := h.GetByKey("baz") - if err != nil || exists == false || obj.(testHeapObject).val != 11 { + if err != nil || !exists || obj.(testHeapObject).val != 11 { t.Fatalf("unexpected error in getting element") } // Get non-existing object. _, exists, err = h.GetByKey("non-existing") - if err != nil || exists == true { + if err != nil || exists { t.Fatalf("didn't expect to get any object") } } diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go index 214be04d22044..04be63bd3cc5a 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/pkg/scheduler/internal/queue/scheduling_queue.go @@ -397,59 +397,97 @@ func (p *PriorityQueue) Run(logger klog.Logger) { }, 30*time.Second, p.stop) } -// isPodWorthRequeuing calls QueueingHintFn of only plugins registered in pInfo.unschedulablePlugins. -// If any QueueingHintFn returns QueueImmediately, the scheduling queue is supposed to enqueue this Pod to activeQ. -// If no QueueingHintFn returns QueueImmediately, but some return QueueAfterBackoff, +// queueingStrategy indicates how the scheduling queue should enqueue the Pod from unschedulable pod pool. +type queueingStrategy int + +const ( + // queueSkip indicates that the scheduling queue should skip requeuing the Pod to activeQ/backoffQ. + queueSkip queueingStrategy = iota + // queueAfterBackoff indicates that the scheduling queue should requeue the Pod after backoff is completed. + queueAfterBackoff + // queueImmediately indicates that the scheduling queue should skip backoff and requeue the Pod immediately to activeQ. + queueImmediately +) + +// isPodWorthRequeuing calls QueueingHintFn of only plugins registered in pInfo.unschedulablePlugins and pInfo.PendingPlugins. +// +// If any of pInfo.PendingPlugins return Queue, +// the scheduling queue is supposed to enqueue this Pod to activeQ, skipping backoffQ. +// If any of pInfo.unschedulablePlugins return Queue, // the scheduling queue is supposed to enqueue this Pod to activeQ/backoffQ depending on the remaining backoff time of the Pod. -// If all QueueingHintFn returns QueueSkip, the scheduling queue enqueues the Pod back to unschedulable Pod pool +// If all QueueingHintFns returns Skip, the scheduling queue enqueues the Pod back to unschedulable Pod pool // because no plugin changes the scheduling result via the event. -func (p *PriorityQueue) isPodWorthRequeuing(logger klog.Logger, pInfo *framework.QueuedPodInfo, event framework.ClusterEvent, oldObj, newObj interface{}) framework.QueueingHint { - if pInfo.UnschedulablePlugins.Len() == 0 { - logger.V(6).Info("Worth requeuing because no unschedulable plugins", "pod", klog.KObj(pInfo.Pod)) - return framework.QueueAfterBackoff +func (p *PriorityQueue) isPodWorthRequeuing(logger klog.Logger, pInfo *framework.QueuedPodInfo, event framework.ClusterEvent, oldObj, newObj interface{}) queueingStrategy { + rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins) + if rejectorPlugins.Len() == 0 { + logger.V(6).Info("Worth requeuing because no failed plugins", "pod", klog.KObj(pInfo.Pod)) + return queueAfterBackoff } if event.IsWildCard() { + // If the wildcard event is special one as someone wants to force all Pods to move to activeQ/backoffQ. 
+ // We return queueAfterBackoff in this case, while resetting all blocked plugins. logger.V(6).Info("Worth requeuing because the event is wildcard", "pod", klog.KObj(pInfo.Pod)) - return framework.QueueAfterBackoff + return queueAfterBackoff } hintMap, ok := p.queueingHintMap[pInfo.Pod.Spec.SchedulerName] if !ok { // shouldn't reach here unless bug. logger.Error(nil, "No QueueingHintMap is registered for this profile", "profile", pInfo.Pod.Spec.SchedulerName, "pod", klog.KObj(pInfo.Pod)) - return framework.QueueAfterBackoff + return queueAfterBackoff } pod := pInfo.Pod - queueHint := framework.QueueSkip + queueStrategy := queueSkip for eventToMatch, hintfns := range hintMap { if eventToMatch.Resource != event.Resource || eventToMatch.ActionType&event.ActionType == 0 { continue } for _, hintfn := range hintfns { - if !pInfo.UnschedulablePlugins.Has(hintfn.PluginName) { + if !rejectorPlugins.Has(hintfn.PluginName) { + // skip if it's not hintfn from rejectorPlugins. continue } - switch h := hintfn.QueueingHintFn(logger, pod, oldObj, newObj); h { - case framework.QueueSkip: + hint, err := hintfn.QueueingHintFn(logger, pod, oldObj, newObj) + if err != nil { + // If the QueueingHintFn returned an error, we should treat the event as Queue so that we can prevent + // the Pod from being stuck in the unschedulable pod pool. + oldObjMeta, newObjMeta, asErr := util.As[klog.KMetadata](oldObj, newObj) + if asErr != nil { + logger.Error(err, "QueueingHintFn returns error", "event", event, "plugin", hintfn.PluginName, "pod", klog.KObj(pod)) + } else { + logger.Error(err, "QueueingHintFn returns error", "event", event, "plugin", hintfn.PluginName, "pod", klog.KObj(pod), "oldObj", klog.KObj(oldObjMeta), "newObj", klog.KObj(newObjMeta)) + } + hint = framework.Queue + } + if hint == framework.QueueSkip { continue - case framework.QueueImmediately: - return h - case framework.QueueAfterBackoff: - // replace queueHint with the returned value, - // but continue to other queueHintFn to check because other plugins may want to return QueueImmediately. - queueHint = h } + if pInfo.PendingPlugins.Has(hintfn.PluginName) { + // interprets Queue from the Pending plugin as queueImmediately. + // We can return immediately because queueImmediately is the highest priority. + return queueImmediately + } + + // interprets Queue from the unschedulable plugin as queueAfterBackoff. + + if pInfo.PendingPlugins.Len() == 0 { + // We can return immediately because no Pending plugins, which only can make queueImmediately, registered in this Pod, + // and queueAfterBackoff is the second highest priority. + return queueAfterBackoff + } + + // We can't return immediately because there are some Pending plugins registered in this Pod. + // We need to check if those plugins return Queue or not and if they do, we return queueImmediately. + queueStrategy = queueAfterBackoff } } - // No queueing hint function is registered for this event - // or no queueing hint fn returns the value other than QueueSkip. - return queueHint + return queueStrategy } // runPreEnqueuePlugins iterates PreEnqueue function in each registered PreEnqueuePlugin. @@ -613,7 +651,7 @@ func (p *PriorityQueue) SchedulingCycle() int64 { // determineSchedulingHintForInFlightPod looks at the unschedulable plugins of the given Pod // and determines the scheduling hint for this Pod while checking the events that happened during in-flight. 
-func (p *PriorityQueue) determineSchedulingHintForInFlightPod(logger klog.Logger, pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) framework.QueueingHint { +func (p *PriorityQueue) determineSchedulingHintForInFlightPod(logger klog.Logger, pInfo *framework.QueuedPodInfo) queueingStrategy { logger.V(5).Info("Checking events for in-flight pod", "pod", klog.KObj(pInfo.Pod), "unschedulablePlugins", pInfo.UnschedulablePlugins, "inFlightEventsSize", p.inFlightEvents.Len(), "inFlightPodsSize", len(p.inFlightPods)) // AddUnschedulableIfNotPresent is called with the Pod at the end of scheduling or binding. @@ -625,55 +663,50 @@ func (p *PriorityQueue) determineSchedulingHintForInFlightPod(logger klog.Logger // be empty. If it is not, we may have a problem. if len(pInfo.UnschedulablePlugins) != 0 { logger.Error(nil, "In flight Pod isn't found in the scheduling queue. If you see this error log, it's likely a bug in the scheduler.", "pod", klog.KObj(pInfo.Pod)) - return framework.QueueAfterBackoff + return queueAfterBackoff } if p.inFlightEvents.Len() > len(p.inFlightPods) { - return framework.QueueAfterBackoff - } - return framework.QueueSkip - } - - if len(pInfo.UnschedulablePlugins) == 0 { - // When there is no unschedulable plugin, we cannot have a guess which event makes this Pod schedulable. - // If there has been any concurrent event for the pod, it has to go to the backoff queue because the event - // may have been relevant. - for event := inFlightPod.Next(); event != nil; event = event.Next() { - _, ok := event.Value.(*clusterEvent) - if ok { - // There really was a concurrent event. - return framework.QueueAfterBackoff - } + return queueAfterBackoff } - return framework.QueueSkip + return queueSkip + } + + rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins) + if len(rejectorPlugins) == 0 { + // No failed plugins are associated with this Pod. + // Meaning something unusual (a temporal failure on kube-apiserver, etc) happened and this Pod gets moved back to the queue. + // In this case, we should retry scheduling it because this Pod may not be retried until the next flush. + return queueAfterBackoff } // check if there is an event that makes this Pod schedulable based on pInfo.UnschedulablePlugins. - schedulingHint := framework.QueueSkip + queueingStrategy := queueSkip for event := inFlightPod.Next(); event != nil; event = event.Next() { e, ok := event.Value.(*clusterEvent) if !ok { - // Must be another pod. Can be ignored. + // Must be another in-flight Pod (*v1.Pod). Can be ignored. continue } logger.V(5).Info("Checking event for in-flight pod", "pod", klog.KObj(pInfo.Pod), "event", e.event.Label) - hint := p.isPodWorthRequeuing(logger, pInfo, e.event, e.oldObj, e.newObj) - if hint == framework.QueueSkip { + switch p.isPodWorthRequeuing(logger, pInfo, e.event, e.oldObj, e.newObj) { + case queueSkip: continue - } - - if hint == framework.QueueImmediately { - // QueueImmediately is the strongest opinion, we don't need to check other events. - schedulingHint = framework.QueueImmediately - break - } - if hint == framework.QueueAfterBackoff { - // replace schedulingHint with QueueAfterBackoff, - // but continue to check other events because we may find it QueueImmediately with other events. - schedulingHint = framework.QueueAfterBackoff + case queueImmediately: + // queueImmediately is the highest priority. + // No need to go through the rest of the events. 
+ return queueImmediately + case queueAfterBackoff: + // replace schedulingHint with queueAfterBackoff + queueingStrategy = queueAfterBackoff + if pInfo.PendingPlugins.Len() == 0 { + // We can return immediately because no Pending plugins, which only can make queueImmediately, registered in this Pod, + // and queueAfterBackoff is the second highest priority. + return queueAfterBackoff + } } } - return schedulingHint + return queueingStrategy } // addUnschedulableIfNotPresentWithoutQueueingHint inserts a pod that cannot be scheduled into @@ -687,12 +720,22 @@ func (p *PriorityQueue) addUnschedulableWithoutQueueingHint(logger klog.Logger, // Refresh the timestamp since the pod is re-added. pInfo.Timestamp = p.clock.Now() + // When the queueing hint is enabled, they are used differently. + // But, we use all of them as UnschedulablePlugins when the queueing hint isn't enabled so that we don't break the old behaviour. + rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins) + // If a move request has been received, move it to the BackoffQ, otherwise move // it to unschedulablePods. - for plugin := range pInfo.UnschedulablePlugins { + for plugin := range rejectorPlugins { metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Inc() } - if p.moveRequestCycle >= podSchedulingCycle { + if p.moveRequestCycle >= podSchedulingCycle || len(rejectorPlugins) == 0 { + // Two cases to move a Pod to the active/backoff queue: + // - The Pod is rejected by some plugins, but a move request is received after this Pod's scheduling cycle is started. + // In this case, the received event may be make Pod schedulable and we should retry scheduling it. + // - No unschedulable plugins are associated with this Pod, + // meaning something unusual (a temporal failure on kube-apiserver, etc) happened and this Pod gets moved back to the queue. + // In this case, we should retry scheduling it because this Pod may not be retried until the next flush. if err := p.podBackoffQ.Add(pInfo); err != nil { return fmt.Errorf("error adding pod %v to the backoff queue: %v", klog.KObj(pod), err) } @@ -741,16 +784,17 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(logger klog.Logger, pInfo * // If a move request has been received, move it to the BackoffQ, otherwise move // it to unschedulablePods. - for plugin := range pInfo.UnschedulablePlugins { + rejectorPlugins := pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins) + for plugin := range rejectorPlugins { metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Inc() } - // Based on isPodWorthRequeuing(), we check whether this Pod may change its scheduling result by any of events that happened during scheduling. - schedulingHint := p.determineSchedulingHintForInFlightPod(logger, pInfo, podSchedulingCycle) + // We check whether this Pod may change its scheduling result by any of events that happened during scheduling. + schedulingHint := p.determineSchedulingHintForInFlightPod(logger, pInfo) // In this case, we try to requeue this Pod to activeQ/backoffQ. 
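addUnschedulableWithoutQueueingHint in the hunk above keeps the pre-queueing-hint behaviour but adds a second reason to prefer the backoff queue: a Pod that no plugin actually rejected (for example after a transient apiserver failure) should not sit in the unschedulable pool waiting for the periodic flush. A minimal sketch of that branch, with hypothetical parameter names standing in for the queue's fields:

```go
package sketch

// destinationWithoutHints condenses the branch in addUnschedulableWithoutQueueingHint:
// the Pod goes to backoffQ when a move request arrived at or after its scheduling cycle,
// or when no rejector plugin is recorded at all; otherwise it waits in unschedulablePods.
func destinationWithoutHints(moveRequestCycle, podSchedulingCycle int64, rejectorPluginCount int) string {
	if moveRequestCycle >= podSchedulingCycle || rejectorPluginCount == 0 {
		return "backoffQ"
	}
	return "unschedulablePods"
}
```

When queueing hints are enabled, AddUnschedulableIfNotPresent instead computes a strategy from the in-flight events and hands it to the requeue call that follows.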
queue := p.requeuePodViaQueueingHint(logger, pInfo, schedulingHint, ScheduleAttemptFailure) - logger.V(3).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", ScheduleAttemptFailure, "queue", queue, "schedulingCycle", podSchedulingCycle, "hint", schedulingHint) + logger.V(3).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", ScheduleAttemptFailure, "queue", queue, "schedulingCycle", podSchedulingCycle, "hint", schedulingHint, "unschedulable plugins", rejectorPlugins) if queue == activeQ { // When the Pod is moved to activeQ, need to let p.cond know so that the Pod will be pop()ed out. p.cond.Broadcast() @@ -841,10 +885,11 @@ func (p *PriorityQueue) Pop() (*framework.QueuedPodInfo, error) { } // Update metrics and reset the set of unschedulable plugins for the next attempt. - for plugin := range pInfo.UnschedulablePlugins { + for plugin := range pInfo.UnschedulablePlugins.Union(pInfo.PendingPlugins) { metrics.UnschedulableReason(plugin, pInfo.Pod.Spec.SchedulerName).Dec() } pInfo.UnschedulablePlugins.Clear() + pInfo.PendingPlugins.Clear() return pInfo, nil } @@ -1055,15 +1100,15 @@ func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(logger klog.Logger, event // It returns the queue name Pod goes. // // NOTE: this function assumes lock has been acquired in caller -func (p *PriorityQueue) requeuePodViaQueueingHint(logger klog.Logger, pInfo *framework.QueuedPodInfo, schedulingHint framework.QueueingHint, event string) string { - if schedulingHint == framework.QueueSkip { +func (p *PriorityQueue) requeuePodViaQueueingHint(logger klog.Logger, pInfo *framework.QueuedPodInfo, strategy queueingStrategy, event string) string { + if strategy == queueSkip { p.unschedulablePods.addOrUpdate(pInfo) metrics.SchedulerQueueIncomingPods.WithLabelValues("unschedulable", event).Inc() return unschedulablePods } pod := pInfo.Pod - if schedulingHint == framework.QueueAfterBackoff && p.isPodBackingoff(pInfo) { + if strategy == queueAfterBackoff && p.isPodBackingoff(pInfo) { if err := p.podBackoffQ.Add(pInfo); err != nil { logger.Error(err, "Error adding pod to the backoff queue, queue this Pod to unschedulable pod pool", "pod", klog.KObj(pod)) p.unschedulablePods.addOrUpdate(pInfo) @@ -1074,7 +1119,7 @@ func (p *PriorityQueue) requeuePodViaQueueingHint(logger klog.Logger, pInfo *fra return backoffQ } - // Reach here if schedulingHint is QueueImmediately, or schedulingHint is QueueAfterBackoff but the pod is not backing off. + // Reach here if schedulingHint is QueueImmediately, or schedulingHint is Queue but the pod is not backing off. added, err := p.addToActiveQ(logger, pInfo) if err != nil { @@ -1099,7 +1144,7 @@ func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(logger klog.Logger, podIn activated := false for _, pInfo := range podInfoList { schedulingHint := p.isPodWorthRequeuing(logger, pInfo, event, oldObj, newObj) - if schedulingHint == framework.QueueSkip { + if schedulingHint == queueSkip { // QueueingHintFn determined that this Pod isn't worth putting to activeQ or backoffQ by this event. logger.V(5).Info("Event is not making pod schedulable", "pod", klog.KObj(pInfo.Pod), "event", event.Label) continue @@ -1115,8 +1160,7 @@ func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(logger klog.Logger, podIn p.moveRequestCycle = p.schedulingCycle - // (no need to check the feature gate because there is always no p.inFlightPods when the feature is disabled.) 
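requeuePodViaQueueingHint, also changed in the hunk above, is where the strategy finally picks a queue: queueSkip keeps the Pod in the unschedulable pool, queueAfterBackoff still honours any remaining backoff, and queueImmediately bypasses backoff entirely. A self-contained sketch of that routing, again with stand-in types rather than the real ones:

```go
package sketch

type queueingStrategy int

const (
	queueSkip queueingStrategy = iota
	queueAfterBackoff
	queueImmediately
)

// routePod mirrors requeuePodViaQueueingHint: queueSkip stays in the unschedulable pool,
// queueAfterBackoff respects a still-running backoff timer, and everything else
// (queueImmediately, or queueAfterBackoff once the backoff has expired) lands in activeQ.
func routePod(strategy queueingStrategy, stillBackingOff bool) string {
	switch {
	case strategy == queueSkip:
		return "unschedulablePods"
	case strategy == queueAfterBackoff && stillBackingOff:
		return "backoffQ"
	default:
		return "activeQ"
	}
}
```

Note also that Pop() now clears PendingPlugins alongside UnschedulablePlugins, so both sets only ever describe the most recent failed attempt, and the in-flight event tracking below is gated on isSchedulingQueueHintEnabled.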
- if len(p.inFlightPods) != 0 { + if p.isSchedulingQueueHintEnabled && len(p.inFlightPods) != 0 { logger.V(5).Info("Event received while pods are in flight", "event", event.Label, "numPods", len(p.inFlightPods)) // AddUnschedulableIfNotPresent might get called for in-flight Pods later, and in // AddUnschedulableIfNotPresent we need to know whether events were diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go index 90072f26b95d7..5a35c197a8125 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue_test.go +++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go @@ -87,14 +87,11 @@ var ( cmpopts.IgnoreFields(nominator{}, "podLister", "lock"), } - queueHintReturnQueueAfterBackoff = func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) framework.QueueingHint { - return framework.QueueAfterBackoff + queueHintReturnQueue = func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { + return framework.Queue, nil } - queueHintReturnQueueImmediately = func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) framework.QueueingHint { - return framework.QueueImmediately - } - queueHintReturnQueueSkip = func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) framework.QueueingHint { - return framework.QueueSkip + queueHintReturnSkip = func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { + return framework.QueueSkip, nil } ) @@ -244,7 +241,7 @@ func Test_InFlightPods(t *testing.T) { AssignedPodAdd: { { PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueSkip, + QueueingHintFn: queueHintReturnSkip, }, }, }, @@ -340,7 +337,7 @@ func Test_InFlightPods(t *testing.T) { AssignedPodAdd: { { PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueSkip, + QueueingHintFn: queueHintReturnSkip, }, }, }, @@ -364,7 +361,7 @@ func Test_InFlightPods(t *testing.T) { AssignedPodAdd: { { PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueImmediately, + QueueingHintFn: queueHintReturnQueue, }, }, }, @@ -389,14 +386,14 @@ func Test_InFlightPods(t *testing.T) { { // It will be ignored because the event is not NodeAdd. 
PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueImmediately, + QueueingHintFn: queueHintReturnQueue, }, }, }, }, }, { - name: "pod is enqueued to unschedulable pod pool because the failed plugin has a hint fn but it returns QueueSkip", + name: "pod is enqueued to unschedulable pod pool because the failed plugin has a hint fn but it returns Skip", isSchedulingQueueHintEnabled: true, initialPods: []*v1.Pod{pod}, actions: []action{ @@ -414,20 +411,24 @@ func Test_InFlightPods(t *testing.T) { AssignedPodAdd: { { PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueSkip, + QueueingHintFn: queueHintReturnSkip, }, }, }, }, }, { - name: "pod is enqueued to activeQ because the failed plugin has a hint fn and it returns QueueImmediately", + name: "pod is enqueued to activeQ because the Pending plugins has a hint fn and it returns Queue", isSchedulingQueueHintEnabled: true, initialPods: []*v1.Pod{pod}, actions: []action{ {podPopped: pod}, {eventHappens: &AssignedPodAdd}, - {podEnqueued: newQueuedPodInfoForLookup(pod, "fooPlugin1", "fooPlugin2", "fooPlugin3")}, + {podEnqueued: &framework.QueuedPodInfo{ + PodInfo: mustNewPodInfo(pod), + UnschedulablePlugins: sets.New("fooPlugin2", "fooPlugin3"), + PendingPlugins: sets.New("fooPlugin1"), + }}, }, wantActiveQPodNames: []string{"targetpod"}, wantInFlightPods: nil, @@ -436,26 +437,24 @@ func Test_InFlightPods(t *testing.T) { "": { AssignedPodAdd: { { - // it will be ignored because the hint fn returns QueueSkip that is weaker than queueHintReturnQueueImmediately from fooPlugin1. PluginName: "fooPlugin3", - QueueingHintFn: queueHintReturnQueueSkip, + QueueingHintFn: queueHintReturnSkip, }, { - // it will be ignored because the hint fn returns QueueAfterBackoff that is weaker than queueHintReturnQueueImmediately from fooPlugin1. PluginName: "fooPlugin2", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, { // The hint fn tells that this event makes a Pod scheudlable immediately. PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueImmediately, + QueueingHintFn: queueHintReturnQueue, }, }, }, }, }, { - name: "pod is enqueued to backoffQ because the failed plugin has a hint fn and it returns QueueAfterBackoff", + name: "pod is enqueued to backoffQ because the failed plugin has a hint fn and it returns Queue", isSchedulingQueueHintEnabled: true, initialPods: []*v1.Pod{pod}, actions: []action{ @@ -470,21 +469,21 @@ func Test_InFlightPods(t *testing.T) { "": { AssignedPodAdd: { { - // it will be ignored because the hint fn returns QueueSkip that is weaker than queueHintReturnQueueAfterBackoff from fooPlugin1. + // it will be ignored because the hint fn returns Skip that is weaker than queueHintReturnQueue from fooPlugin1. PluginName: "fooPlugin2", - QueueingHintFn: queueHintReturnQueueSkip, + QueueingHintFn: queueHintReturnSkip, }, { - // The hint fn tells that this event makes a Pod scheudlable. + // The hint fn tells that this event makes a Pod schedulable. 
PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, }, }, }, }, { - name: "pod is enqueued to activeQ because the failed plugin has a hint fn and it returns QueueImmediately for a concurrent event that was received while some other pod was in flight", + name: "pod is enqueued to activeQ because the failed plugin has a hint fn and it returns Queue for a concurrent event that was received while some other pod was in flight", isSchedulingQueueHintEnabled: true, initialPods: []*v1.Pod{pod, pod2}, actions: []action{ @@ -494,15 +493,18 @@ func Test_InFlightPods(t *testing.T) { {eventHappens: &AssignedPodAdd}, {callback: func(t *testing.T, q *PriorityQueue) { logger, _ := ktesting.NewTestContext(t) - if err := q.AddUnschedulableIfNotPresent(logger, poppedPod, q.SchedulingCycle()); err != nil { - t.Errorf("Unexpected error from AddUnschedulableIfNotPresent: %v", err) + err := q.AddUnschedulableIfNotPresent(logger, poppedPod, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) } }}, {callback: func(t *testing.T, q *PriorityQueue) { logger, _ := ktesting.NewTestContext(t) - poppedPod2.UnschedulablePlugins = sets.New("fooPlugin1", "fooPlugin2", "fooPlugin3") - if err := q.AddUnschedulableIfNotPresent(logger, poppedPod2, q.SchedulingCycle()); err != nil { - t.Errorf("Unexpected error from AddUnschedulableIfNotPresent: %v", err) + poppedPod2.UnschedulablePlugins = sets.New("fooPlugin2", "fooPlugin3") + poppedPod2.PendingPlugins = sets.New("fooPlugin1") + err := q.AddUnschedulableIfNotPresent(logger, poppedPod2, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) } }}, }, @@ -515,32 +517,34 @@ func Test_InFlightPods(t *testing.T) { { // it will be ignored because the hint fn returns QueueSkip that is weaker than queueHintReturnQueueImmediately from fooPlugin1. PluginName: "fooPlugin3", - QueueingHintFn: queueHintReturnQueueSkip, + QueueingHintFn: queueHintReturnSkip, }, { - // it will be ignored because the hint fn returns QueueAfterBackoff that is weaker than queueHintReturnQueueImmediately from fooPlugin1. + // it will be ignored because the fooPlugin2 is registered in UnschedulablePlugins and it's interpret as Queue that is weaker than QueueImmediately from fooPlugin1. PluginName: "fooPlugin2", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, { - // The hint fn tells that this event makes a Pod scheudlable immediately. + // The hint fn tells that this event makes a Pod scheudlable. + // Given fooPlugin1 is registered as Pendings, we interpret Queue as queueImmediately. PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueImmediately, + QueueingHintFn: queueHintReturnQueue, }, }, }, }, }, { - name: "popped pod must have empty UnschedulablePlugins", + name: "popped pod must have empty UnschedulablePlugins and PendingPlugins", isSchedulingQueueHintEnabled: true, initialPods: []*v1.Pod{pod}, actions: []action{ {callback: func(t *testing.T, q *PriorityQueue) { poppedPod = popPod(t, q, pod) }}, {callback: func(t *testing.T, q *PriorityQueue) { logger, _ := ktesting.NewTestContext(t) - // Unschedulable. - poppedPod.UnschedulablePlugins = sets.New("fooPlugin1") + // Unschedulable due to PendingPlugins. 
+ poppedPod.PendingPlugins = sets.New("fooPlugin1") + poppedPod.UnschedulablePlugins = sets.New("fooPlugin2") if err := q.AddUnschedulableIfNotPresent(logger, poppedPod, q.SchedulingCycle()); err != nil { t.Errorf("Unexpected error from AddUnschedulableIfNotPresent: %v", err) } @@ -566,7 +570,7 @@ func Test_InFlightPods(t *testing.T) { { // The hint fn tells that this event makes a Pod scheudlable immediately. PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueImmediately, + QueueingHintFn: queueHintReturnQueue, }, }, }, @@ -597,7 +601,10 @@ func Test_InFlightPods(t *testing.T) { case action.eventHappens != nil: q.MoveAllToActiveOrBackoffQueue(logger, *action.eventHappens, nil, nil, nil) case action.podEnqueued != nil: - q.AddUnschedulableIfNotPresent(logger, action.podEnqueued, q.SchedulingCycle()) + err := q.AddUnschedulableIfNotPresent(logger, action.podEnqueued, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } case action.callback != nil: action.callback(t, q) } @@ -689,9 +696,9 @@ func TestPop(t *testing.T) { "": { PvAdd: { { - // The hint fn tells that this event makes a Pod scheudlable immediately. + // The hint fn tells that this event makes a Pod scheudlable. PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueImmediately, + QueueingHintFn: queueHintReturnQueue, }, }, }, @@ -708,7 +715,8 @@ func TestPop(t *testing.T) { // Simulate failed attempt that makes the pod unschedulable. poppedPod := popPod(t, q, pod) - poppedPod.UnschedulablePlugins = sets.New("fooPlugin1") + // We put register the plugin to PendingPlugins so that it's interpreted as queueImmediately and skip backoff. + poppedPod.PendingPlugins = sets.New("fooPlugin1") if err := q.AddUnschedulableIfNotPresent(logger, poppedPod, q.SchedulingCycle()); err != nil { t.Errorf("Unexpected error from AddUnschedulableIfNotPresent: %v", err) } @@ -718,8 +726,8 @@ func TestPop(t *testing.T) { // Now check result of Pop. 
poppedPod = popPod(t, q, pod) - if len(poppedPod.UnschedulablePlugins) > 0 { - t.Errorf("QueuedPodInfo from Pop should have empty UnschedulablePlugins, got instead: %+v", poppedPod) + if len(poppedPod.PendingPlugins) > 0 { + t.Errorf("QueuedPodInfo from Pop should have empty PendingPlugins, got instead: %+v", poppedPod) } }) } @@ -739,7 +747,10 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { } q.Add(logger, highPriNominatedPodInfo.Pod) - q.AddUnschedulableIfNotPresent(logger, newQueuedPodInfoForLookup(unschedulablePodInfo.Pod), q.SchedulingCycle()) + err := q.AddUnschedulableIfNotPresent(logger, newQueuedPodInfoForLookup(unschedulablePodInfo.Pod, "plugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } expectedNominatedPods := &nominator{ nominatedPodToNode: map[types.UID]string{ unschedulablePodInfo.Pod.UID: "node1", @@ -813,8 +824,9 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) { }, } - if err := q.AddUnschedulableIfNotPresent(logger, newQueuedPodInfoForLookup(unschedulablePod), oldCycle); err != nil { - t.Errorf("Failed to call AddUnschedulableIfNotPresent(%v): %v", unschedulablePod.Name, err) + err := q.AddUnschedulableIfNotPresent(logger, newQueuedPodInfoForLookup(unschedulablePod, "plugin"), oldCycle) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) } } @@ -907,7 +919,10 @@ func TestPriorityQueue_Update(t *testing.T) { if p, err := q.Pop(); err != nil || p.Pod != medPriorityPodInfo.Pod { t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPodInfo.Pod.Name, p.Pod.Name) } - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(medPriorityPodInfo.Pod), q.SchedulingCycle()) + err = q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(medPriorityPodInfo.Pod, "plugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } if len(q.unschedulablePods.podInfoMap) != 1 { t.Error("Expected unschedulablePods to be 1.") } @@ -920,9 +935,20 @@ func TestPriorityQueue_Update(t *testing.T) { t.Errorf("Expected: %v after Pop, but got: %v", updatedPod.Name, podGotFromBackoffQ.Name) } + // To simulate the pod is failed in scheduling in the real world, Pop() the pod from activeQ before testing AddUnschedulableIfNotPresent. 
+ err = q.activeQ.Add(podInfo) + if err != nil { + t.Fatalf("unexpected error from activeQ.Add: %v", err) + } + if p, err := q.Pop(); err != nil || p.Pod != medPriorityPodInfo.Pod { + t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPodInfo.Pod.Name, p.Pod.Name) + } // updating a pod which is in unschedulable queue, and it is not backing off, // we will move it to active queue - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(medPriorityPodInfo.Pod), q.SchedulingCycle()) + err = q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(medPriorityPodInfo.Pod, "plugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } if len(q.unschedulablePods.podInfoMap) != 1 { t.Error("Expected unschedulablePods to be 1.") } @@ -1198,7 +1224,7 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) { if (k+1)%(j+1) == 0 { m[""][events[j]] = append(m[""][events[j]], &QueueingHintFunction{ PluginName: plugins[k], - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }) } } @@ -1231,7 +1257,10 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) { // Random case. podInfo = q.newQueuedPodInfo(p, plugins[j%len(plugins)]) } - q.AddUnschedulableIfNotPresent(logger, podInfo, q.SchedulingCycle()) + err := q.AddUnschedulableIfNotPresent(logger, podInfo, q.SchedulingCycle()) + if err != nil { + b.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } } b.StartTimer() @@ -1260,28 +1289,28 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueueWithQueueingHint(t *testing. expectedQ string }{ { - name: "QueueImmediately queues pod to activeQ", - podInfo: &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(p)}, - hint: queueHintReturnQueueImmediately, + name: "Queue queues pod to activeQ", + podInfo: &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(p), PendingPlugins: sets.New("foo")}, + hint: queueHintReturnQueue, expectedQ: activeQ, }, { - name: "QueueAfterBackoff queues pod to backoffQ if Pod is backing off", - podInfo: &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(p)}, - hint: queueHintReturnQueueAfterBackoff, + name: "Queue queues pod to backoffQ if Pod is backing off", + podInfo: &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(p), UnschedulablePlugins: sets.New("foo")}, + hint: queueHintReturnQueue, expectedQ: backoffQ, }, { - name: "QueueAfterBackoff queues pod to activeQ if Pod is not backing off", - podInfo: &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(p)}, - hint: queueHintReturnQueueAfterBackoff, + name: "Queue queues pod to activeQ if Pod is not backing off", + podInfo: &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(p), UnschedulablePlugins: sets.New("foo")}, + hint: queueHintReturnQueue, duration: DefaultPodInitialBackoffDuration, // backoff is finished expectedQ: activeQ, }, { - name: "QueueSkip queues pod to unschedulablePods", - podInfo: &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(p)}, - hint: queueHintReturnQueueSkip, + name: "Skip queues pod to unschedulablePods", + podInfo: &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(p), UnschedulablePlugins: sets.New("foo")}, + hint: queueHintReturnSkip, expectedQ: unschedulablePods, }, } @@ -1304,8 +1333,10 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueueWithQueueingHint(t *testing. 
if p, err := q.Pop(); err != nil || p.Pod != test.podInfo.Pod { t.Errorf("Expected: %v after Pop, but got: %v", test.podInfo.Pod.Name, p.Pod.Name) } - q.AddUnschedulableIfNotPresent(logger, test.podInfo, q.SchedulingCycle()) - + err := q.AddUnschedulableIfNotPresent(logger, test.podInfo, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } cl.Step(test.duration) q.MoveAllToActiveOrBackoffQueue(logger, NodeAdd, nil, nil, nil) @@ -1334,7 +1365,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) { m[""][NodeAdd] = []*QueueingHintFunction{ { PluginName: "fooPlugin", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, } q := NewTestQueue(ctx, newDefaultQueueSort(), WithClock(c), WithQueueingHintMapPerProfile(m)) @@ -1349,8 +1380,14 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) { t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPodInfo.Pod.Name, p.Pod.Name) } expectInFlightPods(t, q, unschedulablePodInfo.Pod.UID, highPriorityPodInfo.Pod.UID) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fooPlugin"), q.SchedulingCycle()) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(highPriorityPodInfo.Pod, "fooPlugin"), q.SchedulingCycle()) + err := q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fooPlugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } + err = q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(highPriorityPodInfo.Pod, "fooPlugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } expectInFlightPods(t, q) // Construct a Pod, but don't associate its scheduler failure to any plugin hpp1 := clonePod(highPriorityPodInfo.Pod, "hpp1") @@ -1359,7 +1396,11 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) { t.Errorf("Expected: %v after Pop, but got: %v", hpp1, p.Pod.Name) } expectInFlightPods(t, q, hpp1.UID) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(hpp1), q.SchedulingCycle()) + // This Pod will go to backoffQ because no failure plugin is associated with it. + err = q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(hpp1), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } expectInFlightPods(t, q) // Construct another Pod, and associate its scheduler failure to plugin "barPlugin". hpp2 := clonePod(highPriorityPodInfo.Pod, "hpp2") @@ -1368,9 +1409,14 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) { t.Errorf("Expected: %v after Pop, but got: %v", hpp2, p.Pod.Name) } expectInFlightPods(t, q, hpp2.UID) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(hpp2, "barPlugin"), q.SchedulingCycle()) + // This Pod will go to the unschedulable Pod pool. + err = q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(hpp2, "barPlugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } expectInFlightPods(t, q) - // Pods is still backing off, move the pod into backoffQ. + // This NodeAdd event moves unschedulablePodInfo and highPriorityPodInfo to the backoffQ, + // because of the queueing hint function registered for NodeAdd/fooPlugin. 
q.MoveAllToActiveOrBackoffQueue(logger, NodeAdd, nil, nil, nil) q.Add(logger, medPriorityPodInfo.Pod) if q.activeQ.Len() != 1 { @@ -1408,24 +1454,42 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) { if p, err := q.Pop(); err != nil || p.Pod != hpp1 { t.Errorf("Expected: %v after Pop, but got: %v", hpp1, p.Pod.Name) } + unschedulableQueuedPodInfo := q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fooPlugin") + highPriorityQueuedPodInfo := q.newQueuedPodInfo(highPriorityPodInfo.Pod, "fooPlugin") + hpp1QueuedPodInfo := q.newQueuedPodInfo(hpp1) expectInFlightPods(t, q, medPriorityPodInfo.Pod.UID, unschedulablePodInfo.Pod.UID, highPriorityPodInfo.Pod.UID, hpp1.UID) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fooPlugin"), q.SchedulingCycle()) + err = q.AddUnschedulableIfNotPresent(logger, unschedulableQueuedPodInfo, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } expectInFlightPods(t, q, medPriorityPodInfo.Pod.UID, highPriorityPodInfo.Pod.UID, hpp1.UID) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(highPriorityPodInfo.Pod, "fooPlugin"), q.SchedulingCycle()) + err = q.AddUnschedulableIfNotPresent(logger, highPriorityQueuedPodInfo, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } expectInFlightPods(t, q, medPriorityPodInfo.Pod.UID, hpp1.UID) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(hpp1), q.SchedulingCycle()) + err = q.AddUnschedulableIfNotPresent(logger, hpp1QueuedPodInfo, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } expectInFlightPods(t, q, medPriorityPodInfo.Pod.UID) q.Add(logger, medPriorityPodInfo.Pod) - for _, pod := range []*v1.Pod{unschedulablePodInfo.Pod, highPriorityPodInfo.Pod, hpp1, hpp2} { + // hpp1 will go to backoffQ because no failure plugin is associated with it. + // All plugins other than hpp1 are enqueued to the unschedulable Pod pool. + for _, pod := range []*v1.Pod{unschedulablePodInfo.Pod, highPriorityPodInfo.Pod, hpp2} { if q.unschedulablePods.get(pod) == nil { t.Errorf("Expected %v in the unschedulablePods", pod.Name) } } + if _, ok, _ := q.podBackoffQ.Get(hpp1QueuedPodInfo); !ok { + t.Errorf("Expected %v in the podBackoffQ", hpp1.Name) + } + // Move clock by podInitialBackoffDuration, so that pods in the unschedulablePods would pass the backing off, // and the pods will be moved into activeQ. c.Step(q.podInitialBackoffDuration) + q.flushBackoffQCompleted(logger) // flush the completed backoffQ to move hpp1 to activeQ. q.MoveAllToActiveOrBackoffQueue(logger, NodeAdd, nil, nil, nil) - // hpp2 won't be moved regardless of its backoff timer. if q.activeQ.Len() != 4 { t.Errorf("Expected 4 items to be in activeQ, but got: %v", q.activeQ.Len()) } @@ -1433,6 +1497,10 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) { t.Errorf("Expected 0 item to be in podBackoffQ, but got: %v", q.podBackoffQ.Len()) } expectInFlightPods(t, q, medPriorityPodInfo.Pod.UID) + if len(q.unschedulablePods.podInfoMap) != 1 { + // hpp2 won't be moved regardless of its backoff timer. 
+ t.Errorf("Expected 1 item to be in unschedulablePods, but got: %v", len(q.unschedulablePods.podInfoMap)) + } } func clonePod(pod *v1.Pod, newName string) *v1.Pod { @@ -1479,7 +1547,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) { m[""][AssignedPodAdd] = []*QueueingHintFunction{ { PluginName: "fakePlugin", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, } q := NewTestQueue(ctx, newDefaultQueueSort(), WithClock(c), WithQueueingHintMapPerProfile(m)) @@ -1493,8 +1561,14 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) { t.Errorf("Expected: %v after Pop, but got: %v", affinityPod.Name, p.Pod.Name) } q.Add(logger, medPriorityPodInfo.Pod) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fakePlugin"), q.SchedulingCycle()) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(affinityPod, "fakePlugin"), q.SchedulingCycle()) + err := q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fakePlugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } + err = q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(affinityPod, "fakePlugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } // Move clock to make the unschedulable pods complete backoff. c.Step(DefaultPodInitialBackoffDuration + time.Second) @@ -1615,8 +1689,14 @@ func TestPriorityQueue_PendingPods(t *testing.T) { t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPodInfo.Pod.Name, p.Pod.Name) } q.Add(logger, medPriorityPodInfo.Pod) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(unschedulablePodInfo.Pod), q.SchedulingCycle()) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(highPriorityPodInfo.Pod), q.SchedulingCycle()) + err := q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(unschedulablePodInfo.Pod, "plugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } + err = q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(highPriorityPodInfo.Pod, "plugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } expectedSet := makeSet([]*v1.Pod{medPriorityPodInfo.Pod, unschedulablePodInfo.Pod, highPriorityPodInfo.Pod}) gotPods, gotSummary := q.PendingPods() @@ -1904,8 +1984,12 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) { Message: "fake scheduling failure", LastProbeTime: metav1.Now(), }) + p1.UnschedulablePlugins = sets.New("plugin") // Put in the unschedulable queue. - q.AddUnschedulableIfNotPresent(logger, p1, q.SchedulingCycle()) + err = q.AddUnschedulableIfNotPresent(logger, p1, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } c.Step(DefaultPodInitialBackoffDuration) // Move all unschedulable pods to the active queue. 
q.MoveAllToActiveOrBackoffQueue(logger, UnschedulableTimeout, nil, nil, nil) @@ -1952,7 +2036,10 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) { t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Pod.Name) } // Put in the unschedulable queue - q.AddUnschedulableIfNotPresent(logger, newQueuedPodInfoForLookup(unschedulablePod), q.SchedulingCycle()) + err := q.AddUnschedulableIfNotPresent(logger, newQueuedPodInfoForLookup(unschedulablePod, "plugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } // Move clock to make the unschedulable pods complete backoff. c.Step(DefaultPodInitialBackoffDuration + time.Second) // Move all unschedulable pods to the active queue. @@ -1982,7 +2069,10 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) { }) // And then, put unschedulable pod to the unschedulable queue - q.AddUnschedulableIfNotPresent(logger, newQueuedPodInfoForLookup(unschedulablePod), q.SchedulingCycle()) + err = q.AddUnschedulableIfNotPresent(logger, newQueuedPodInfoForLookup(unschedulablePod, "plugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } // Move clock to make the unschedulable pods complete backoff. c.Step(DefaultPodInitialBackoffDuration + time.Second) // Move all unschedulable pods to the active queue. @@ -2028,7 +2118,10 @@ func TestHighPriorityBackoff(t *testing.T) { Message: "fake scheduling failure", }) // Put in the unschedulable queue. - q.AddUnschedulableIfNotPresent(logger, p, q.SchedulingCycle()) + err = q.AddUnschedulableIfNotPresent(logger, p, q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } // Move all unschedulable pods to the active queue. 
q.MoveAllToActiveOrBackoffQueue(logger, TestEvent, nil, nil, nil) @@ -2049,7 +2142,7 @@ func TestHighPriorityFlushUnschedulablePodsLeftover(t *testing.T) { m[""][NodeAdd] = []*QueueingHintFunction{ { PluginName: "fakePlugin", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, } logger, ctx := ktesting.NewTestContext(t) @@ -2084,8 +2177,14 @@ func TestHighPriorityFlushUnschedulablePodsLeftover(t *testing.T) { if p, err := q.Pop(); err != nil || p.Pod != midPod { t.Errorf("Expected: %v after Pop, but got: %v", midPod.Name, p.Pod.Name) } - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(highPod, "fakePlugin"), q.SchedulingCycle()) - q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(midPod, "fakePlugin"), q.SchedulingCycle()) + err := q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(highPod, "fakePlugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } + err = q.AddUnschedulableIfNotPresent(logger, q.newQueuedPodInfo(midPod, "fakePlugin"), q.SchedulingCycle()) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } c.Step(DefaultPodMaxInUnschedulablePodsDuration + time.Second) q.flushUnschedulablePodsLeftover(logger) @@ -2159,7 +2258,7 @@ func TestPriorityQueue_initPodMaxInUnschedulablePodsDuration(t *testing.T) { var podInfoList []*framework.QueuedPodInfo for i, op := range test.operations { - op(logger, queue, test.operands[i]) + op(t, logger, queue, test.operands[i]) } expectedLen := len(test.expected) @@ -2182,31 +2281,58 @@ func TestPriorityQueue_initPodMaxInUnschedulablePodsDuration(t *testing.T) { } } -type operation func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) +type operation func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) var ( - add = func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { - queue.Add(logger, pInfo.Pod) + add = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + if err := queue.Add(logger, pInfo.Pod); err != nil { + t.Fatalf("Unexpected error during Add: %v", err) + } } - addUnschedulablePodBackToUnschedulablePods = func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + popAndRequeueAsUnschedulable = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { // To simulate the pod is failed in scheduling in the real world, Pop() the pod from activeQ before AddUnschedulableIfNotPresent() below. - queue.activeQ.Add(queue.newQueuedPodInfo(pInfo.Pod)) - if p, err := queue.Pop(); err != nil || p.Pod != pInfo.Pod { - panic(fmt.Sprintf("Expected: %v after Pop, but got: %v", pInfo.Pod.Name, p.Pod.Name)) + // UnschedulablePlugins will get cleared by Pop, so make a copy first. + unschedulablePlugins := pInfo.UnschedulablePlugins.Clone() + if err := queue.activeQ.Add(queue.newQueuedPodInfo(pInfo.Pod)); err != nil { + t.Fatalf("Unexpected error during Add: %v", err) + } + p, err := queue.Pop() + if err != nil { + t.Fatalf("Unexpected error during Pop: %v", err) + } + if p.Pod != pInfo.Pod { + t.Fatalf("Expected: %v after Pop, but got: %v", pInfo.Pod.Name, p.Pod.Name) + } + // Simulate plugins that are waiting for some events. 
+ p.UnschedulablePlugins = unschedulablePlugins + if err := queue.AddUnschedulableIfNotPresent(logger, p, 1); err != nil { + t.Fatalf("Unexpected error during AddUnschedulableIfNotPresent: %v", err) } - - queue.AddUnschedulableIfNotPresent(logger, pInfo, 1) } - addUnschedulablePodBackToBackoffQ = func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { - queue.AddUnschedulableIfNotPresent(logger, pInfo, 1) + popAndRequeueAsBackoff = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + // To simulate the pod is failed in scheduling in the real world, Pop() the pod from activeQ before AddUnschedulableIfNotPresent() below. + if err := queue.activeQ.Add(queue.newQueuedPodInfo(pInfo.Pod)); err != nil { + t.Fatalf("Unexpected error during Add: %v", err) + } + p, err := queue.Pop() + if err != nil { + t.Fatalf("Unexpected error during Pop: %v", err) + } + if p.Pod != pInfo.Pod { + t.Fatalf("Expected: %v after Pop, but got: %v", pInfo.Pod.Name, p.Pod.Name) + } + // When there is no known unschedulable plugin, pods always go to the backoff queue. + if err := queue.AddUnschedulableIfNotPresent(logger, p, 1); err != nil { + t.Fatalf("Unexpected error during AddUnschedulableIfNotPresent: %v", err) + } } - addPodActiveQ = func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + addPodActiveQ = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { queue.activeQ.Add(pInfo) } - updatePodActiveQ = func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + updatePodActiveQ = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { queue.activeQ.Update(pInfo) } - addPodUnschedulablePods = func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + addPodUnschedulablePods = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { if !pInfo.Gated { // Update pod condition to unschedulable. 
podutil.UpdatePodCondition(&pInfo.Pod.Status, &v1.PodCondition{ @@ -2218,28 +2344,28 @@ var ( } queue.unschedulablePods.addOrUpdate(pInfo) } - deletePod = func(_ klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + deletePod = func(t *testing.T, _ klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { queue.Delete(pInfo.Pod) } - updatePodQueueable = func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + updatePodQueueable = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { newPod := pInfo.Pod.DeepCopy() newPod.Labels = map[string]string{"queueable": ""} queue.Update(logger, pInfo.Pod, newPod) } - addPodBackoffQ = func(logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { + addPodBackoffQ = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, pInfo *framework.QueuedPodInfo) { queue.podBackoffQ.Add(pInfo) } - moveAllToActiveOrBackoffQ = func(logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) { + moveAllToActiveOrBackoffQ = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) { queue.MoveAllToActiveOrBackoffQueue(logger, UnschedulableTimeout, nil, nil, nil) } - flushBackoffQ = func(logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) { + flushBackoffQ = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) { queue.clock.(*testingclock.FakeClock).Step(2 * time.Second) queue.flushBackoffQCompleted(logger) } - moveClockForward = func(logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) { + moveClockForward = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) { queue.clock.(*testingclock.FakeClock).Step(2 * time.Second) } - flushUnschedulerQ = func(logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) { + flushUnschedulerQ = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) { queue.clock.(*testingclock.FakeClock).Step(queue.podMaxInUnschedulablePodsDuration) queue.flushUnschedulablePodsLeftover(logger) } @@ -2317,7 +2443,7 @@ func TestPodTimestamp(t *testing.T) { var podInfoList []*framework.QueuedPodInfo for i, op := range test.operations { - op(logger, queue, test.operands[i]) + op(t, logger, queue, test.operands[i]) } expectedLen := len(test.expected) @@ -2602,7 +2728,7 @@ scheduler_plugin_execution_duration_seconds_count{extension_point="PreEnqueue",p queue := NewTestQueue(ctx, newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp)), WithPreEnqueuePluginMap(m), WithPluginMetricsSamplePercent(test.pluginMetricsSamplePercent), WithMetricsRecorder(*recorder)) for i, op := range test.operations { for _, pInfo := range test.operands[i] { - op(logger, queue, pInfo) + op(t, logger, queue, pInfo) } } @@ -2649,6 +2775,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) { t.Fatalf("Failed to pop a pod %v", err) } + pInfo.UnschedulablePlugins = sets.New("plugin") queue.AddUnschedulableIfNotPresent(logger, pInfo, 1) // Override clock to exceed the DefaultPodMaxInUnschedulablePodsDuration so that unschedulable pods // will be moved to activeQ @@ -2668,6 +2795,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) { t.Fatalf("Failed to pop a pod %v", err) } + pInfo.UnschedulablePlugins = sets.New("plugin") queue.AddUnschedulableIfNotPresent(logger, pInfo, 1) // Override clock to exceed the DefaultPodMaxInUnschedulablePodsDuration so that 
unschedulable pods // will be moved to activeQ @@ -2758,7 +2886,7 @@ func TestIncomingPodsMetrics(t *testing.T) { { name: "add pods to unschedulablePods", operations: []operation{ - addUnschedulablePodBackToUnschedulablePods, + popAndRequeueAsUnschedulable, }, want: ` scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3 @@ -2767,7 +2895,7 @@ func TestIncomingPodsMetrics(t *testing.T) { { name: "add pods to unschedulablePods and then move all to backoffQ", operations: []operation{ - addUnschedulablePodBackToUnschedulablePods, + popAndRequeueAsUnschedulable, moveAllToActiveOrBackoffQ, }, want: ` scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3 @@ -2777,7 +2905,7 @@ func TestIncomingPodsMetrics(t *testing.T) { { name: "add pods to unschedulablePods and then move all to activeQ", operations: []operation{ - addUnschedulablePodBackToUnschedulablePods, + popAndRequeueAsUnschedulable, moveClockForward, moveAllToActiveOrBackoffQ, }, @@ -2788,7 +2916,7 @@ func TestIncomingPodsMetrics(t *testing.T) { { name: "make some pods subject to backoff and add them to backoffQ, then flush backoffQ", operations: []operation{ - addUnschedulablePodBackToBackoffQ, + popAndRequeueAsBackoff, moveClockForward, flushBackoffQ, }, @@ -2807,7 +2935,7 @@ func TestIncomingPodsMetrics(t *testing.T) { queue := NewTestQueue(ctx, newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp))) for _, op := range test.operations { for _, pInfo := range pInfos { - op(logger, queue, pInfo) + op(t, logger, queue, pInfo) } } metricName := metrics.SchedulerSubsystem + "_" + metrics.SchedulerQueueIncomingPods.Name @@ -2857,8 +2985,9 @@ func TestBackOffFlow(t *testing.T) { if podInfo.Attempts != i+1 { t.Errorf("got attempts %d, want %d", podInfo.Attempts, i+1) } - if err := q.AddUnschedulableIfNotPresent(logger, podInfo, int64(i)); err != nil { - t.Fatal(err) + err = q.AddUnschedulableIfNotPresent(logger, podInfo, int64(i)) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) } // An event happens. @@ -2947,7 +3076,11 @@ func TestMoveAllToActiveOrBackoffQueue_PreEnqueueChecks(t *testing.T) { if p, err := q.Pop(); err != nil || p.Pod != podInfo.Pod { t.Errorf("Expected: %v after Pop, but got: %v", podInfo.Pod.Name, p.Pod.Name) } - q.AddUnschedulableIfNotPresent(logger, podInfo, q.schedulingCycle) + podInfo.UnschedulablePlugins = sets.New("plugin") + err := q.AddUnschedulableIfNotPresent(logger, podInfo, q.schedulingCycle) + if err != nil { + t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err) + } // NOTE: On Windows, time.Now() is not as precise, 2 consecutive calls may return the same timestamp, // resulting in 0 time delta / latency. This will cause the pods to be backed off in a random // order, which would cause this test to fail, since the expectation is for them to be backed off @@ -3050,17 +3183,17 @@ func mustNewPodInfo(pod *v1.Pod) *framework.PodInfo { // Test_isPodWorthRequeuing tests isPodWorthRequeuing function. 
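The test helpers earlier in this file already show the new QueueingHintFn shape: it returns a (QueueingHint, error) pair instead of a bare hint. For readers writing plugins against this, a hypothetical hint function in that shape might look like the following; the plugin logic here is invented for illustration, only the signature and the Queue/QueueSkip values come from this change.

```go
package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"

	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// isSchedulableAfterNodeAdd is a hypothetical QueueingHintFn written against the new
// signature: it asks for a requeue when a schedulable Node is added, skips otherwise,
// and returns an error for an unexpected object type (which the queue now interprets
// as Queue so the Pod is not stranded in the unschedulable pool).
func isSchedulableAfterNodeAdd(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
	node, ok := newObj.(*v1.Node)
	if !ok {
		return framework.Queue, fmt.Errorf("expected *v1.Node, got %T", newObj)
	}
	if node.Spec.Unschedulable {
		logger.V(5).Info("added node is marked unschedulable, ignoring", "node", klog.KObj(node), "pod", klog.KObj(pod))
		return framework.QueueSkip, nil
	}
	return framework.Queue, nil
}
```

Such a function is registered per event through QueueingHintFunction{PluginName: ..., QueueingHintFn: ...}, exactly as the fooPlugin fixtures do throughout these tests; the Test_isPodWorthRequeuing cases that follow exercise the resulting queueImmediately / queueAfterBackoff / queueSkip combinations.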
func Test_isPodWorthRequeuing(t *testing.T) { count := 0 - queueHintReturnQueueImmediately := func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) framework.QueueingHint { + queueHintReturnQueue := func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { count++ - return framework.QueueImmediately + return framework.Queue, nil } - queueHintReturnQueueSkip := func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) framework.QueueingHint { + queueHintReturnSkip := func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { count++ - return framework.QueueSkip + return framework.QueueSkip, nil } - queueHintReturnQueueAfterBackoff := func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) framework.QueueingHint { + queueHintReturnErr := func(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { count++ - return framework.QueueAfterBackoff + return framework.QueueSkip, fmt.Errorf("unexpected error") } tests := []struct { @@ -3069,20 +3202,20 @@ func Test_isPodWorthRequeuing(t *testing.T) { event framework.ClusterEvent oldObj interface{} newObj interface{} - expected framework.QueueingHint + expected queueingStrategy expectedExecutionCount int // expected total execution count of queueing hint function queueingHintMap QueueingHintMapPerProfile }{ { - name: "return QueueAfterBackoff when no queueing hint function is registered for the event", + name: "return Queue when no queueing hint function is registered for the event", podInfo: &framework.QueuedPodInfo{ UnschedulablePlugins: sets.New("fooPlugin1"), PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()), }, event: NodeAdd, oldObj: nil, - newObj: st.MakeNode().Node, - expected: framework.QueueSkip, + newObj: st.MakeNode().Obj(), + expected: queueSkip, expectedExecutionCount: 0, queueingHintMap: QueueingHintMapPerProfile{ "": { @@ -3091,95 +3224,116 @@ func Test_isPodWorthRequeuing(t *testing.T) { { // It will be ignored because the event is not NodeAdd. 
PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueImmediately, + QueueingHintFn: queueHintReturnQueue, }, }, }, }, }, { - name: "return QueueAfterBackoff when the event is wildcard", + name: "Treat the event as Queue when QueueHintFn returns error", + podInfo: &framework.QueuedPodInfo{ + UnschedulablePlugins: sets.New("fooPlugin1"), + PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()), + }, + event: NodeAdd, + oldObj: nil, + newObj: st.MakeNode().Obj(), + expected: queueAfterBackoff, + expectedExecutionCount: 1, + queueingHintMap: QueueingHintMapPerProfile{ + "": { + NodeAdd: { + { + PluginName: "fooPlugin1", + QueueingHintFn: queueHintReturnErr, + }, + }, + }, + }, + }, + { + name: "return Queue when the event is wildcard", podInfo: &framework.QueuedPodInfo{ UnschedulablePlugins: sets.New("fooPlugin1"), PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()), }, event: WildCardEvent, oldObj: nil, - newObj: st.MakeNode().Node, - expected: framework.QueueAfterBackoff, + newObj: st.MakeNode().Obj(), + expected: queueAfterBackoff, expectedExecutionCount: 0, queueingHintMap: QueueingHintMapPerProfile{}, }, { - name: "QueueImmediately is the highest priority", + name: "interprets Queue from the Pending plugin as queueImmediately", podInfo: &framework.QueuedPodInfo{ - UnschedulablePlugins: sets.New("fooPlugin1", "fooPlugin2", "fooPlugin3", "fooPlugin4"), + UnschedulablePlugins: sets.New("fooPlugin1", "fooPlugin3"), + PendingPlugins: sets.New("fooPlugin2"), PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()), }, event: NodeAdd, oldObj: nil, newObj: st.MakeNode().Node, - expected: framework.QueueImmediately, + expected: queueImmediately, expectedExecutionCount: 2, queueingHintMap: QueueingHintMapPerProfile{ "": { NodeAdd: { { - // executed - PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + PluginName: "fooPlugin1", + // It returns Queue and it's interpreted as queueAfterBackoff. + // But, the function continues to run other hints because the Pod has PendingPlugins, which can result in queueImmediately. + QueueingHintFn: queueHintReturnQueue, }, { - // executed - // But, no more queueing hint function is executed - // because the highest priority is QueueImmediately. - PluginName: "fooPlugin2", - QueueingHintFn: queueHintReturnQueueImmediately, + PluginName: "fooPlugin2", + // It's interpreted as queueImmediately. + // The function doesn't run other hints because queueImmediately is the highest priority. 
+ QueueingHintFn: queueHintReturnQueue, }, { PluginName: "fooPlugin3", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, { PluginName: "fooPlugin4", - QueueingHintFn: queueHintReturnQueueSkip, + QueueingHintFn: queueHintReturnErr, }, }, }, }, }, { - name: "QueueSkip is the lowest priority", + name: "interprets Queue from the Unschedulable plugin as queueAfterBackoff", podInfo: &framework.QueuedPodInfo{ - UnschedulablePlugins: sets.New("fooPlugin1", "fooPlugin2", "fooPlugin3"), + UnschedulablePlugins: sets.New("fooPlugin1", "fooPlugin2"), PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()), }, event: NodeAdd, oldObj: nil, - newObj: st.MakeNode().Node, - expected: framework.QueueAfterBackoff, - expectedExecutionCount: 3, + newObj: st.MakeNode().Obj(), + expected: queueAfterBackoff, + expectedExecutionCount: 2, queueingHintMap: QueueingHintMapPerProfile{ "": { NodeAdd: { { + // Skip will be ignored PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnSkip, }, { + // Skip will be ignored PluginName: "fooPlugin2", - QueueingHintFn: queueHintReturnQueueSkip, - }, - { - PluginName: "fooPlugin3", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, }, }, }, }, { - name: "Queueing hint function that isn't from the plugin, that is in the UnschedulablePlugins, is ignored", + name: "Queueing hint function that isn't from the plugin in UnschedulablePlugins/PendingPlugins is ignored", podInfo: &framework.QueuedPodInfo{ UnschedulablePlugins: sets.New("fooPlugin1", "fooPlugin2"), PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()), @@ -3187,60 +3341,62 @@ func Test_isPodWorthRequeuing(t *testing.T) { event: NodeAdd, oldObj: nil, newObj: st.MakeNode().Node, - expected: framework.QueueAfterBackoff, + expected: queueSkip, expectedExecutionCount: 2, queueingHintMap: QueueingHintMapPerProfile{ "": { NodeAdd: { { PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnSkip, }, { PluginName: "fooPlugin2", - QueueingHintFn: queueHintReturnQueueSkip, + QueueingHintFn: queueHintReturnSkip, }, { PluginName: "fooPlugin3", - QueueingHintFn: queueHintReturnQueueImmediately, // It'll be ignored. + QueueingHintFn: queueHintReturnQueue, // It'll be ignored. }, }, }, }, }, { - name: "If event is specific Node update event, queueing hint function for NodeUpdate/UpdateNodeLabel is executed", + name: "If event is specific Node update event, queueing hint function for NodeUpdate/UpdateNodeLabel is also executed", podInfo: &framework.QueuedPodInfo{ UnschedulablePlugins: sets.New("fooPlugin1", "fooPlugin2"), PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()), }, event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}, oldObj: nil, - newObj: st.MakeNode().Node, - expected: framework.QueueAfterBackoff, - expectedExecutionCount: 3, + newObj: st.MakeNode().Obj(), + expected: queueAfterBackoff, + expectedExecutionCount: 1, queueingHintMap: QueueingHintMapPerProfile{ "": { framework.ClusterEvent{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: { { - PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + PluginName: "fooPlugin1", + // It's only executed and interpreted as queueAfterBackoff. 
+ // The function doesn't run other hints because this Pod doesn't have PendingPlugins. + QueueingHintFn: queueHintReturnQueue, }, { PluginName: "fooPlugin2", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, }, framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Update}: { { PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, }, NodeAdd: { // not executed because NodeAdd is unrelated. { PluginName: "fooPlugin1", - QueueingHintFn: queueHintReturnQueueAfterBackoff, + QueueingHintFn: queueHintReturnQueue, }, }, }, diff --git a/pkg/scheduler/metrics/resources/resources_test.go b/pkg/scheduler/metrics/resources/resources_test.go index 34f82837a1abd..a1d2c1f3c44b5 100644 --- a/pkg/scheduler/metrics/resources/resources_test.go +++ b/pkg/scheduler/metrics/resources/resources_test.go @@ -32,6 +32,7 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/testutil" + "k8s.io/utils/ptr" ) type fakePodLister struct { @@ -108,10 +109,6 @@ kube_pod_resource_request{namespace="test",node="node-one",pod="foo",priority="" } func Test_podResourceCollector_CollectWithStability(t *testing.T) { - int32p := func(i int32) *int32 { - return &i - } - tests := []struct { name string @@ -291,7 +288,7 @@ func Test_podResourceCollector_CollectWithStability(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "foo"}, Spec: v1.PodSpec{ SchedulerName: "default-scheduler", - Priority: int32p(0), + Priority: ptr.To[int32](0), NodeName: "node-one", Containers: []v1.Container{ {Resources: v1.ResourceRequirements{Requests: v1.ResourceList{"cpu": resource.MustParse("1")}}}, diff --git a/pkg/scheduler/profile/profile_test.go b/pkg/scheduler/profile/profile_test.go index 6d0201bb24ae3..34eb0edbddca0 100644 --- a/pkg/scheduler/profile/profile_test.go +++ b/pkg/scheduler/profile/profile_test.go @@ -280,8 +280,8 @@ func (p *fakePlugin) Bind(context.Context, *framework.CycleState, *v1.Pod, strin return nil } -func newFakePlugin(name string) func(object runtime.Object, handle framework.Handle) (framework.Plugin, error) { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func newFakePlugin(name string) func(ctx context.Context, object runtime.Object, handle framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &fakePlugin{name: name}, nil } } diff --git a/pkg/scheduler/schedule_one.go b/pkg/scheduler/schedule_one.go index 89a4e0ae1c1b8..64c1b7c768162 100644 --- a/pkg/scheduler/schedule_one.go +++ b/pkg/scheduler/schedule_one.go @@ -206,15 +206,15 @@ func (sched *Scheduler) schedulingCycle( logger.Error(forgetErr, "Scheduler cache ForgetPod failed") } - if sts.IsUnschedulable() { + if sts.IsRejected() { fitErr := &framework.FitError{ NumAllNodes: 1, Pod: pod, Diagnosis: framework.Diagnosis{ - NodeToStatusMap: framework.NodeToStatusMap{scheduleResult.SuggestedHost: sts}, - UnschedulablePlugins: sets.New(sts.FailedPlugin()), + NodeToStatusMap: framework.NodeToStatusMap{scheduleResult.SuggestedHost: sts}, }, } + fitErr.Diagnosis.AddPluginStatus(sts) return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, framework.NewStatus(sts.Code()).WithError(fitErr) } return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, sts @@ -229,15 +229,15 @@ func (sched *Scheduler) 
schedulingCycle( logger.Error(forgetErr, "Scheduler cache ForgetPod failed") } - if runPermitStatus.IsUnschedulable() { + if runPermitStatus.IsRejected() { fitErr := &framework.FitError{ NumAllNodes: 1, Pod: pod, Diagnosis: framework.Diagnosis{ - NodeToStatusMap: framework.NodeToStatusMap{scheduleResult.SuggestedHost: runPermitStatus}, - UnschedulablePlugins: sets.New(runPermitStatus.FailedPlugin()), + NodeToStatusMap: framework.NodeToStatusMap{scheduleResult.SuggestedHost: runPermitStatus}, }, } + fitErr.Diagnosis.AddPluginStatus(runPermitStatus) return ScheduleResult{nominatingInfo: clearNominatedNode}, assumedPodInfo, framework.NewStatus(runPermitStatus.Code()).WithError(fitErr) } @@ -269,13 +269,13 @@ func (sched *Scheduler) bindingCycle( // Run "permit" plugins. if status := fwk.WaitOnPermit(ctx, assumedPod); !status.IsSuccess() { - if status.IsUnschedulable() { + if status.IsRejected() { fitErr := &framework.FitError{ NumAllNodes: 1, Pod: assumedPodInfo.Pod, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{scheduleResult.SuggestedHost: status}, - UnschedulablePlugins: sets.New(status.FailedPlugin()), + UnschedulablePlugins: sets.New(status.Plugin()), }, } return framework.NewStatus(status.Code()).WithError(fitErr) @@ -336,7 +336,7 @@ func (sched *Scheduler) handleBindingCycleError( // Avoid moving the assumed Pod itself as it's always Unschedulable. // It's intentional to "defer" this operation; otherwise MoveAllToActiveOrBackoffQueue() would // update `q.moveRequest` and thus move the assumed pod to backoffQ anyways. - if status.IsUnschedulable() { + if status.IsRejected() { defer sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, internalqueue.AssignedPodDelete, assumedPod, nil, func(pod *v1.Pod) bool { return assumedPod.UID != pod.UID }) @@ -435,8 +435,7 @@ func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.Diagnosis, error) { logger := klog.FromContext(ctx) diagnosis := framework.Diagnosis{ - NodeToStatusMap: make(framework.NodeToStatusMap), - UnschedulablePlugins: sets.New[string](), + NodeToStatusMap: make(framework.NodeToStatusMap), } allNodes, err := sched.nodeInfoSnapshot.NodeInfos().List() @@ -446,7 +445,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F // Run "prefilter" plugins. preRes, s := fwk.RunPreFilterPlugins(ctx, state, pod) if !s.IsSuccess() { - if !s.IsUnschedulable() { + if !s.IsRejected() { return nil, diagnosis, s.AsError() } // All nodes in NodeToStatusMap will have the same status so that they can be handled in the preemption. @@ -459,10 +458,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F msg := s.Message() diagnosis.PreFilterMsg = msg logger.V(5).Info("Status after running PreFilter plugins for pod", "pod", klog.KObj(pod), "status", msg) - // Status satisfying IsUnschedulable() gets injected into diagnosis.UnschedulablePlugins. 
- if s.FailedPlugin() != "" { - diagnosis.UnschedulablePlugins.Insert(s.FailedPlugin()) - } + diagnosis.AddPluginStatus(s) return nil, diagnosis, nil } @@ -490,7 +486,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F nodes = append(nodes, nInfo) } } - feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, diagnosis, nodes) + feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, &diagnosis, nodes) // always try to update the sched.nextStartNodeIndex regardless of whether an error has occurred // this is helpful to make sure that all the nodes have a chance to be searched processedNodes := len(feasibleNodes) + len(diagnosis.NodeToStatusMap) @@ -513,7 +509,7 @@ func (sched *Scheduler) evaluateNominatedNode(ctx context.Context, pod *v1.Pod, return nil, err } node := []*framework.NodeInfo{nodeInfo} - feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, diagnosis, node) + feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, &diagnosis, node) if err != nil { return nil, err } @@ -532,7 +528,7 @@ func (sched *Scheduler) findNodesThatPassFilters( fwk framework.Framework, state *framework.CycleState, pod *v1.Pod, - diagnosis framework.Diagnosis, + diagnosis *framework.Diagnosis, nodes []*framework.NodeInfo) ([]*v1.Node, error) { numAllNodes := len(nodes) numNodesToFind := sched.numFeasibleNodesToFind(fwk.PercentageOfNodesToScore(), int32(numAllNodes)) @@ -573,7 +569,7 @@ func (sched *Scheduler) findNodesThatPassFilters( } else { statusesLock.Lock() diagnosis.NodeToStatusMap[nodeInfo.Node().Name] = status - diagnosis.UnschedulablePlugins.Insert(status.FailedPlugin()) + diagnosis.AddPluginStatus(status) statusesLock.Unlock() } } @@ -963,7 +959,7 @@ func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framewo logger := klog.FromContext(ctx) reason := v1.PodReasonSchedulerError - if status.IsUnschedulable() { + if status.IsRejected() { reason = v1.PodReasonUnschedulable } @@ -982,6 +978,7 @@ func (sched *Scheduler) handleSchedulingFailure(ctx context.Context, fwk framewo logger.V(2).Info("Unable to schedule pod; no nodes are registered to the cluster; waiting", "pod", klog.KObj(pod)) } else if fitError, ok := err.(*framework.FitError); ok { // Inject UnschedulablePlugins to PodInfo, which will be used later for moving Pods between queues efficiently. 
podInfo.UnschedulablePlugins = fitError.Diagnosis.UnschedulablePlugins + podInfo.PendingPlugins = fitError.Diagnosis.PendingPlugins logger.V(2).Info("Unable to schedule pod; no fit; waiting", "pod", klog.KObj(pod), "err", errMsg) } else if apierrors.IsNotFound(err) { logger.V(2).Info("Unable to schedule pod, possibly due to node not found; waiting", "pod", klog.KObj(pod), "err", errMsg) diff --git a/pkg/scheduler/schedule_one_test.go b/pkg/scheduler/schedule_one_test.go index bfdb587c7ae32..846459d1dba67 100644 --- a/pkg/scheduler/schedule_one_test.go +++ b/pkg/scheduler/schedule_one_test.go @@ -53,6 +53,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread" @@ -64,12 +65,14 @@ import ( internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/profile" st "k8s.io/kubernetes/pkg/scheduler/testing" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" schedutil "k8s.io/kubernetes/pkg/scheduler/util" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( - testSchedulerName = "test-scheduler" + testSchedulerName = "test-scheduler" + mb int64 = 1024 * 1024 ) var ( @@ -140,7 +143,7 @@ func (f *fakeExtender) IsInterested(pod *v1.Pod) bool { type falseMapPlugin struct{} func newFalseMapPlugin() frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &falseMapPlugin{}, nil } } @@ -160,7 +163,7 @@ func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions { type numericMapPlugin struct{} func newNumericMapPlugin() frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &numericMapPlugin{}, nil } } @@ -182,7 +185,7 @@ func (pl *numericMapPlugin) ScoreExtensions() framework.ScoreExtensions { } // NewNoPodsFilterPlugin initializes a noPodsFilterPlugin and returns it. 
-func NewNoPodsFilterPlugin(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func NewNoPodsFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &noPodsFilterPlugin{}, nil } @@ -222,7 +225,7 @@ func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ *framewor } func newReverseNumericMapPlugin() frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &reverseNumericMapPlugin{}, nil } } @@ -251,7 +254,7 @@ func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleSta } func newTrueMapPlugin() frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &trueMapPlugin{}, nil } } @@ -268,7 +271,7 @@ func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, if len(nodeInfo.Pods) == 0 { return nil } - return framework.NewStatus(framework.Unschedulable, st.ErrReasonFake) + return framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake) } type fakeNodeSelectorArgs struct { @@ -290,7 +293,7 @@ func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ return nil } -func newFakeNodeSelector(args runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func newFakeNodeSelector(_ context.Context, args runtime.Object, _ framework.Handle) (framework.Plugin, error) { pl := &fakeNodeSelector{} if err := frameworkruntime.DecodeInto(args, &pl.fakeNodeSelectorArgs); err != nil { return nil, err @@ -332,7 +335,7 @@ func (f *fakeNodeSelectorDependOnPodAnnotation) Filter(_ context.Context, _ *fra return nil } -func newFakeNodeSelectorDependOnPodAnnotation(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func newFakeNodeSelectorDependOnPodAnnotation(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &fakeNodeSelectorDependOnPodAnnotation{}, nil } @@ -597,7 +600,7 @@ func TestSchedulerGuaranteeNonNilNodeInSchedulingCycle(t *testing.T) { go wait.Until(createPodsOneRound, 9*time.Millisecond, ctx.Done()) // Capture the events to wait all pods to be scheduled at least once. 
- allWaitSchedulingPods := sets.NewString() + allWaitSchedulingPods := sets.New[string]() for i := 0; i < waitSchedulingPodNumber; i++ { allWaitSchedulingPods.Insert(fmt.Sprintf("pod%d", i)) } @@ -633,7 +636,7 @@ func TestSchedulerScheduleOne(t *testing.T) { name string injectBindError error sendPod *v1.Pod - registerPluginFuncs []st.RegisterPluginFunc + registerPluginFuncs []tf.RegisterPluginFunc expectErrorPod *v1.Pod expectForgetPod *v1.Pod expectAssumedPod *v1.Pod @@ -646,8 +649,8 @@ func TestSchedulerScheduleOne(t *testing.T) { name: "error reserve pod", sendPod: podWithID("foo", ""), mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil}, - registerPluginFuncs: []st.RegisterPluginFunc{ - st.RegisterReservePlugin("FakeReserve", st.NewFakeReservePlugin(framework.NewStatus(framework.Error, "reserve error"))), + registerPluginFuncs: []tf.RegisterPluginFunc{ + tf.RegisterReservePlugin("FakeReserve", tf.NewFakeReservePlugin(framework.NewStatus(framework.Error, "reserve error"))), }, expectErrorPod: podWithID("foo", testNode.Name), expectForgetPod: podWithID("foo", testNode.Name), @@ -659,8 +662,8 @@ func TestSchedulerScheduleOne(t *testing.T) { name: "error permit pod", sendPod: podWithID("foo", ""), mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil}, - registerPluginFuncs: []st.RegisterPluginFunc{ - st.RegisterPermitPlugin("FakePermit", st.NewFakePermitPlugin(framework.NewStatus(framework.Error, "permit error"), time.Minute)), + registerPluginFuncs: []tf.RegisterPluginFunc{ + tf.RegisterPermitPlugin("FakePermit", tf.NewFakePermitPlugin(framework.NewStatus(framework.Error, "permit error"), time.Minute)), }, expectErrorPod: podWithID("foo", testNode.Name), expectForgetPod: podWithID("foo", testNode.Name), @@ -672,8 +675,8 @@ func TestSchedulerScheduleOne(t *testing.T) { name: "error prebind pod", sendPod: podWithID("foo", ""), mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil}, - registerPluginFuncs: []st.RegisterPluginFunc{ - st.RegisterPreBindPlugin("FakePreBind", st.NewFakePreBindPlugin(framework.AsStatus(preBindErr))), + registerPluginFuncs: []tf.RegisterPluginFunc{ + tf.RegisterPreBindPlugin("FakePreBind", tf.NewFakePreBindPlugin(framework.AsStatus(preBindErr))), }, expectErrorPod: podWithID("foo", testNode.Name), expectForgetPod: podWithID("foo", testNode.Name), @@ -747,12 +750,12 @@ func TestSchedulerScheduleOne(t *testing.T) { return true, gotBinding, item.injectBindError }) registerPluginFuncs := append(item.registerPluginFuncs, - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), ) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - fwk, err := st.NewFramework(ctx, + fwk, err := tf.NewFramework(ctx, registerPluginFuncs, testSchedulerName, frameworkruntime.WithClientSet(client), @@ -824,10 +827,10 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}} scache.AddNode(logger, &node) - fns := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), - 
st.RegisterPluginAsExtensions(nodeports.Name, nodeports.New, "Filter", "PreFilter"), + fns := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterPluginAsExtensions(nodeports.Name, nodeports.New, "Filter", "PreFilter"), } scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(ctx, t, queuedPodStore, scache, pod, &node, fns...) @@ -889,10 +892,10 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) { firstPod := podWithPort("pod.Name", "", 8080) node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}} scache.AddNode(logger, &node) - fns := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), - st.RegisterPluginAsExtensions(nodeports.Name, nodeports.New, "Filter", "PreFilter"), + fns := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterPluginAsExtensions(nodeports.Name, nodeports.New, "Filter", "PreFilter"), } scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(ctx, t, queuedPodStore, scache, firstPod, &node, fns...) @@ -910,7 +913,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) { NumAllNodes: 1, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - node.Name: framework.NewStatus(framework.Unschedulable, nodeports.ErrReason).WithFailedPlugin(nodeports.Name), + node.Name: framework.NewStatus(framework.Unschedulable, nodeports.ErrReason).WithPlugin(nodeports.Name), }, UnschedulablePlugins: sets.New(nodeports.Name), }, @@ -999,12 +1002,12 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) { framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceCPU), fmt.Sprintf("Insufficient %v", v1.ResourceMemory), - ).WithFailedPlugin(noderesources.Name) + ).WithPlugin(noderesources.Name) } - fns := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), - st.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"), + fns := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"), } informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objects...), 0) @@ -1231,10 +1234,10 @@ func TestSchedulerBinding(t *testing.T) { _, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() - fwk, err := st.NewFramework(ctx, - []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + fwk, err := tf.NewFramework(ctx, + []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, "", frameworkruntime.WithClientSet(client), frameworkruntime.WithEventRecorder(&events.FakeRecorder{})) if err != nil { t.Fatal(err) @@ -1586,7 +1589,7 @@ func Test_SelectHost(t *testing.T) { func TestFindNodesThatPassExtenders(t *testing.T) { tests 
:= []struct { name string - extenders []st.FakeExtender + extenders []tf.FakeExtender nodes []*v1.Node filteredNodesStatuses framework.NodeToStatusMap expectsErr bool @@ -1595,10 +1598,10 @@ func TestFindNodesThatPassExtenders(t *testing.T) { }{ { name: "error", - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.ErrorPredicateExtender}, + Predicates: []tf.FitPredicate{tf.ErrorPredicateExtender}, }, }, nodes: makeNodeList([]string{"a"}), @@ -1607,10 +1610,10 @@ func TestFindNodesThatPassExtenders(t *testing.T) { }, { name: "success", - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{st.TruePredicateExtender}, + Predicates: []tf.FitPredicate{tf.TruePredicateExtender}, }, }, nodes: makeNodeList([]string{"a"}), @@ -1621,10 +1624,10 @@ func TestFindNodesThatPassExtenders(t *testing.T) { }, { name: "unschedulable", - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { + Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { if node.Name == "a" { return framework.NewStatus(framework.Success) } @@ -1642,10 +1645,10 @@ func TestFindNodesThatPassExtenders(t *testing.T) { }, { name: "unschedulable and unresolvable", - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { + Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { if node.Name == "a" { return framework.NewStatus(framework.Success) } @@ -1667,10 +1670,10 @@ func TestFindNodesThatPassExtenders(t *testing.T) { }, { name: "extender may overwrite the statuses", - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { + Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { if node.Name == "a" { return framework.NewStatus(framework.Success) } @@ -1694,10 +1697,10 @@ func TestFindNodesThatPassExtenders(t *testing.T) { }, { name: "multiple extenders", - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { + Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { if node.Name == "a" { return framework.NewStatus(framework.Success) } @@ -1709,7 +1712,7 @@ func TestFindNodesThatPassExtenders(t *testing.T) { }, { ExtenderName: "FakeExtender1", - Predicates: []st.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { + Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { if node.Name == "a" { return framework.NewStatus(framework.Success) } @@ -1767,7 +1770,7 @@ func TestSchedulerSchedulePod(t *testing.T) { fts := feature.Features{} tests := []struct { name string - registerPlugins []st.RegisterPluginFunc + registerPlugins []tf.RegisterPluginFunc nodes []string pvcs []v1.PersistentVolumeClaim pod *v1.Pod @@ -1777,10 +1780,10 @@ func TestSchedulerSchedulePod(t *testing.T) { wErr error }{ { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("FalseFilter", 
st.NewFalseFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2"}, pod: st.MakePod().Name("2").UID("2").Obj(), @@ -1790,18 +1793,18 @@ func TestSchedulerSchedulePod(t *testing.T) { NumAllNodes: 2, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "node1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"), - "node2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"), + "node1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"), + "node2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"), }, UnschedulablePlugins: sets.New("FalseFilter"), }, }, }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2"}, pod: st.MakePod().Name("ignore").UID("ignore").Obj(), @@ -1811,10 +1814,10 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { // Fits on a node where the pod ID matches the node name - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("MatchFilter", st.NewMatchFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2"}, pod: st.MakePod().Name("node2").UID("node2").Obj(), @@ -1823,11 +1826,11 @@ func TestSchedulerSchedulePod(t *testing.T) { wErr: nil, }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"3", "2", "1"}, pod: st.MakePod().Name("ignore").UID("ignore").Obj(), @@ -1836,11 +1839,11 @@ func TestSchedulerSchedulePod(t *testing.T) { wErr: nil, }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("MatchFilter", st.NewMatchFilterPlugin), - st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, 
queuesort.New), + tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin), + tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"3", "2", "1"}, pod: st.MakePod().Name("2").UID("2").Obj(), @@ -1849,12 +1852,12 @@ func TestSchedulerSchedulePod(t *testing.T) { wErr: nil, }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), - st.RegisterScorePlugin("ReverseNumericMap", newReverseNumericMapPlugin(), 2), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), + tf.RegisterScorePlugin("ReverseNumericMap", newReverseNumericMapPlugin(), 2), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"3", "2", "1"}, pod: st.MakePod().Name("2").UID("2").Obj(), @@ -1863,12 +1866,12 @@ func TestSchedulerSchedulePod(t *testing.T) { wErr: nil, }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterFilterPlugin("FalseFilter", st.NewFalseFilterPlugin), - st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin), + tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"3", "2", "1"}, pod: st.MakePod().Name("2").UID("2").Obj(), @@ -1878,21 +1881,21 @@ func TestSchedulerSchedulePod(t *testing.T) { NumAllNodes: 3, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "3": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"), - "2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"), - "1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"), + "3": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"), + "2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"), + "1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"), }, UnschedulablePlugins: sets.New("FalseFilter"), }, }, }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("NoPodsFilter", NewNoPodsFilterPlugin), - st.RegisterFilterPlugin("MatchFilter", st.NewMatchFilterPlugin), - st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("NoPodsFilter", NewNoPodsFilterPlugin), + tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin), + 
tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, pods: []*v1.Pod{ st.MakePod().Name("2").UID("2").Node("2").Phase(v1.PodRunning).Obj(), @@ -1905,8 +1908,8 @@ func TestSchedulerSchedulePod(t *testing.T) { NumAllNodes: 2, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("MatchFilter"), - "2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("NoPodsFilter"), + "1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"), + "2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("NoPodsFilter"), }, UnschedulablePlugins: sets.New("MatchFilter", "NoPodsFilter"), }, @@ -1914,11 +1917,11 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { // Pod with existing PVC - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2"}, pvcs: []v1.PersistentVolumeClaim{ @@ -1934,11 +1937,11 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { // Pod with non existing PVC - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2"}, pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(), @@ -1948,8 +1951,8 @@ func TestSchedulerSchedulePod(t *testing.T) { NumAllNodes: 2, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithFailedPlugin("VolumeBinding"), - "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithFailedPlugin("VolumeBinding"), + "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithPlugin("VolumeBinding"), + "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithPlugin("VolumeBinding"), }, PreFilterMsg: `persistentvolumeclaim "unknownPVC" not found`, UnschedulablePlugins: sets.New(volumebinding.Name), @@ -1958,11 +1961,11 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { 
// Pod with deleting PVC - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2"}, pvcs: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}}, @@ -1973,8 +1976,8 @@ func TestSchedulerSchedulePod(t *testing.T) { NumAllNodes: 2, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithFailedPlugin("VolumeBinding"), - "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithFailedPlugin("VolumeBinding"), + "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithPlugin("VolumeBinding"), + "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithPlugin("VolumeBinding"), }, PreFilterMsg: `persistentvolumeclaim "existingPVC" is being deleted`, UnschedulablePlugins: sets.New(volumebinding.Name), @@ -1982,12 +1985,12 @@ func TestSchedulerSchedulePod(t *testing.T) { }, }, { - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterScorePlugin("FalseMap", newFalseMapPlugin(), 1), - st.RegisterScorePlugin("TrueMap", newTrueMapPlugin(), 2), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterScorePlugin("FalseMap", newFalseMapPlugin(), 1), + tf.RegisterScorePlugin("TrueMap", newTrueMapPlugin(), 2), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"2", "1"}, pod: st.MakePod().Name("2").Obj(), @@ -1996,15 +1999,15 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test podtopologyspread plugin - 2 nodes with maxskew=1", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPluginAsExtensions( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPluginAsExtensions( podtopologyspread.Name, podTopologySpreadFunc, "PreFilter", "Filter", ), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2"}, pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(1, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{ @@ -2023,15 +2026,15 @@ func TestSchedulerSchedulePod(t 
*testing.T) { }, { name: "test podtopologyspread plugin - 3 nodes with maxskew=2", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPluginAsExtensions( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPluginAsExtensions( podtopologyspread.Name, podTopologySpreadFunc, "PreFilter", "Filter", ), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(2, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{ @@ -2052,14 +2055,14 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test with filter plugin returning Unschedulable status", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin( "FakeFilter", - st.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.Unschedulable}), + tf.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.Unschedulable}), ), - st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"3"}, pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(), @@ -2069,7 +2072,7 @@ func TestSchedulerSchedulePod(t *testing.T) { NumAllNodes: 1, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter").WithFailedPlugin("FakeFilter"), + "3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter").WithPlugin("FakeFilter"), }, UnschedulablePlugins: sets.New("FakeFilter"), }, @@ -2077,14 +2080,14 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test with filter plugin returning UnschedulableAndUnresolvable status", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin( "FakeFilter", - st.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.UnschedulableAndUnresolvable}), + tf.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.UnschedulableAndUnresolvable}), ), - st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"3"}, pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(), @@ -2094,7 +2097,7 @@ func TestSchedulerSchedulePod(t *testing.T) { NumAllNodes: 1, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injecting failure for pod test-filter").WithFailedPlugin("FakeFilter"), + "3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injecting failure for pod test-filter").WithPlugin("FakeFilter"), }, 
UnschedulablePlugins: sets.New("FakeFilter"), }, @@ -2102,14 +2105,14 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test with partial failed filter plugin", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin( "FakeFilter", - st.NewFakeFilterPlugin(map[string]framework.Code{"1": framework.Unschedulable}), + tf.NewFakeFilterPlugin(map[string]framework.Code{"1": framework.Unschedulable}), ), - st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"1", "2"}, pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(), @@ -2118,13 +2121,13 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test prefilter plugin returning Unschedulable status", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin( "FakePreFilter", - st.NewFakePreFilterPlugin("FakePreFilter", nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status")), + tf.NewFakePreFilterPlugin("FakePreFilter", nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status")), ), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"1", "2"}, pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), @@ -2134,8 +2137,8 @@ func TestSchedulerSchedulePod(t *testing.T) { NumAllNodes: 2, Diagnosis: framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status").WithFailedPlugin("FakePreFilter"), - "2": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status").WithFailedPlugin("FakePreFilter"), + "1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status").WithPlugin("FakePreFilter"), + "2": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status").WithPlugin("FakePreFilter"), }, PreFilterMsg: "injected unschedulable status", UnschedulablePlugins: sets.New("FakePreFilter"), @@ -2144,13 +2147,13 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test prefilter plugin returning error status", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin( "FakePreFilter", - st.NewFakePreFilterPlugin("FakePreFilter", nil, framework.NewStatus(framework.Error, "injected error status")), + tf.NewFakePreFilterPlugin("FakePreFilter", nil, framework.NewStatus(framework.Error, "injected error status")), ), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"1", "2"}, pod: 
st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), @@ -2159,44 +2162,44 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test prefilter plugin returning node", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin( "FakePreFilter1", - st.NewFakePreFilterPlugin("FakePreFilter1", nil, nil), + tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil), ), - st.RegisterPreFilterPlugin( + tf.RegisterPreFilterPlugin( "FakePreFilter2", - st.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil), + tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil), ), - st.RegisterPreFilterPlugin( + tf.RegisterPreFilterPlugin( "FakePreFilter3", - st.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil), + tf.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil), ), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), wantNodes: sets.New("node2"), - wantEvaluatedNodes: pointer.Int32(1), + wantEvaluatedNodes: ptr.To[int32](1), }, { name: "test prefilter plugin returning non-intersecting nodes", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin( "FakePreFilter1", - st.NewFakePreFilterPlugin("FakePreFilter1", nil, nil), + tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil), ), - st.RegisterPreFilterPlugin( + tf.RegisterPreFilterPlugin( "FakePreFilter2", - st.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil), + tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil), ), - st.RegisterPreFilterPlugin( + tf.RegisterPreFilterPlugin( "FakePreFilter3", - st.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1")}, nil), + tf.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1")}, nil), ), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), @@ -2216,17 +2219,17 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test prefilter plugin returning empty node set", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin( "FakePreFilter1", - st.NewFakePreFilterPlugin("FakePreFilter1", nil, nil), + tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil), ), - st.RegisterPreFilterPlugin( + tf.RegisterPreFilterPlugin( "FakePreFilter2", - st.NewFakePreFilterPlugin("FakePreFilter2", 
&framework.PreFilterResult{NodeNames: sets.New[string]()}, nil), + tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New[string]()}, nil), ), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1"}, pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), @@ -2244,25 +2247,25 @@ func TestSchedulerSchedulePod(t *testing.T) { }, { name: "test prefilter plugin returning skip", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterPreFilterPlugin( + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterPreFilterPlugin( "FakePreFilter1", - st.NewFakePreFilterPlugin("FakeFilter1", nil, nil), + tf.NewFakePreFilterPlugin("FakeFilter1", nil, nil), ), - st.RegisterFilterPlugin( + tf.RegisterFilterPlugin( "FakeFilter1", - st.NewFakeFilterPlugin(map[string]framework.Code{ + tf.NewFakeFilterPlugin(map[string]framework.Code{ "node1": framework.Unschedulable, }), ), - st.RegisterPluginAsExtensions("FakeFilter2", func(configuration runtime.Object, f framework.Handle) (framework.Plugin, error) { - return st.FakePreFilterAndFilterPlugin{ - FakePreFilterPlugin: &st.FakePreFilterPlugin{ + tf.RegisterPluginAsExtensions("FakeFilter2", func(_ context.Context, configuration runtime.Object, f framework.Handle) (framework.Plugin, error) { + return tf.FakePreFilterAndFilterPlugin{ + FakePreFilterPlugin: &tf.FakePreFilterPlugin{ Result: nil, Status: framework.NewStatus(framework.Skip), }, - FakeFilterPlugin: &st.FakeFilterPlugin{ + FakeFilterPlugin: &tf.FakeFilterPlugin{ // This Filter plugin shouldn't be executed in the Filter extension point due to skip. // To confirm that, return the status code Error to all Nodes. 
FailedNodeReturnCodeMap: map[string]framework.Code{ @@ -2271,20 +2274,20 @@ func TestSchedulerSchedulePod(t *testing.T) { }, }, nil }, "PreFilter", "Filter"), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, nodes: []string{"node1", "node2", "node3"}, pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(), wantNodes: sets.New("node2", "node3"), - wantEvaluatedNodes: pointer.Int32(3), + wantEvaluatedNodes: ptr.To[int32](3), }, { name: "test all prescore plugins return skip", - registerPlugins: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), - st.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", st.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0, + registerPlugins: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0, framework.NewStatus(framework.Skip, "fake skip"), framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"), ), "PreScore", "Score"), @@ -2322,7 +2325,7 @@ func TestSchedulerSchedulePod(t *testing.T) { } } snapshot := internalcache.NewSnapshot(test.pods, nodes) - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, test.registerPlugins, "", frameworkruntime.WithSnapshotSharedLister(snapshot), @@ -2376,13 +2379,13 @@ func TestFindFitAllError(t *testing.T) { nodes := makeNodeList([]string{"3", "2", "1"}) scheduler := makeScheduler(ctx, nodes) - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, - []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterFilterPlugin("MatchFilter", st.NewMatchFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, "", frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)), @@ -2398,9 +2401,9 @@ func TestFindFitAllError(t *testing.T) { expected := framework.Diagnosis{ NodeToStatusMap: framework.NodeToStatusMap{ - "1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("MatchFilter"), - "2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("MatchFilter"), - "3": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("MatchFilter"), + "1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"), + "2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"), + "3": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"), }, UnschedulablePlugins: sets.New("MatchFilter"), } @@ -2416,13 +2419,13 @@ func TestFindFitSomeError(t *testing.T) { nodes := makeNodeList([]string{"3", "2", "1"}) 
scheduler := makeScheduler(ctx, nodes) - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, - []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterFilterPlugin("MatchFilter", st.NewMatchFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, "", frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)), @@ -2455,7 +2458,7 @@ func TestFindFitSomeError(t *testing.T) { t.Errorf("failed to find node %v in %v", node.Name, diagnosis.NodeToStatusMap) } reasons := status.Reasons() - if len(reasons) != 1 || reasons[0] != st.ErrReasonFake { + if len(reasons) != 1 || reasons[0] != tf.ErrReasonFake { t.Errorf("unexpected failures: %v", reasons) } }) @@ -2484,22 +2487,22 @@ func TestFindFitPredicateCallCounts(t *testing.T) { t.Run(test.name, func(t *testing.T) { nodes := makeNodeList([]string{"1"}) - plugin := st.FakeFilterPlugin{} - registerFakeFilterFunc := st.RegisterFilterPlugin( + plugin := tf.FakeFilterPlugin{} + registerFakeFilterFunc := tf.RegisterFilterPlugin( "FakeFilter", - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return &plugin, nil }, ) - registerPlugins := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + registerPlugins := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), registerFakeFilterFunc, - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), } logger, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, registerPlugins, "", frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)), @@ -2629,15 +2632,15 @@ func TestZeroRequest(t *testing.T) { snapshot := internalcache.NewSnapshot(test.pods, test.nodes) fts := feature.Features{} - pluginRegistrations := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterScorePlugin(noderesources.Name, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1), - st.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + pluginRegistrations := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterScorePlugin(noderesources.Name, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1), + tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, pluginRegistrations, "", frameworkruntime.WithInformerFactory(informerFactory), @@ -2675,24 +2678,77 @@ func TestZeroRequest(t *testing.T) { } func 
Test_prioritizeNodes(t *testing.T) { + imageStatus1 := []v1.ContainerImage{ + { + Names: []string{ + "gcr.io/40:latest", + "gcr.io/40:v1", + }, + SizeBytes: int64(80 * mb), + }, + { + Names: []string{ + "gcr.io/300:latest", + "gcr.io/300:v1", + }, + SizeBytes: int64(300 * mb), + }, + } + + imageStatus2 := []v1.ContainerImage{ + { + Names: []string{ + "gcr.io/300:latest", + }, + SizeBytes: int64(300 * mb), + }, + { + Names: []string{ + "gcr.io/40:latest", + "gcr.io/40:v1", + }, + SizeBytes: int64(80 * mb), + }, + } + + imageStatus3 := []v1.ContainerImage{ + { + Names: []string{ + "gcr.io/600:latest", + }, + SizeBytes: int64(600 * mb), + }, + { + Names: []string{ + "gcr.io/40:latest", + }, + SizeBytes: int64(80 * mb), + }, + { + Names: []string{ + "gcr.io/900:latest", + }, + SizeBytes: int64(900 * mb), + }, + } tests := []struct { name string pod *v1.Pod pods []*v1.Pod nodes []*v1.Node - pluginRegistrations []st.RegisterPluginFunc - extenders []st.FakeExtender + pluginRegistrations []tf.RegisterPluginFunc + extenders []tf.FakeExtender want []framework.NodePluginScores }{ { name: "the score from all plugins should be recorded in PluginToNodeScores", pod: &v1.Pod{}, nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)}, - pluginRegistrations: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1), - st.RegisterScorePlugin("Node2Prioritizer", st.NewNode2PrioritizerPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + pluginRegistrations: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1), + tf.RegisterScorePlugin("Node2Prioritizer", tf.NewNode2PrioritizerPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, extenders: nil, want: []framework.NodePluginScores{ @@ -2730,29 +2786,29 @@ func Test_prioritizeNodes(t *testing.T) { name: "the score from extender should also be recorded in PluginToNodeScores with plugin scores", pod: &v1.Pod{}, nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)}, - pluginRegistrations: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + pluginRegistrations: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, - extenders: []st.FakeExtender{ + extenders: []tf.FakeExtender{ { ExtenderName: "FakeExtender1", Weight: 1, - Prioritizers: []st.PriorityConfig{ + Prioritizers: []tf.PriorityConfig{ { Weight: 3, - Function: st.Node1PrioritizerExtender, + Function: tf.Node1PrioritizerExtender, }, }, }, { ExtenderName: "FakeExtender2", Weight: 1, - Prioritizers: []st.PriorityConfig{ + 
Prioritizers: []tf.PriorityConfig{ { Weight: 2, - Function: st.Node2PrioritizerExtender, + Function: tf.Node2PrioritizerExtender, }, }, }, @@ -2801,12 +2857,12 @@ func Test_prioritizeNodes(t *testing.T) { name: "plugin which returned skip in preScore shouldn't be executed in the score phase", pod: &v1.Pod{}, nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)}, - pluginRegistrations: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1), - st.RegisterScorePlugin("Node2Prioritizer", st.NewNode2PrioritizerPlugin(), 1), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), - st.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", st.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0, + pluginRegistrations: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1), + tf.RegisterScorePlugin("Node2Prioritizer", tf.NewNode2PrioritizerPlugin(), 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0, framework.NewStatus(framework.Skip, "fake skip"), framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"), ), "PreScore", "Score"), @@ -2847,10 +2903,10 @@ func Test_prioritizeNodes(t *testing.T) { name: "all score plugins are skipped", pod: &v1.Pod{}, nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)}, - pluginRegistrations: []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), - st.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", st.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0, + pluginRegistrations: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0, framework.NewStatus(framework.Skip, "fake skip"), framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"), ), "PreScore", "Score"), @@ -2861,6 +2917,115 @@ func Test_prioritizeNodes(t *testing.T) { {Name: "node2", Scores: []framework.PluginScore{}}, }, }, + { + name: "the score from Image Locality plugin with image in all nodes", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Image: "gcr.io/40", + }, + }, + }, + }, + nodes: []*v1.Node{ + makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10, imageStatus1...), + makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10, imageStatus2...), + makeNode("node3", 1000, schedutil.DefaultMemoryRequest*10, imageStatus3...), + }, + pluginRegistrations: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterScorePlugin(imagelocality.Name, imagelocality.New, 
1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + }, + extenders: nil, + want: []framework.NodePluginScores{ + { + Name: "node1", + Scores: []framework.PluginScore{ + { + Name: "ImageLocality", + Score: 5, + }, + }, + TotalScore: 5, + }, + { + Name: "node2", + Scores: []framework.PluginScore{ + { + Name: "ImageLocality", + Score: 5, + }, + }, + TotalScore: 5, + }, + { + Name: "node3", + Scores: []framework.PluginScore{ + { + Name: "ImageLocality", + Score: 5, + }, + }, + TotalScore: 5, + }, + }, + }, + { + name: "the score from Image Locality plugin with image in partial nodes", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Image: "gcr.io/300", + }, + }, + }, + }, + nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10, imageStatus1...), + makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10, imageStatus2...), + makeNode("node3", 1000, schedutil.DefaultMemoryRequest*10, imageStatus3...), + }, + pluginRegistrations: []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterScorePlugin(imagelocality.Name, imagelocality.New, 1), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + }, + extenders: nil, + want: []framework.NodePluginScores{ + { + Name: "node1", + Scores: []framework.PluginScore{ + { + Name: "ImageLocality", + Score: 18, + }, + }, + TotalScore: 18, + }, + { + Name: "node2", + Scores: []framework.PluginScore{ + { + Name: "ImageLocality", + Score: 18, + }, + }, + TotalScore: 18, + }, + { + Name: "node3", + Scores: []framework.PluginScore{ + { + Name: "ImageLocality", + Score: 0, + }, + }, + TotalScore: 0, + }, + }, + }, } for _, test := range tests { @@ -2868,10 +3033,17 @@ func Test_prioritizeNodes(t *testing.T) { client := clientsetfake.NewSimpleClientset() informerFactory := informers.NewSharedInformerFactory(client, 0) - snapshot := internalcache.NewSnapshot(test.pods, test.nodes) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - fwk, err := st.NewFramework( + cache := internalcache.New(ctx, time.Duration(0)) + for _, node := range test.nodes { + cache.AddNode(klog.FromContext(ctx), node) + } + snapshot := internalcache.NewEmptySnapshot() + if err := cache.UpdateSnapshot(klog.FromContext(ctx), snapshot); err != nil { + t.Fatal(err) + } + fwk, err := tf.NewFramework( ctx, test.pluginRegistrations, "", frameworkruntime.WithInformerFactory(informerFactory), @@ -2922,7 +3094,7 @@ func TestNumFeasibleNodesToFind(t *testing.T) { }, { name: "set profile percentageOfNodesToScore and nodes number not more than 50", - profilePercentage: pointer.Int32(40), + profilePercentage: ptr.To[int32](40), numAllNodes: 10, wantNumNodes: 10, }, @@ -2933,14 +3105,14 @@ func TestNumFeasibleNodesToFind(t *testing.T) { }, { name: "set profile percentageOfNodesToScore and nodes number more than 50", - profilePercentage: pointer.Int32(40), + profilePercentage: ptr.To[int32](40), numAllNodes: 1000, wantNumNodes: 400, }, { name: "set global and profile percentageOfNodesToScore and nodes number more than 50", globalPercentage: 100, - profilePercentage: pointer.Int32(40), + profilePercentage: ptr.To[int32](40), numAllNodes: 1000, wantNumNodes: 400, }, @@ -2957,7 +3129,7 @@ func TestNumFeasibleNodesToFind(t *testing.T) { }, { name: "set profile percentageOfNodesToScore and nodes number more than 50*125", - profilePercentage: pointer.Int32(40), + profilePercentage: ptr.To[int32](40), numAllNodes: 6000, wantNumNodes: 2400, }, @@ -2986,12 +3158,12 @@ func 
TestFairEvaluationForNodes(t *testing.T) { defer cancel() sched := makeScheduler(ctx, nodes) - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, - []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterFilterPlugin("TrueFilter", st.NewTrueFilterPlugin), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), }, "", frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)), @@ -3059,19 +3231,19 @@ func TestPreferNominatedNodeFilterCallCounts(t *testing.T) { for _, n := range nodes { cache.AddNode(logger, n) } - plugin := st.FakeFilterPlugin{FailedNodeReturnCodeMap: test.nodeReturnCodeMap} - registerFakeFilterFunc := st.RegisterFilterPlugin( + plugin := tf.FakeFilterPlugin{FailedNodeReturnCodeMap: test.nodeReturnCodeMap} + registerFakeFilterFunc := tf.RegisterFilterPlugin( "FakeFilter", - func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) { + func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) { return &plugin, nil }, ) - registerPlugins := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + registerPlugins := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), registerFakeFilterFunc, - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), } - fwk, err := st.NewFramework( + fwk, err := tf.NewFramework( ctx, registerPlugins, "", frameworkruntime.WithClientSet(client), @@ -3150,7 +3322,7 @@ func makeScheduler(ctx context.Context, nodes []*v1.Node) *Scheduler { return sched } -func makeNode(node string, milliCPU, memory int64) *v1.Node { +func makeNode(node string, milliCPU, memory int64, images ...v1.ContainerImage) *v1.Node { return &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: node}, Status: v1.NodeStatus{ @@ -3165,6 +3337,7 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node { v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), "pods": *resource.NewQuantity(100, resource.DecimalSI), }, + Images: images, }, } } @@ -3172,7 +3345,7 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node { // queuedPodStore: pods queued before processing. // cache: scheduler cache that might contain assumed pods. func setupTestSchedulerWithOnePodOnNode(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, scache internalcache.Cache, - pod *v1.Pod, node *v1.Node, fns ...st.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) { + pod *v1.Pod, node *v1.Node, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) { scheduler, bindingChan, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, nil, nil, fns...) queuedPodStore.Add(pod) @@ -3200,7 +3373,7 @@ func setupTestSchedulerWithOnePodOnNode(ctx context.Context, t *testing.T, queue // queuedPodStore: pods queued before processing. // scache: scheduler cache that might contain assumed pods. 
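An aside on the makeNode change above: the new variadic images parameter feeds node.Status.Images, which is what the ImageLocality cases in Test_prioritizeNodes rely on. A minimal sketch of a call under the new signature, assuming the v1 and schedutil imports already present in this test file (the image name and size are illustrative):

    // Build a test node that advertises one cached image so the
    // ImageLocality score plugin has data to score against.
    node := makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10,
        v1.ContainerImage{
            Names:     []string{"gcr.io/40:latest", "gcr.io/40:v1"},
            SizeBytes: int64(80 * mb),
        },
    )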
-func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...st.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) { +func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) { bindingChan := make(chan *v1.Binding, 1) client := clientsetfake.NewSimpleClientset() client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { @@ -3224,7 +3397,7 @@ func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clien } schedulingQueue := internalqueue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory) - fwk, _ := st.NewFramework( + fwk, _ := tf.NewFramework( ctx, fns, testSchedulerName, @@ -3275,10 +3448,10 @@ func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volu pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims() pvcInformer.Informer().GetStore().Add(&testPVC) - fns := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), - st.RegisterPluginAsExtensions(volumebinding.Name, func(plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) { + fns := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterPluginAsExtensions(volumebinding.Name, func(ctx context.Context, plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) { return &volumebinding.VolumeBinding{Binder: volumeBinder, PVCLister: pvcInformer.Lister()}, nil }, "PreFilter", "Filter", "Reserve", "PreBind"), } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index e315ec0fc293c..d7752001ec17a 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -363,9 +363,9 @@ func New(ctx context.Context, } // defaultQueueingHintFn is the default queueing hint function. -// It always returns QueueAfterBackoff as the queueing hint. -var defaultQueueingHintFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) framework.QueueingHint { - return framework.QueueAfterBackoff +// It always returns Queue as the queueing hint. 
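Before the defaultQueueingHintFn hunk below, a short note on the signature change it reflects: queueing hint functions now return (framework.QueueingHint, error) instead of a bare hint. A hedged sketch of a hint function under the new contract, mirroring the fake plugins later in this patch (the function name is illustrative):

    // exampleHintFn requeues the pod immediately; returning a non-nil error
    // instead would let the scheduling queue fall back to its default behaviour.
    var exampleHintFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (framework.QueueingHint, error) {
        return framework.Queue, nil
    }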
+var defaultQueueingHintFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (framework.QueueingHint, error) { + return framework.Queue, nil } func buildQueueingHintMap(es []framework.EnqueueExtensions) internalqueue.QueueingHintMap { diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index a85f04261e17f..33dc47a4d8c69 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -52,8 +52,9 @@ import ( internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/profile" st "k8s.io/kubernetes/pkg/scheduler/testing" + tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" testingclock "k8s.io/utils/clock/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestSchedulerCreation(t *testing.T) { @@ -437,7 +438,7 @@ func TestWithPercentageOfNodesToScore(t *testing.T) { }, { name: "percentageOfNodesScore is not nil", - percentageOfNodesToScoreConfig: pointer.Int32(10), + percentageOfNodesToScoreConfig: ptr.To[int32](10), wantedPercentageOfNodesToScore: 10, }, } @@ -498,12 +499,12 @@ func getPodFromPriorityQueue(queue *internalqueue.PriorityQueue, pod *v1.Pod) *v func initScheduler(ctx context.Context, cache internalcache.Cache, queue internalqueue.SchedulingQueue, client kubernetes.Interface, informerFactory informers.SharedInformerFactory) (*Scheduler, framework.Framework, error) { logger := klog.FromContext(ctx) - registerPluginFuncs := []st.RegisterPluginFunc{ - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + registerPluginFuncs := []tf.RegisterPluginFunc{ + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), } eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) - fwk, err := st.NewFramework(ctx, + fwk, err := tf.NewFramework(ctx, registerPluginFuncs, testSchedulerName, frameworkruntime.WithClientSet(client), @@ -537,9 +538,9 @@ func TestInitPluginsWithIndexers(t *testing.T) { { name: "register indexer, no conflicts", entrypoints: map[string]frameworkruntime.PluginFactory{ - "AddIndexer": func(obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { + "AddIndexer": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { podInformer := handle.SharedInformerFactory().Core().V1().Pods() - err := podInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{ + err := podInformer.Informer().AddIndexers(cache.Indexers{ "nodeName": indexByPodSpecNodeName, }) return &TestPlugin{name: "AddIndexer"}, err @@ -550,16 +551,16 @@ func TestInitPluginsWithIndexers(t *testing.T) { name: "register the same indexer name multiple times, conflict", // order of registration doesn't matter entrypoints: map[string]frameworkruntime.PluginFactory{ - "AddIndexer1": func(obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { + "AddIndexer1": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { podInformer := handle.SharedInformerFactory().Core().V1().Pods() - err := podInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{ + err := podInformer.Informer().AddIndexers(cache.Indexers{ "nodeName": indexByPodSpecNodeName, }) return &TestPlugin{name: "AddIndexer1"}, err }, - "AddIndexer2": func(obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { + "AddIndexer2": func(ctx context.Context, obj 
runtime.Object, handle framework.Handle) (framework.Plugin, error) { podInformer := handle.SharedInformerFactory().Core().V1().Pods() - err := podInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{ + err := podInformer.Informer().AddIndexers(cache.Indexers{ "nodeName": indexByPodAnnotationNodeName, }) return &TestPlugin{name: "AddIndexer1"}, err @@ -571,16 +572,16 @@ func TestInitPluginsWithIndexers(t *testing.T) { name: "register the same indexer body with different names, no conflicts", // order of registration doesn't matter entrypoints: map[string]frameworkruntime.PluginFactory{ - "AddIndexer1": func(obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { + "AddIndexer1": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { podInformer := handle.SharedInformerFactory().Core().V1().Pods() - err := podInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{ + err := podInformer.Informer().AddIndexers(cache.Indexers{ "nodeName1": indexByPodSpecNodeName, }) return &TestPlugin{name: "AddIndexer1"}, err }, - "AddIndexer2": func(obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { + "AddIndexer2": func(ctx context.Context, obj runtime.Object, handle framework.Handle) (framework.Plugin, error) { podInformer := handle.SharedInformerFactory().Core().V1().Pods() - err := podInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{ + err := podInformer.Informer().AddIndexers(cache.Indexers{ "nodeName2": indexByPodAnnotationNodeName, }) return &TestPlugin{name: "AddIndexer2"}, err @@ -593,22 +594,22 @@ func TestInitPluginsWithIndexers(t *testing.T) { t.Run(tt.name, func(t *testing.T) { fakeInformerFactory := NewInformerFactory(&fake.Clientset{}, 0*time.Second) - var registerPluginFuncs []st.RegisterPluginFunc + var registerPluginFuncs []tf.RegisterPluginFunc for name, entrypoint := range tt.entrypoints { registerPluginFuncs = append(registerPluginFuncs, // anything supported by TestPlugin is fine - st.RegisterFilterPlugin(name, entrypoint), + tf.RegisterFilterPlugin(name, entrypoint), ) } // we always need this registerPluginFuncs = append(registerPluginFuncs, - st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), - st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), + tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), + tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), ) _, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() - _, err := st.NewFramework(ctx, registerPluginFuncs, "test", frameworkruntime.WithInformerFactory(fakeInformerFactory)) + _, err := tf.NewFramework(ctx, registerPluginFuncs, "test", frameworkruntime.WithInformerFactory(fakeInformerFactory)) if len(tt.wantErr) > 0 { if err == nil || !strings.Contains(err.Error(), tt.wantErr) { @@ -818,13 +819,15 @@ func Test_buildQueueingHintMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SchedulerQueueingHints, !tt.featuregateDisabled)() - logger, _ := ktesting.NewTestContext(t) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() registry := frameworkruntime.Registry{} cfgPls := &schedulerapi.Plugins{} plugins := append(tt.plugins, &fakebindPlugin{}, &fakeQueueSortPlugin{}) for _, pl := range plugins { tmpPl := pl - if err := registry.Register(pl.Name(), func(_ runtime.Object, _ framework.Handle) 
(framework.Plugin, error) { + if err := registry.Register(pl.Name(), func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tmpPl, nil }); err != nil { t.Fatalf("fail to register filter plugin (%s)", pl.Name()) @@ -833,9 +836,7 @@ func Test_buildQueueingHintMap(t *testing.T) { } profile := schedulerapi.KubeSchedulerProfile{Plugins: cfgPls} - stopCh := make(chan struct{}) - defer close(stopCh) - fwk, err := newFramework(registry, profile, stopCh) + fwk, err := newFramework(ctx, registry, profile) if err != nil { t.Fatal(err) } @@ -863,8 +864,10 @@ func Test_buildQueueingHintMap(t *testing.T) { t.Errorf("got plugin name %v, want %v", fn.PluginName, wantfns[i].PluginName) continue } - if fn.QueueingHintFn(logger, nil, nil, nil) != wantfns[i].QueueingHintFn(logger, nil, nil, nil) { - t.Errorf("got queueing hint function (%v) returning %v, expect it to return %v", fn.PluginName, fn.QueueingHintFn(logger, nil, nil, nil), wantfns[i].QueueingHintFn(logger, nil, nil, nil)) + got, gotErr := fn.QueueingHintFn(logger, nil, nil, nil) + want, wantErr := wantfns[i].QueueingHintFn(logger, nil, nil, nil) + if got != want || gotErr != wantErr { + t.Errorf("got queueing hint function (%v) returning (%v, %v), expect it to return (%v, %v)", fn.PluginName, got, gotErr, want, wantErr) continue } } @@ -1009,13 +1012,16 @@ func Test_UnionedGVKs(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() registry := plugins.NewInTreeRegistry() cfgPls := &schedulerapi.Plugins{MultiPoint: tt.plugins} plugins := []framework.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}, &fakeNoopPlugin{}, &fakeNoopRuntimePlugin{}, &fakeQueueSortPlugin{}, &fakebindPlugin{}} for _, pl := range plugins { tmpPl := pl - if err := registry.Register(pl.Name(), func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + if err := registry.Register(pl.Name(), func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return tmpPl, nil }); err != nil { t.Fatalf("fail to register filter plugin (%s)", pl.Name()) @@ -1023,9 +1029,7 @@ func Test_UnionedGVKs(t *testing.T) { } profile := schedulerapi.KubeSchedulerProfile{Plugins: cfgPls, PluginConfig: defaults.PluginConfigsV1} - stopCh := make(chan struct{}) - defer close(stopCh) - fwk, err := newFramework(registry, profile, stopCh) + fwk, err := newFramework(ctx, registry, profile) if err != nil { t.Fatal(err) } @@ -1042,8 +1046,8 @@ func Test_UnionedGVKs(t *testing.T) { } } -func newFramework(r frameworkruntime.Registry, profile schedulerapi.KubeSchedulerProfile, stopCh <-chan struct{}) (framework.Framework, error) { - return frameworkruntime.NewFramework(context.Background(), r, &profile, +func newFramework(ctx context.Context, r frameworkruntime.Registry, profile schedulerapi.KubeSchedulerProfile) (framework.Framework, error) { + return frameworkruntime.NewFramework(ctx, r, &profile, frameworkruntime.WithSnapshotSharedLister(internalcache.NewSnapshot(nil, nil)), frameworkruntime.WithInformerFactory(informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)), ) @@ -1088,8 +1092,8 @@ var hintFromFakeNode = framework.QueueingHint(100) type fakeNodePlugin struct{} -var fakeNodePluginQueueingFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) framework.QueueingHint { - return hintFromFakeNode +var fakeNodePluginQueueingFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (framework.QueueingHint, 
error) { + return hintFromFakeNode, nil } func (*fakeNodePlugin) Name() string { return fakeNode } @@ -1098,7 +1102,7 @@ func (*fakeNodePlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1. return nil } -func (*fakeNodePlugin) EventsToRegister() []framework.ClusterEventWithHint { +func (pl *fakeNodePlugin) EventsToRegister() []framework.ClusterEventWithHint { return []framework.ClusterEventWithHint{ {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add}, QueueingHintFn: fakeNodePluginQueueingFn}, } @@ -1108,8 +1112,8 @@ var hintFromFakePod = framework.QueueingHint(101) type fakePodPlugin struct{} -var fakePodPluginQueueingFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) framework.QueueingHint { - return hintFromFakePod +var fakePodPluginQueueingFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (framework.QueueingHint, error) { + return hintFromFakePod, nil } func (*fakePodPlugin) Name() string { return fakePod } @@ -1118,7 +1122,7 @@ func (*fakePodPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.P return nil } -func (*fakePodPlugin) EventsToRegister() []framework.ClusterEventWithHint { +func (pl *fakePodPlugin) EventsToRegister() []framework.ClusterEventWithHint { return []framework.ClusterEventWithHint{ {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Add}, QueueingHintFn: fakePodPluginQueueingFn}, } diff --git a/pkg/scheduler/testing/fake_extender.go b/pkg/scheduler/testing/framework/fake_extender.go similarity index 98% rename from pkg/scheduler/testing/fake_extender.go rename to pkg/scheduler/testing/framework/fake_extender.go index 903fbebdf2a5d..49126af22479c 100644 --- a/pkg/scheduler/testing/fake_extender.go +++ b/pkg/scheduler/testing/framework/fake_extender.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package testing +package framework import ( "context" @@ -112,7 +112,7 @@ type node2PrioritizerPlugin struct{} // NewNode2PrioritizerPlugin returns a factory function to build node2PrioritizerPlugin. func NewNode2PrioritizerPlugin() frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &node2PrioritizerPlugin{}, nil } } @@ -223,7 +223,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node) err := f.runPredicate(pod, node) if err.IsSuccess() { return []*v1.Pod{}, 0, true, nil - } else if err.IsUnschedulable() { + } else if err.IsRejected() { return nil, 0, false, nil } else { return nil, 0, false, err.AsError() @@ -232,7 +232,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node) // Otherwise, as a extender support preemption and have cached node info, we will assume cachedNodeNameToInfo is available // and get cached node info by given node name. 
- nodeInfoCopy := f.CachedNodeNameToInfo[node.GetName()].Clone() + nodeInfoCopy := f.CachedNodeNameToInfo[node.GetName()].Snapshot() var potentialVictims []*v1.Pod @@ -258,7 +258,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node) status := f.runPredicate(pod, nodeInfoCopy.Node()) if status.IsSuccess() { // pass - } else if status.IsUnschedulable() { + } else if status.IsRejected() { // does not fit return nil, 0, false, nil } else { diff --git a/pkg/scheduler/framework/fake/listers.go b/pkg/scheduler/testing/framework/fake_listers.go similarity index 99% rename from pkg/scheduler/framework/fake/listers.go rename to pkg/scheduler/testing/framework/fake_listers.go index 61c8dfc3460a6..2135f68129b4c 100644 --- a/pkg/scheduler/framework/fake/listers.go +++ b/pkg/scheduler/testing/framework/fake_listers.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package fake +package framework import ( "fmt" diff --git a/pkg/scheduler/testing/fake_plugins.go b/pkg/scheduler/testing/framework/fake_plugins.go similarity index 89% rename from pkg/scheduler/testing/fake_plugins.go rename to pkg/scheduler/testing/framework/fake_plugins.go index ce6fafa4a8f3e..d4c3b480b8d05 100644 --- a/pkg/scheduler/testing/fake_plugins.go +++ b/pkg/scheduler/testing/framework/fake_plugins.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package testing +package framework import ( "context" @@ -45,7 +45,7 @@ func (pl *FalseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, } // NewFalseFilterPlugin initializes a FalseFilterPlugin and returns it. -func NewFalseFilterPlugin(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func NewFalseFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &FalseFilterPlugin{}, nil } @@ -63,7 +63,7 @@ func (pl *TrueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, p } // NewTrueFilterPlugin initializes a TrueFilterPlugin and returns it. -func NewTrueFilterPlugin(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func NewTrueFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &TrueFilterPlugin{}, nil } @@ -102,7 +102,7 @@ func (pl *FakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, p // NewFakeFilterPlugin initializes a fakeFilterPlugin and returns it. func NewFakeFilterPlugin(failedNodeReturnCodeMap map[string]framework.Code) frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &FakeFilterPlugin{ FailedNodeReturnCodeMap: failedNodeReturnCodeMap, }, nil @@ -131,7 +131,7 @@ func (pl *MatchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, } // NewMatchFilterPlugin initializes a MatchFilterPlugin and returns it. -func NewMatchFilterPlugin(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { +func NewMatchFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &MatchFilterPlugin{}, nil } @@ -159,7 +159,7 @@ func (pl *FakePreFilterPlugin) PreFilterExtensions() framework.PreFilterExtensio // NewFakePreFilterPlugin initializes a fakePreFilterPlugin and returns it. 
func NewFakePreFilterPlugin(name string, result *framework.PreFilterResult, status *framework.Status) frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &FakePreFilterPlugin{ Result: result, Status: status, @@ -189,7 +189,7 @@ func (pl *FakeReservePlugin) Unreserve(_ context.Context, _ *framework.CycleStat // NewFakeReservePlugin initializes a fakeReservePlugin and returns it. func NewFakeReservePlugin(status *framework.Status) frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &FakeReservePlugin{ Status: status, }, nil @@ -213,7 +213,7 @@ func (pl *FakePreBindPlugin) PreBind(_ context.Context, _ *framework.CycleState, // NewFakePreBindPlugin initializes a fakePreBindPlugin and returns it. func NewFakePreBindPlugin(status *framework.Status) frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &FakePreBindPlugin{ Status: status, }, nil @@ -238,7 +238,7 @@ func (pl *FakePermitPlugin) Permit(_ context.Context, _ *framework.CycleState, _ // NewFakePermitPlugin initializes a fakePermitPlugin and returns it. func NewFakePermitPlugin(status *framework.Status, timeout time.Duration) frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &FakePermitPlugin{ Status: status, Timeout: timeout, @@ -271,7 +271,7 @@ func (pl *FakePreScoreAndScorePlugin) PreScore(ctx context.Context, state *frame } func NewFakePreScoreAndScorePlugin(name string, score int64, preScoreStatus, scoreStatus *framework.Status) frameworkruntime.PluginFactory { - return func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) { + return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) { return &FakePreScoreAndScorePlugin{ name: name, score: score, diff --git a/pkg/scheduler/testing/framework_helpers.go b/pkg/scheduler/testing/framework/framework_helpers.go similarity index 99% rename from pkg/scheduler/testing/framework_helpers.go rename to pkg/scheduler/testing/framework/framework_helpers.go index a3097702885d4..72110f2315e14 100644 --- a/pkg/scheduler/testing/framework_helpers.go +++ b/pkg/scheduler/testing/framework/framework_helpers.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package testing +package framework import ( "context" diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go index 8d94118df10b6..b564bd0cafb03 100644 --- a/pkg/scheduler/testing/wrappers.go +++ b/pkg/scheduler/testing/wrappers.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" imageutils "k8s.io/kubernetes/test/utils/image" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var zero int64 @@ -255,7 +255,7 @@ func (p *PodWrapper) OwnerReference(name string, gvk schema.GroupVersionKind) *P APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, Name: name, - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, } return p @@ -416,6 +416,12 @@ func (p *PodWrapper) Volume(volume v1.Volume) *PodWrapper { return p } +// Volumes set the volumes and inject into the inner pod. +func (p *PodWrapper) Volumes(volumes []v1.Volume) *PodWrapper { + p.Spec.Volumes = volumes + return p +} + // SchedulingGates sets `gates` as additional SchedulerGates of the inner pod. func (p *PodWrapper) SchedulingGates(gates []string) *PodWrapper { for _, gate := range gates { @@ -889,7 +895,7 @@ func (wrapper *ResourceClaimWrapper) OwnerReference(name, uid string, gvk schema Kind: gvk.Kind, Name: name, UID: types.UID(uid), - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, } return wrapper @@ -971,8 +977,8 @@ func (wrapper *PodSchedulingWrapper) OwnerReference(name, uid string, gvk schema Kind: gvk.Kind, Name: name, UID: types.UID(uid), - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, } return wrapper diff --git a/pkg/scheduler/util/utils.go b/pkg/scheduler/util/utils.go index 967c248355d8f..e14b4e77453c2 100644 --- a/pkg/scheduler/util/utils.go +++ b/pkg/scheduler/util/utils.go @@ -25,7 +25,6 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/net" @@ -167,7 +166,7 @@ func IsScalarResourceName(name v1.ResourceName) bool { // nil objects are allowed and will be converted to nil. // For oldObj, cache.DeletedFinalStateUnknown is handled and the // object stored in it will be converted instead. -func As[T runtime.Object](oldObj, newobj interface{}) (T, T, error) { +func As[T any](oldObj, newobj interface{}) (T, T, error) { var oldTyped T var newTyped T var ok bool diff --git a/pkg/scheduler/util/utils_test.go b/pkg/scheduler/util/utils_test.go index bb8115cb8e105..e187a77068bbc 100644 --- a/pkg/scheduler/util/utils_test.go +++ b/pkg/scheduler/util/utils_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/net" clientsetfake "k8s.io/client-go/kubernetes/fake" clienttesting "k8s.io/client-go/testing" + "k8s.io/klog/v2" extenderv1 "k8s.io/kube-scheduler/extender/v1" ) @@ -487,3 +488,57 @@ func Test_As_Node(t *testing.T) { }) } } + +// Test_As_KMetadata tests the As function with Pod. 
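The utils.go hunk above widens As from As[T runtime.Object] to As[T any], which is what lets the new test below request klog.KMetadata. A small usage sketch with placeholder oldObj/newObj values, assuming the same package and imports as the test:

    // Narrow the old/new objects delivered by an informer event to a common
    // interface; an error means one of them was of an unexpected type.
    oldMeta, newMeta, err := As[klog.KMetadata](oldObj, newObj)
    if err != nil {
        t.Fatalf("unexpected conversion error: %v", err)
    }
    _, _ = oldMeta, newMeta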
+func Test_As_KMetadata(t *testing.T) { + tests := []struct { + name string + oldObj interface{} + newObj interface{} + wantErr bool + }{ + { + name: "nil old Pod", + oldObj: nil, + newObj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}, + wantErr: false, + }, + { + name: "nil new Pod", + oldObj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}, + newObj: nil, + wantErr: false, + }, + { + name: "two different kinds of objects", + oldObj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}, + newObj: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}, + wantErr: false, + }, + { + name: "unknown old type", + oldObj: "unknown type", + wantErr: true, + }, + { + name: "unknown new type", + newObj: "unknown type", + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + _, _, err := As[klog.KMetadata](tc.oldObj, tc.newObj) + if err != nil && !tc.wantErr { + t.Fatalf("unexpected error: %v", err) + } + if tc.wantErr { + if err == nil { + t.Fatalf("expected error, but got nil") + } + return + } + }) + } +} diff --git a/pkg/util/filesystem/defaultfs.go b/pkg/util/filesystem/defaultfs.go index 0ddd2248fa9d5..39673a958996d 100644 --- a/pkg/util/filesystem/defaultfs.go +++ b/pkg/util/filesystem/defaultfs.go @@ -17,8 +17,10 @@ limitations under the License. package filesystem import ( + "fmt" "os" "path/filepath" + "runtime" "strings" "time" ) @@ -75,6 +77,32 @@ func (fs *DefaultFs) MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(fs.prefix(path), perm) } +// MkdirAllWithPathCheck checks if path exists already. If not, it creates a directory +// named path, along with any necessary parents, and returns nil, or else returns an error. +// Permission bits perm (before umask) are used for all directories that +// MkdirAllWithPathCheck creates. +// If path is already a directory, MkdirAllWithPathCheck does nothing and returns nil. +// NOTE: In case of Windows NTFS, mount points are implemented as reparse-point +// (similar to symlink) and do not represent actual directory. Hence Directory existence +// check for windows NTFS will NOT check for dir, but for symlink presence. +func MkdirAllWithPathCheck(path string, perm os.FileMode) error { + if dir, err := os.Lstat(path); err == nil { + // If the path exists already, + // 1. for Unix/Linux OS, check if the path is directory. + // 2. for windows NTFS, check if the path is symlink instead of directory. + if dir.IsDir() || + (runtime.GOOS == "windows" && (dir.Mode()&os.ModeSymlink != 0)) { + return nil + } + return fmt.Errorf("path %v exists but is not a directory", path) + } + // If existence of path not known, attempt to create it. + if err := os.MkdirAll(path, perm); err != nil { + return err + } + return nil +} + // Chtimes via os.Chtimes func (fs *DefaultFs) Chtimes(name string, atime time.Time, mtime time.Time) error { return os.Chtimes(fs.prefix(name), atime, mtime) diff --git a/pkg/util/filesystem/util_test.go b/pkg/util/filesystem/util_test.go new file mode 100644 index 0000000000000..87ec73f067af5 --- /dev/null +++ b/pkg/util/filesystem/util_test.go @@ -0,0 +1,86 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
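The MkdirAllWithPathCheck helper added above is what the CSI attacher switches to later in this patch; a brief sketch of the call pattern (the path variable and mode mirror the attacher hunk further down):

    // Create the device mount path unless it already exists; on Windows an
    // existing NTFS mount point (a reparse point) is also treated as present.
    if err := filesystem.MkdirAllWithPathCheck(deviceMountPath, 0750); err != nil {
        return err
    }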
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "net" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIsUnixDomainSocket(t *testing.T) { + tests := []struct { + label string + listenOnSocket bool + expectSocket bool + expectError bool + invalidFile bool + }{ + { + label: "Domain Socket file", + listenOnSocket: true, + expectSocket: true, + expectError: false, + }, + { + label: "Non Existent file", + invalidFile: true, + expectError: true, + }, + { + label: "Regular file", + listenOnSocket: false, + expectSocket: false, + expectError: false, + }, + } + for _, test := range tests { + f, err := os.CreateTemp("", "test-domain-socket") + require.NoErrorf(t, err, "Failed to create file for test purposes: %v while setting up: %s", err, test.label) + addr := f.Name() + f.Close() + var ln *net.UnixListener + if test.listenOnSocket { + os.Remove(addr) + ta, err := net.ResolveUnixAddr("unix", addr) + require.NoErrorf(t, err, "Failed to ResolveUnixAddr: %v while setting up: %s", err, test.label) + ln, err = net.ListenUnix("unix", ta) + require.NoErrorf(t, err, "Failed to ListenUnix: %v while setting up: %s", err, test.label) + } + fileToTest := addr + if test.invalidFile { + fileToTest = fileToTest + ".invalid" + } + result, err := IsUnixDomainSocket(fileToTest) + if test.listenOnSocket { + // this takes care of removing the file associated with the domain socket + ln.Close() + } else { + // explicitly remove regular file + os.Remove(addr) + } + if test.expectError { + assert.Errorf(t, err, "Unexpected nil error from IsUnixDomainSocket for %s", test.label) + } else { + assert.NoErrorf(t, err, "Unexpected error invoking IsUnixDomainSocket for %s", test.label) + } + assert.Equal(t, result, test.expectSocket, "Unexpected result from IsUnixDomainSocket: %v for %s", result, test.label) + } +} diff --git a/pkg/util/filesystem/util_unix.go b/pkg/util/filesystem/util_unix.go new file mode 100644 index 0000000000000..df887f94508b0 --- /dev/null +++ b/pkg/util/filesystem/util_unix.go @@ -0,0 +1,37 @@ +//go:build freebsd || linux || darwin +// +build freebsd linux darwin + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filesystem + +import ( + "fmt" + "os" +) + +// IsUnixDomainSocket returns whether a given file is a AF_UNIX socket file +func IsUnixDomainSocket(filePath string) (bool, error) { + fi, err := os.Stat(filePath) + if err != nil { + return false, fmt.Errorf("stat file %s failed: %v", filePath, err) + } + if fi.Mode()&os.ModeSocket == 0 { + return false, nil + } + return true, nil +} diff --git a/pkg/util/filesystem/util_windows.go b/pkg/util/filesystem/util_windows.go new file mode 100644 index 0000000000000..cd6a11ed30893 --- /dev/null +++ b/pkg/util/filesystem/util_windows.go @@ -0,0 +1,87 @@ +//go:build windows +// +build windows + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "fmt" + "net" + "os" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +const ( + // Amount of time to wait between attempting to use a Unix domain socket. + // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 + // the first attempt will most likely fail, hence the need to retry + socketDialRetryPeriod = 1 * time.Second + // Overall timeout value to dial a Unix domain socket, including retries + socketDialTimeout = 4 * time.Second +) + +// IsUnixDomainSocket returns whether a given file is a AF_UNIX socket file +// Note that due to the retry logic inside, it could take up to 4 seconds +// to determine whether or not the file path supplied is a Unix domain socket +func IsUnixDomainSocket(filePath string) (bool, error) { + // Due to the absence of golang support for os.ModeSocket in Windows (https://github.com/golang/go/issues/33357) + // we need to dial the file and check if we receive an error to determine if a file is Unix Domain Socket file. + + // Note that querrying for the Reparse Points (https://docs.microsoft.com/en-us/windows/win32/fileio/reparse-points) + // for the file (using FSCTL_GET_REPARSE_POINT) and checking for reparse tag: reparseTagSocket + // does NOT work in 1809 if the socket file is created within a bind mounted directory by a container + // and the FSCTL is issued in the host by the kubelet. + + // If the file does not exist, it cannot be a Unix domain socket. + if _, err := os.Stat(filePath); os.IsNotExist(err) { + return false, fmt.Errorf("File %s not found. 
Err: %v", filePath, err) + } + + klog.V(6).InfoS("Function IsUnixDomainSocket starts", "filePath", filePath) + // As detailed in https://github.com/kubernetes/kubernetes/issues/104584 we cannot rely + // on the Unix Domain socket working on the very first try, hence the potential need to + // dial multiple times + var lastSocketErr error + err := wait.PollImmediate(socketDialRetryPeriod, socketDialTimeout, + func() (bool, error) { + klog.V(6).InfoS("Dialing the socket", "filePath", filePath) + var c net.Conn + c, lastSocketErr = net.Dial("unix", filePath) + if lastSocketErr == nil { + c.Close() + klog.V(6).InfoS("Socket dialed successfully", "filePath", filePath) + return true, nil + } + klog.V(6).InfoS("Failed the current attempt to dial the socket, so pausing before retry", + "filePath", filePath, "err", lastSocketErr, "socketDialRetryPeriod", + socketDialRetryPeriod) + return false, nil + }) + + // PollImmediate will return "timed out waiting for the condition" if the function it + // invokes never returns true + if err != nil { + klog.V(2).InfoS("Failed all attempts to dial the socket so marking it as a non-Unix Domain socket. Last socket error along with the error from PollImmediate follow", + "filePath", filePath, "lastSocketErr", lastSocketErr, "err", err) + return false, nil + } + return true, nil +} diff --git a/pkg/util/filesystem/util_windows_test.go b/pkg/util/filesystem/util_windows_test.go new file mode 100644 index 0000000000000..7a4afefce426b --- /dev/null +++ b/pkg/util/filesystem/util_windows_test.go @@ -0,0 +1,89 @@ +//go:build windows +// +build windows + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "net" + "os" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIsUnixDomainSocketPipe(t *testing.T) { + generatePipeName := func(suffixLen int) string { + rand.Seed(time.Now().UnixNano()) + letter := []rune("abcdef0123456789") + b := make([]rune, suffixLen) + for i := range b { + b[i] = letter[rand.Intn(len(letter))] + } + return "\\\\.\\pipe\\test-pipe" + string(b) + } + testFile := generatePipeName(4) + pipeln, err := winio.ListenPipe(testFile, &winio.PipeConfig{SecurityDescriptor: "D:P(A;;GA;;;BA)(A;;GA;;;SY)"}) + defer pipeln.Close() + + require.NoErrorf(t, err, "Failed to listen on named pipe for test purposes: %v", err) + result, err := IsUnixDomainSocket(testFile) + assert.NoError(t, err, "Unexpected error from IsUnixDomainSocket.") + assert.False(t, result, "Unexpected result: true from IsUnixDomainSocket.") +} + +// This is required as on Windows it's possible for the socket file backing a Unix domain socket to +// exist but not be ready for socket communications yet as per +// https://github.com/kubernetes/kubernetes/issues/104584 +func TestPendingUnixDomainSocket(t *testing.T) { + // Create a temporary file that will simulate the Unix domain socket file in a + // not-yet-ready state. 
We need this because the Kubelet keeps an eye on file + // changes and acts on them, leading to potential race issues as described in + // the referenced issue above + f, err := os.CreateTemp("", "test-domain-socket") + require.NoErrorf(t, err, "Failed to create file for test purposes: %v", err) + testFile := f.Name() + f.Close() + + // Start the check at this point + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + result, err := IsUnixDomainSocket(testFile) + assert.Nil(t, err, "Unexpected error from IsUnixDomainSocket: %v", err) + assert.True(t, result, "Unexpected result: false from IsUnixDomainSocket.") + wg.Done() + }() + + // Wait a sufficient amount of time to make sure the retry logic kicks in + time.Sleep(socketDialRetryPeriod) + + // Replace the temporary file with an actual Unix domain socket file + os.Remove(testFile) + ta, err := net.ResolveUnixAddr("unix", testFile) + require.NoError(t, err, "Failed to ResolveUnixAddr.") + unixln, err := net.ListenUnix("unix", ta) + require.NoError(t, err, "Failed to ListenUnix.") + + // Wait for the goroutine to finish, then close the socket + wg.Wait() + unixln.Close() +} diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go index ef3c98258ac88..b17646320fc99 100644 --- a/pkg/volume/csi/csi_attacher.go +++ b/pkg/volume/csi/csi_attacher.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" @@ -287,7 +288,9 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo if c.csiClient == nil { c.csiClient, err = newCsiDriverClient(csiDriverName(csiSource.Driver)) if err != nil { - return errors.New(log("attacher.MountDevice failed to create newCsiDriverClient: %v", err)) + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return volumetypes.NewTransientOperationFailure(log("attacher.MountDevice failed to create newCsiDriverClient: %v", err)) } } csi := c.csiClient @@ -339,9 +342,10 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo // Store volume metadata for UnmountDevice. Keep it around even if the // driver does not support NodeStage, UnmountDevice still needs it. 
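The csi_attacher.go hunks in this area reclassify a missing CSI driver client as a transient failure rather than a hard error, so the caller can simply retry. A hedged sketch of how the accompanying tests tell the two apart, reusing the reflect-based type check this patch adds:

    // A transient failure is signalled by a volumetypes.TransientOperationFailure
    // value; comparing dynamic types distinguishes it from a plain error.
    transientError := volumetypes.NewTransientOperationFailure("")
    err := csiAttacher.UnmountDevice(deviceMountPath)
    if reflect.TypeOf(err) != reflect.TypeOf(transientError) {
        t.Fatalf("expected transient error, got %T (%v)", err, err)
    }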
- if err = os.MkdirAll(deviceMountPath, 0750); err != nil { + if err = filesystem.MkdirAllWithPathCheck(deviceMountPath, 0750); err != nil { return errors.New(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err)) } + klog.V(4).Info(log("created target path successfully [%s]", deviceMountPath)) dataDir := filepath.Dir(deviceMountPath) data := map[string]string{ @@ -607,7 +611,9 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { if c.csiClient == nil { c.csiClient, err = newCsiDriverClient(csiDriverName(driverName)) if err != nil { - return errors.New(log("attacher.UnmountDevice failed to create newCsiDriverClient: %v", err)) + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return volumetypes.NewTransientOperationFailure(log("attacher.UnmountDevice failed to create newCsiDriverClient: %v", err)) } } csi := c.csiClient diff --git a/pkg/volume/csi/csi_attacher_test.go b/pkg/volume/csi/csi_attacher_test.go index d2ed6ba37e40f..9e1281a3ac480 100644 --- a/pkg/volume/csi/csi_attacher_test.go +++ b/pkg/volume/csi/csi_attacher_test.go @@ -1109,6 +1109,7 @@ func TestAttacherMountDevice(t *testing.T) { exitError error spec *volume.Spec watchTimeout time.Duration + skipClientSetup bool }{ { testName: "normal PV", @@ -1249,6 +1250,20 @@ func TestAttacherMountDevice(t *testing.T) { createAttachment: true, spec: volume.NewSpecFromPersistentVolume(makeTestPV(pvName, 10, testDriver, "test-vol1"), false), }, + { + testName: "driver not specified", + volName: "test-vol1", + devicePath: "path1", + deviceMountPath: "path2", + fsGroup: &testFSGroup, + stageUnstageSet: true, + createAttachment: true, + populateDeviceMountPath: false, + spec: volume.NewSpecFromPersistentVolume(makeTestPV(pvName, 10, "not-found", "test-vol1"), false), + exitError: transientError, + shouldFail: true, + skipClientSetup: true, + }, } for _, tc := range testCases { @@ -1277,7 +1292,9 @@ func TestAttacherMountDevice(t *testing.T) { t.Fatalf("failed to create new attacher: %v", err0) } csiAttacher := getCsiAttacherFromVolumeAttacher(attacher, tc.watchTimeout) - csiAttacher.csiClient = setupClientWithVolumeMountGroup(t, tc.stageUnstageSet, tc.driverSupportsVolumeMountGroup) + if !tc.skipClientSetup { + csiAttacher.csiClient = setupClientWithVolumeMountGroup(t, tc.stageUnstageSet, tc.driverSupportsVolumeMountGroup) + } if tc.deviceMountPath != "" { tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath) @@ -1342,16 +1359,15 @@ func TestAttacherMountDevice(t *testing.T) { t.Errorf("failed to modify permissions after test: %v", err) } } + if tc.exitError != nil && reflect.TypeOf(tc.exitError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(tc.exitError), reflect.TypeOf(err), err) + } return } if tc.shouldFail { t.Errorf("test should fail, but no error occurred") } - if tc.exitError != nil && reflect.TypeOf(tc.exitError) != reflect.TypeOf(err) { - t.Fatalf("expected exitError: %v got: %v", tc.exitError, err) - } - // Verify call goes through all the way numStaged := 1 if !tc.stageUnstageSet { @@ -1569,6 +1585,7 @@ func TestAttacherMountDeviceWithInline(t *testing.T) { } func TestAttacherUnmountDevice(t *testing.T) { + transientError := volumetypes.NewTransientOperationFailure("") testCases := []struct { testName string volID string @@ -1578,6 +1595,8 @@ func TestAttacherUnmountDevice(t *testing.T) { stageUnstageSet bool shouldFail bool watchTimeout time.Duration + 
exitError error + unsetClient bool }{ // PV agnostic path positive test cases { @@ -1609,6 +1628,17 @@ func TestAttacherUnmountDevice(t *testing.T) { stageUnstageSet: true, shouldFail: true, }, + // Ensure that a transient error is returned if the client is not established + { + testName: "fail with transient error, json file exists but client not found", + volID: "project/zone/test-vol1", + deviceMountPath: "plugins/csi/" + generateSha("project/zone/test-vol1") + "/globalmount", + jsonFile: `{"driverName": "unknown-driver", "volumeHandle":"project/zone/test-vol1"}`, + stageUnstageSet: true, + shouldFail: true, + exitError: transientError, + unsetClient: true, + }, } for _, tc := range testCases { @@ -1656,6 +1686,11 @@ func TestAttacherUnmountDevice(t *testing.T) { t.Fatalf("Failed to create PV: %v", err) } } + // Clear out the client if specified + // The lookup to generate a new client will fail + if tc.unsetClient { + csiAttacher.csiClient = nil + } // Run err := csiAttacher.UnmountDevice(tc.deviceMountPath) @@ -1664,6 +1699,9 @@ func TestAttacherUnmountDevice(t *testing.T) { if !tc.shouldFail { t.Errorf("test should not fail, but error occurred: %v", err) } + if tc.exitError != nil && reflect.TypeOf(tc.exitError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(tc.exitError), reflect.TypeOf(err), err) + } return } if tc.shouldFail { diff --git a/pkg/volume/csi/csi_block.go b/pkg/volume/csi/csi_block.go index 3e68b7bb27a6d..fa2570b42c1fb 100644 --- a/pkg/volume/csi/csi_block.go +++ b/pkg/volume/csi/csi_block.go @@ -319,7 +319,9 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { csiClient, err := m.csiClientGetter.Get() if err != nil { - return "", errors.New(log("blockMapper.SetUpDevice failed to get CSI client: %v", err)) + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return "", volumetypes.NewTransientOperationFailure(log("blockMapper.SetUpDevice failed to get CSI client: %v", err)) } // Call NodeStageVolume @@ -379,7 +381,9 @@ func (m *csiBlockMapper) MapPodDevice() (string, error) { csiClient, err := m.csiClientGetter.Get() if err != nil { - return "", errors.New(log("blockMapper.MapPodDevice failed to get CSI client: %v", err)) + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return "", volumetypes.NewTransientOperationFailure(log("blockMapper.MapPodDevice failed to get CSI client: %v", err)) } // Call NodePublishVolume @@ -444,7 +448,9 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error csiClient, err := m.csiClientGetter.Get() if err != nil { - return errors.New(log("blockMapper.TearDownDevice failed to get CSI client: %v", err)) + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return volumetypes.NewTransientOperationFailure(log("blockMapper.TearDownDevice failed to get CSI client: %v", err)) } // Call NodeUnstageVolume @@ -506,7 +512,9 @@ func (m *csiBlockMapper) UnmapPodDevice() error { csiClient, err := m.csiClientGetter.Get() if err != nil { - return errors.New(log("blockMapper.UnmapPodDevice failed to get CSI client: %v", err)) + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return volumetypes.NewTransientOperationFailure(log("blockMapper.UnmapPodDevice failed to get CSI 
client: %v", err)) } ctx, cancel := createCSIOperationContext(m.spec, csiTimeout) diff --git a/pkg/volume/csi/csi_block_test.go b/pkg/volume/csi/csi_block_test.go index 3a3d153f2930a..656053d21a447 100644 --- a/pkg/volume/csi/csi_block_test.go +++ b/pkg/volume/csi/csi_block_test.go @@ -32,6 +32,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakeclient "k8s.io/client-go/kubernetes/fake" "k8s.io/kubernetes/pkg/volume" + volumetypes "k8s.io/kubernetes/pkg/volume/util/types" ) func prepareBlockMapperTest(plug *csiPlugin, specVolumeName string, t *testing.T) (*csiBlockMapper, *volume.Spec, *api.PersistentVolume, error) { @@ -283,6 +284,45 @@ func TestBlockMapperSetupDeviceError(t *testing.T) { } } +func TestBlockMapperSetupDeviceNoClientError(t *testing.T) { + transientError := volumetypes.NewTransientOperationFailure("") + plug, tmpDir := newTestPlugin(t, nil) + defer os.RemoveAll(tmpDir) + + csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv", t) + if err != nil { + t.Fatalf("Failed to make a new Mapper: %v", err) + } + + pvName := pv.GetName() + nodeName := string(plug.host.GetNodeName()) + + csiMapper.csiClient = setupClient(t, true) + + attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName)) + attachment := makeTestAttachment(attachID, nodeName, pvName) + attachment.Status.Attached = true + _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("failed to setup VolumeAttachment: %v", err) + } + t.Log("created attachment ", attachID) + + // Clear out the clients + // The lookup to generate a new client will fail when it tries to query a driver with an unknown name + csiMapper.csiClient = nil + csiMapper.csiClientGetter.csiClient = nil + // Note that prepareBlockMapperTest above will create a driver with a name of "test-driver" + csiMapper.csiClientGetter.driverName = "unknown-driver" + + _, err = csiMapper.SetUpDevice() + if err == nil { + t.Errorf("test should fail, but no error occurred") + } else if reflect.TypeOf(transientError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(transientError), reflect.TypeOf(err), err) + } +} + func TestBlockMapperMapPodDevice(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil) defer os.RemoveAll(tmpDir) @@ -413,6 +453,45 @@ func TestBlockMapperMapPodDeviceWithPodInfo(t *testing.T) { } } +func TestBlockMapperMapPodDeviceNoClientError(t *testing.T) { + transientError := volumetypes.NewTransientOperationFailure("") + plug, tmpDir := newTestPlugin(t, nil) + defer os.RemoveAll(tmpDir) + + csiMapper, _, pv, err := prepareBlockMapperTest(plug, "test-pv", t) + if err != nil { + t.Fatalf("Failed to make a new Mapper: %v", err) + } + + pvName := pv.GetName() + nodeName := string(plug.host.GetNodeName()) + + csiMapper.csiClient = setupClient(t, true) + + attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), nodeName) + attachment := makeTestAttachment(attachID, nodeName, pvName) + attachment.Status.Attached = true + _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.Background(), attachment, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("failed to setup VolumeAttachment: %v", err) + } + t.Log("created attachment ", attachID) + + // Clear out the clients + // The lookup to generate a new client will fail when it tries to query a driver with an unknown name + csiMapper.csiClient = nil + 
csiMapper.csiClientGetter.csiClient = nil + // Note that prepareBlockMapperTest above will create a driver with a name of "test-driver" + csiMapper.csiClientGetter.driverName = "unknown-driver" + + _, err = csiMapper.MapPodDevice() + if err == nil { + t.Errorf("test should fail, but no error occurred") + } else if reflect.TypeOf(transientError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(transientError), reflect.TypeOf(err), err) + } +} + func TestBlockMapperTearDownDevice(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil) defer os.RemoveAll(tmpDir) @@ -471,6 +550,62 @@ func TestBlockMapperTearDownDevice(t *testing.T) { } } +func TestBlockMapperTearDownDeviceNoClientError(t *testing.T) { + transientError := volumetypes.NewTransientOperationFailure("") + plug, tmpDir := newTestPlugin(t, nil) + defer os.RemoveAll(tmpDir) + + _, spec, pv, err := prepareBlockMapperTest(plug, "test-pv", t) + if err != nil { + t.Fatalf("Failed to make a new Mapper: %v", err) + } + + // save volume data + dir := getVolumeDeviceDataDir(pv.ObjectMeta.Name, plug.host) + if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) { + t.Errorf("failed to create dir [%s]: %v", dir, err) + } + + if err := saveVolumeData( + dir, + volDataFileName, + map[string]string{ + volDataKey.specVolID: pv.ObjectMeta.Name, + volDataKey.driverName: testDriver, + volDataKey.volHandle: testVol, + }, + ); err != nil { + t.Fatalf("failed to save volume data: %v", err) + } + + unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID) + if err != nil { + t.Fatalf("failed to make a new Unmapper: %v", err) + } + + csiUnmapper := unmapper.(*csiBlockMapper) + csiUnmapper.csiClient = setupClient(t, true) + + globalMapPath, err := csiUnmapper.GetGlobalMapPath(spec) + if err != nil { + t.Fatalf("unmapper failed to GetGlobalMapPath: %v", err) + } + + // Clear out the clients + // The lookup to generate a new client will fail when it tries to query a driver with an unknown name + csiUnmapper.csiClient = nil + csiUnmapper.csiClientGetter.csiClient = nil + // Note that prepareBlockMapperTest above will create a driver with a name of "test-driver" + csiUnmapper.csiClientGetter.driverName = "unknown-driver" + + err = csiUnmapper.TearDownDevice(globalMapPath, "/dev/test") + if err == nil { + t.Errorf("test should fail, but no error occurred") + } else if reflect.TypeOf(transientError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(transientError), reflect.TypeOf(err), err) + } +} + func TestVolumeSetupTeardown(t *testing.T) { // Follow volume setup + teardown sequences at top of cs_block.go and set up / clean up one CSI block device. // Focus on testing that there were no leftover files present after the cleanup. 
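The recurring change in the csi_attacher.go and csi_block.go hunks above is to return volumetypes.NewTransientOperationFailure instead of a plain error when the CSI client cannot be created, so that a driver that has not registered yet (for example during kubelet startup) leads to a retry rather than a hard failure. A minimal sketch of how a caller could classify such an error is below; it assumes only that NewTransientOperationFailure returns a *volumetypes.TransientOperationFailure (the same property the reflect.TypeOf comparisons in the new tests rely on), and the helper name and retry decision are illustrative rather than the actual operation-executor logic.

package volumesketch

import (
	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)

// isTransientCSIError reports whether err is the transient failure produced
// when the CSI driver is not (yet) available. Such errors should be retried
// as-is instead of being treated as a permanent mount/unmount failure.
func isTransientCSIError(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(*volumetypes.TransientOperationFailure)
	return ok
}

An errors.As-based check would behave the same way for these call sites and would additionally tolerate wrapped errors; the new tests compare reflect.TypeOf values instead, which is equivalent here because the error is returned unwrapped.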
@@ -587,3 +722,86 @@ func TestVolumeSetupTeardown(t *testing.T) { t.Errorf("volume staging path %s was not deleted", stagingPath) } } + +func TestUnmapPodDeviceNoClientError(t *testing.T) { + transientError := volumetypes.NewTransientOperationFailure("") + plug, tmpDir := newTestPlugin(t, nil) + defer os.RemoveAll(tmpDir) + + csiMapper, spec, pv, err := prepareBlockMapperTest(plug, "test-pv", t) + if err != nil { + t.Fatalf("Failed to make a new Mapper: %v", err) + } + + pvName := pv.GetName() + nodeName := string(plug.host.GetNodeName()) + + csiMapper.csiClient = setupClient(t, true) + + attachID := getAttachmentName(csiMapper.volumeID, string(csiMapper.driverName), string(nodeName)) + attachment := makeTestAttachment(attachID, nodeName, pvName) + attachment.Status.Attached = true + _, err = csiMapper.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("failed to setup VolumeAttachment: %v", err) + } + t.Log("created attachment ", attachID) + + stagingPath, err := csiMapper.SetUpDevice() + if err != nil { + t.Fatalf("mapper failed to SetupDevice: %v", err) + } + // Check if NodeStageVolume staged to the right path + svols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodeStagedVolumes() + svol, ok := svols[csiMapper.volumeID] + if !ok { + t.Error("csi server may not have received NodeStageVolume call") + } + if svol.Path != stagingPath { + t.Errorf("csi server expected device path %s, got %s", stagingPath, svol.Path) + } + + path, err := csiMapper.MapPodDevice() + if err != nil { + t.Fatalf("mapper failed to GetGlobalMapPath: %v", err) + } + pvols := csiMapper.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes() + pvol, ok := pvols[csiMapper.volumeID] + if !ok { + t.Error("csi server may not have received NodePublishVolume call") + } + publishPath := csiMapper.getPublishPath() + if pvol.Path != publishPath { + t.Errorf("csi server expected path %s, got %s", publishPath, pvol.Path) + } + if path != publishPath { + t.Errorf("csi server expected path %s, but MapPodDevice returned %s", publishPath, path) + } + + unmapper, err := plug.NewBlockVolumeUnmapper(pv.ObjectMeta.Name, testPodUID) + if err != nil { + t.Fatalf("failed to make a new Unmapper: %v", err) + } + + csiUnmapper := unmapper.(*csiBlockMapper) + csiUnmapper.csiClient = csiMapper.csiClient + + _, err = csiUnmapper.GetGlobalMapPath(spec) + if err != nil { + t.Fatalf("unmapper failed to GetGlobalMapPath: %v", err) + } + + // Clear out the clients + // The lookup to generate a new client will fail when it tries to query a driver with an unknown name + csiUnmapper.csiClient = nil + csiUnmapper.csiClientGetter.csiClient = nil + // Note that prepareBlockMapperTest above will create a driver with a name of "test-driver" + csiUnmapper.csiClientGetter.driverName = "unknown-driver" + + err = csiUnmapper.UnmapPodDevice() + if err == nil { + t.Errorf("test should fail, but no error occurred") + } else if reflect.TypeOf(transientError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(transientError), reflect.TypeOf(err), err) + } +} diff --git a/pkg/volume/csi/csi_metrics.go b/pkg/volume/csi/csi_metrics.go index b61e2fd2813bb..1703e5447834f 100644 --- a/pkg/volume/csi/csi_metrics.go +++ b/pkg/volume/csi/csi_metrics.go @@ -26,6 +26,7 @@ import ( servermetrics "k8s.io/kubernetes/pkg/kubelet/server/metrics" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" + volumetypes 
"k8s.io/kubernetes/pkg/volume/util/types" ) var _ volume.MetricsProvider = &metricsCsi{} @@ -60,7 +61,9 @@ func (mc *metricsCsi) GetMetrics() (*volume.Metrics, error) { // Get CSI client csiClient, err := mc.csiClientGetter.Get() if err != nil { - return nil, err + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return nil, volumetypes.NewTransientOperationFailure(err.Error()) } // Check whether "GET_VOLUME_STATS" is set volumeStatsSet, err := csiClient.NodeSupportsVolumeStats(ctx) diff --git a/pkg/volume/csi/csi_metrics_test.go b/pkg/volume/csi/csi_metrics_test.go index 9eecdf62d5f82..fc6253399d9db 100644 --- a/pkg/volume/csi/csi_metrics_test.go +++ b/pkg/volume/csi/csi_metrics_test.go @@ -18,12 +18,14 @@ package csi import ( "io" + "reflect" "testing" csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/csi/fake" + volumetypes "k8s.io/kubernetes/pkg/volume/util/types" ) func TestGetMetrics(t *testing.T) { @@ -137,6 +139,36 @@ func TestGetMetricsDriverNotSupportStats(t *testing.T) { } +// test GetMetrics with a volume that does not support stats +func TestGetMetricsDriverNotFound(t *testing.T) { + transientError := volumetypes.NewTransientOperationFailure("") + tests := []struct { + name string + volumeID string + targetPath string + exitError error + }{ + { + name: "volume with no driver", + volumeID: "foobar", + targetPath: "/mnt/foo", + exitError: transientError, + }, + } + + for _, tc := range tests { + metricsGetter := &metricsCsi{volumeID: tc.volumeID, targetPath: tc.targetPath} + metricsGetter.driverName = "unknown-driver" + _, err := metricsGetter.GetMetrics() + if err == nil { + t.Errorf("test should fail, but no error occurred") + } else if reflect.TypeOf(tc.exitError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(transientError), reflect.TypeOf(err), err) + } + } + +} + func getRawVolumeInfo() *csipbv1.NodeGetVolumeStatsResponse { return &csipbv1.NodeGetVolumeStatsResponse{ Usage: []*csipbv1.VolumeUsage{ diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index 468f882b88458..a1afdfa217373 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -105,6 +105,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error csi, err := c.csiClientGetter.Get() if err != nil { + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to get CSI client: %v", err)) } @@ -419,7 +421,9 @@ func (c *csiMountMgr) TearDownAt(dir string) error { volID := c.volumeID csi, err := c.csiClientGetter.Get() if err != nil { - return errors.New(log("Unmounter.TearDownAt failed to get CSI client: %v", err)) + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return volumetypes.NewTransientOperationFailure(log("Unmounter.TearDownAt failed to get CSI client: %v", err)) } // Could not get spec info on whether this is a migrated operation because c.spec is nil diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go index a5e542ea6a4f5..62c94698c206d 100644 --- a/pkg/volume/csi/csi_mounter_test.go +++ b/pkg/volume/csi/csi_mounter_test.go @@ -366,6 +366,7 @@ 
func TestMounterSetUp(t *testing.T) { func TestMounterSetUpSimple(t *testing.T) { fakeClient := fakeclient.NewSimpleClientset() plug, tmpDir := newTestPlugin(t, fakeClient) + transientError := volumetypes.NewTransientOperationFailure("") defer os.RemoveAll(tmpDir) testCases := []struct { @@ -377,6 +378,8 @@ func TestMounterSetUpSimple(t *testing.T) { spec func(string, []string) *volume.Spec newMounterShouldFail bool setupShouldFail bool + unsetClient bool + exitError error }{ { name: "setup with ephemeral source", @@ -415,6 +418,21 @@ func TestMounterSetUpSimple(t *testing.T) { newMounterShouldFail: true, spec: func(fsType string, options []string) *volume.Spec { return nil }, }, + { + name: "setup with unknown CSI driver", + podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), + mode: storage.VolumeLifecyclePersistent, + fsType: "zfs", + spec: func(fsType string, options []string) *volume.Spec { + pvSrc := makeTestPV("pv1", 20, "unknown-driver", "vol1") + pvSrc.Spec.CSI.FSType = fsType + pvSrc.Spec.MountOptions = options + return volume.NewSpecFromPersistentVolume(pvSrc, false) + }, + setupShouldFail: true, + unsetClient: true, + exitError: transientError, + }, } for _, tc := range testCases { @@ -450,13 +468,26 @@ func TestMounterSetUpSimple(t *testing.T) { t.Fatalf("failed to setup VolumeAttachment: %v", err) } + if tc.unsetClient { + // Clear out the clients + csiMounter.csiClient = nil + csiMounter.csiClientGetter.csiClient = nil + t.Log("driver name is ", csiMounter.csiClientGetter.driverName) + } + // Mounter.SetUp() err = csiMounter.SetUp(volume.MounterArgs{}) - if tc.setupShouldFail && err != nil { - t.Log(err) - return - } - if !tc.setupShouldFail && err != nil { + if tc.setupShouldFail { + if err != nil { + if tc.exitError != nil && reflect.TypeOf(tc.exitError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(tc.exitError), reflect.TypeOf(err), err) + } + t.Log(err) + return + } else { + t.Error("test should fail, but no error occurred") + } + } else if err != nil { t.Fatal("unexpected error:", err) } @@ -1063,6 +1094,64 @@ func TestUnmounterTeardown(t *testing.T) { } +func TestUnmounterTeardownNoClientError(t *testing.T) { + transientError := volumetypes.NewTransientOperationFailure("") + plug, tmpDir := newTestPlugin(t, nil) + defer os.RemoveAll(tmpDir) + registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) + pv := makeTestPV("test-pv", 10, testDriver, testVol) + + // save the data file prior to unmount + targetDir := getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host) + dir := filepath.Join(targetDir, "mount") + if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) { + t.Errorf("failed to create dir [%s]: %v", dir, err) + } + + // do a fake local mount + diskMounter := util.NewSafeFormatAndMountFromHost(plug.GetPluginName(), plug.host) + device := "/fake/device" + if goruntime.GOOS == "windows" { + // We need disk numbers on Windows. 
+ device = "1" + } + if err := diskMounter.FormatAndMount(device, dir, "testfs", nil); err != nil { + t.Errorf("failed to mount dir [%s]: %v", dir, err) + } + + if err := saveVolumeData( + targetDir, + volDataFileName, + map[string]string{ + volDataKey.specVolID: pv.ObjectMeta.Name, + volDataKey.driverName: testDriver, + volDataKey.volHandle: testVol, + }, + ); err != nil { + t.Fatalf("failed to save volume data: %v", err) + } + + unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID) + if err != nil { + t.Fatalf("failed to make a new Unmounter: %v", err) + } + + csiUnmounter := unmounter.(*csiMountMgr) + + // Clear out the cached client + // The lookup to generate a new client will fail when it tries to query a driver with an unknown name + csiUnmounter.csiClientGetter.csiClient = nil + // Note that registerFakePlugin above will create a driver with a name of "test-driver" + csiUnmounter.csiClientGetter.driverName = "unknown-driver" + + err = csiUnmounter.TearDownAt(dir) + if err == nil { + t.Errorf("test should fail, but no error occurred") + } else if reflect.TypeOf(transientError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(transientError), reflect.TypeOf(err), err) + } +} + func TestIsCorruptedDir(t *testing.T) { existingMountPath, err := os.MkdirTemp(os.TempDir(), "blobfuse-csi-mount-test") if err != nil { diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 2556517276e68..7176c826c15a6 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -157,7 +157,12 @@ func (h *RegistrationHandler) validateVersions(callerName, pluginName string, en } // Validate version - newDriverHighestVersion, err := highestSupportedVersion(versions) + // CSI currently only has version 0.x and 1.x (see https://github.com/container-storage-interface/spec/releases). + // Therefore any driver claiming version 2.x+ is ignored as an unsupported versions. + // Future 1.x versions of CSI are supposed to be backwards compatible so this version of Kubernetes will work with any 1.x driver + // (or 0.x), but it may not work with 2.x drivers (because 2.x does not have to be backwards compatible with 1.x). + // CSI v0.x is no longer supported as of Kubernetes v1.17 in accordance with deprecation policy set out in Kubernetes v1.13. + newDriverHighestVersion, err := utilversion.HighestSupportedVersion(versions) if err != nil { return nil, errors.New(log("%s for CSI driver %q failed. None of the versions specified %q are supported. 
err=%v", callerName, pluginName, versions, err)) } @@ -231,9 +236,6 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error { csitranslationplugins.AzureFileInTreePluginName: func() bool { return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureFile) }, - csitranslationplugins.VSphereInTreePluginName: func() bool { - return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationvSphere) - }, csitranslationplugins.PortworxVolumePluginName: func() bool { return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx) }, @@ -858,44 +860,6 @@ func unregisterDriver(driverName string) error { return nil } -// Return the highest supported version -func highestSupportedVersion(versions []string) (*utilversion.Version, error) { - if len(versions) == 0 { - return nil, errors.New(log("CSI driver reporting empty array for supported versions")) - } - - var highestSupportedVersion *utilversion.Version - var theErr error - for i := len(versions) - 1; i >= 0; i-- { - currentHighestVer, err := utilversion.ParseGeneric(versions[i]) - if err != nil { - theErr = err - continue - } - if currentHighestVer.Major() > 1 { - // CSI currently only has version 0.x and 1.x (see https://github.com/container-storage-interface/spec/releases). - // Therefore any driver claiming version 2.x+ is ignored as an unsupported versions. - // Future 1.x versions of CSI are supposed to be backwards compatible so this version of Kubernetes will work with any 1.x driver - // (or 0.x), but it may not work with 2.x drivers (because 2.x does not have to be backwards compatible with 1.x). - continue - } - if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { - highestSupportedVersion = currentHighestVer - } - } - - if highestSupportedVersion == nil { - return nil, fmt.Errorf("could not find a highest supported version from versions (%v) reported by this driver: %v", versions, theErr) - } - - if highestSupportedVersion.Major() != 1 { - // CSI v0.x is no longer supported as of Kubernetes v1.17 in - // accordance with deprecation policy set out in Kubernetes v1.13 - return nil, fmt.Errorf("highest supported version reported by driver is %v, must be v1.x", highestSupportedVersion) - } - return highestSupportedVersion, nil -} - // waitForAPIServerForever waits forever to get a CSINode instance as a proxy // for a healthy APIServer func waitForAPIServerForever(client clientset.Interface, nodeName types.NodeName) error { diff --git a/pkg/volume/csi/csi_plugin_test.go b/pkg/volume/csi/csi_plugin_test.go index 4a76d734195cd..e6c4f18a5b46a 100644 --- a/pkg/volume/csi/csi_plugin_test.go +++ b/pkg/volume/csi/csi_plugin_test.go @@ -28,6 +28,7 @@ import ( storage "k8s.io/api/storage/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" @@ -142,7 +143,7 @@ func newTestPluginWithVolumeHost(t *testing.T, client *fakeclient.Clientset, hos } func registerFakePlugin(pluginName, endpoint string, versions []string, t *testing.T) { - highestSupportedVersions, err := highestSupportedVersion(versions) + highestSupportedVersions, err := utilversion.HighestSupportedVersion(versions) if err != nil { t.Fatalf("unexpected error parsing versions (%v) for pluginName %q endpoint %q: %#v", versions, pluginName, endpoint, err) } @@ -369,7 +370,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) { for _, tc := 
range testCases { t.Run(tc.name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tc.seLinuxMountEnabled)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, tc.seLinuxMountEnabled)() mounter, err := plug.NewMounter( @@ -1411,7 +1411,7 @@ func TestValidatePluginExistingDriver(t *testing.T) { for _, tc := range testCases { // Arrange & Act - highestSupportedVersions1, err := highestSupportedVersion(tc.versions1) + highestSupportedVersions1, err := utilversion.HighestSupportedVersion(tc.versions1) if err != nil { t.Fatalf("unexpected error parsing version for testcase: %#v: %v", tc, err) } @@ -1434,109 +1434,3 @@ func TestValidatePluginExistingDriver(t *testing.T) { } } } - -func TestHighestSupportedVersion(t *testing.T) { - testCases := []struct { - versions []string - expectedHighestSupportedVersion string - shouldFail bool - }{ - { - versions: []string{"v1.0.0"}, - expectedHighestSupportedVersion: "1.0.0", - shouldFail: false, - }, - { - versions: []string{"0.3.0"}, - shouldFail: true, - }, - { - versions: []string{"0.2.0"}, - shouldFail: true, - }, - { - versions: []string{"1.0.0"}, - expectedHighestSupportedVersion: "1.0.0", - shouldFail: false, - }, - { - versions: []string{"v0.3.0"}, - shouldFail: true, - }, - { - versions: []string{"0.2.0"}, - shouldFail: true, - }, - { - versions: []string{"0.2.0", "v0.3.0"}, - shouldFail: true, - }, - { - versions: []string{"0.2.0", "v1.0.0"}, - expectedHighestSupportedVersion: "1.0.0", - shouldFail: false, - }, - { - versions: []string{"0.2.0", "v1.2.3"}, - expectedHighestSupportedVersion: "1.2.3", - shouldFail: false, - }, - { - versions: []string{"v1.2.3", "v0.3.0"}, - expectedHighestSupportedVersion: "1.2.3", - shouldFail: false, - }, - { - versions: []string{"v1.2.3", "v0.3.0", "2.0.1"}, - expectedHighestSupportedVersion: "1.2.3", - shouldFail: false, - }, - { - versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"}, - expectedHighestSupportedVersion: "1.2.3", - shouldFail: false, - }, - { - versions: []string{"4.9.12", "2.0.1"}, - expectedHighestSupportedVersion: "", - shouldFail: true, - }, - { - versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"}, - expectedHighestSupportedVersion: "1.2.3", - shouldFail: false, - }, - { - versions: []string{}, - expectedHighestSupportedVersion: "", - shouldFail: true, - }, - { - versions: []string{"var", "boo", "foo"}, - expectedHighestSupportedVersion: "", - shouldFail: true, - }, - } - - for _, tc := range testCases { - // Arrange & Act - actual, err := highestSupportedVersion(tc.versions) - - // Assert - if tc.shouldFail && err == nil { - t.Fatalf("expecting highestSupportedVersion to fail, but got nil error for testcase: %#v", tc) - } - if !tc.shouldFail && err != nil { - t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err) - } - if tc.expectedHighestSupportedVersion != "" { - result, err := actual.Compare(tc.expectedHighestSupportedVersion) - if err != nil { - t.Fatalf("comparison failed with %v for testcase %#v", err, tc) - } - if result != 0 { - t.Fatalf("expectedHighestSupportedVersion %v, but got %v for tc: %#v", tc.expectedHighestSupportedVersion, actual, tc) - } - } - } -} diff --git a/pkg/volume/csi/expander.go b/pkg/volume/csi/expander.go index 206eac9a1fa70..262b4774f4394 100644 --- a/pkg/volume/csi/expander.go +++ b/pkg/volume/csi/expander.go @@ -46,7 +46,9 @@ func (c *csiPlugin) 
NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, er csClient, err := newCsiDriverClient(csiDriverName(csiSource.Driver)) if err != nil { - return false, err + // Treat the absence of the CSI driver as a transient error + // See https://github.com/kubernetes/kubernetes/issues/120268 + return false, volumetypes.NewTransientOperationFailure(err.Error()) } fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec) if err != nil { diff --git a/pkg/volume/csi/expander_test.go b/pkg/volume/csi/expander_test.go index bb12cd6fdc677..2e4ab7ffeb698 100644 --- a/pkg/volume/csi/expander_test.go +++ b/pkg/volume/csi/expander_test.go @@ -19,6 +19,7 @@ package csi import ( "context" "os" + "reflect" "testing" "google.golang.org/grpc/codes" @@ -192,3 +193,28 @@ func TestNodeExpand(t *testing.T) { }) } } + +func TestNodeExpandNoClientError(t *testing.T) { + transientError := volumetypes.NewTransientOperationFailure("") + plug, tmpDir := newTestPlugin(t, nil) + defer os.RemoveAll(tmpDir) + spec := volume.NewSpecFromPersistentVolume(makeTestPV("test-pv", 10, "expandable", "test-vol"), false) + + newSize, _ := resource.ParseQuantity("20Gi") + + resizeOptions := volume.NodeResizeOptions{ + VolumeSpec: spec, + NewSize: newSize, + DeviceMountPath: "/foo/bar", + DeviceStagePath: "/foo/bar", + DevicePath: "/mnt/foobar", + } + + _, err := plug.NodeExpand(resizeOptions) + + if err == nil { + t.Errorf("test should fail, but no error occurred") + } else if reflect.TypeOf(transientError) != reflect.TypeOf(err) { + t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(transientError), reflect.TypeOf(err), err) + } +} diff --git a/pkg/volume/csimigration/plugin_manager.go b/pkg/volume/csimigration/plugin_manager.go index c2749ea37ec81..c38e8c5073be0 100644 --- a/pkg/volume/csimigration/plugin_manager.go +++ b/pkg/volume/csimigration/plugin_manager.go @@ -99,7 +99,7 @@ func (pm PluginManager) IsMigrationEnabledForPlugin(pluginName string) bool { case csilibplugins.CinderInTreePluginName: return true case csilibplugins.VSphereInTreePluginName: - return pm.featureGate.Enabled(features.CSIMigrationvSphere) + return true case csilibplugins.PortworxVolumePluginName: return pm.featureGate.Enabled(features.CSIMigrationPortworx) case csilibplugins.RBDVolumePluginName: diff --git a/pkg/volume/csimigration/plugin_manager_test.go b/pkg/volume/csimigration/plugin_manager_test.go index eab04073dcef2..1d2c6f19cd7b8 100644 --- a/pkg/volume/csimigration/plugin_manager_test.go +++ b/pkg/volume/csimigration/plugin_manager_test.go @@ -123,28 +123,6 @@ func TestMigrationFeatureFlagStatus(t *testing.T) { csiMigrationResult: true, csiMigrationCompleteResult: true, }, - { - name: "vsphere-volume migration flag enabled and migration-complete flag disabled with CSI migration flag enabled", - pluginName: "kubernetes.io/vsphere-volume", - pluginFeature: features.CSIMigrationvSphere, - pluginFeatureEnabled: true, - csiMigrationEnabled: true, - inTreePluginUnregister: features.InTreePluginvSphereUnregister, - inTreePluginUnregisterEnabled: false, - csiMigrationResult: true, - csiMigrationCompleteResult: false, - }, - { - name: "vsphere-volume migration flag enabled and migration-complete flag enabled with CSI migration flag enabled", - pluginName: "kubernetes.io/vsphere-volume", - pluginFeature: features.CSIMigrationvSphere, - pluginFeatureEnabled: true, - csiMigrationEnabled: true, - inTreePluginUnregister: features.InTreePluginvSphereUnregister, - inTreePluginUnregisterEnabled: true, - csiMigrationResult: true, 
- csiMigrationCompleteResult: true, - }, } csiTranslator := csitrans.New() for _, test := range testCases { diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index 0b7b4e87e1cbb..14cee160a630a 100644 --- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -1057,7 +1057,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod { Containers: []v1.Container{ { Name: "pv-recycler", - Image: "registry.k8s.io/debian-base:v2.0.0", + Image: "registry.k8s.io/build-image/debian-base:bookworm-v1.0.0", Command: []string{"/bin/sh"}, Args: []string{"-c", "test -e /scrub && find /scrub -mindepth 1 -delete && test -z \"$(ls -A /scrub)\" || exit 1"}, VolumeMounts: []v1.VolumeMount{ diff --git a/pkg/volume/util/hostutil/hostutil_test.go b/pkg/volume/util/hostutil/hostutil_test.go index 886421d4f33c0..4e68c6f1a9a00 100644 --- a/pkg/volume/util/hostutil/hostutil_test.go +++ b/pkg/volume/util/hostutil/hostutil_test.go @@ -119,11 +119,6 @@ func createSocketFile(socketDir string) (string, error) { } func TestGetFileType(t *testing.T) { - // Skip tests that fail on Windows, as discussed during the SIG Testing meeting from January 10, 2023 - if goruntime.GOOS == "windows" { - t.Skip("Skipping test that fails on Windows") - } - hu := NewHostUtil() testCase := []struct { diff --git a/pkg/volume/util/hostutil/hostutil_windows.go b/pkg/volume/util/hostutil/hostutil_windows.go index c039ada4066f2..51ad0344a1336 100644 --- a/pkg/volume/util/hostutil/hostutil_windows.go +++ b/pkg/volume/util/hostutil/hostutil_windows.go @@ -21,12 +21,16 @@ package hostutil import ( "fmt" + "io/fs" "os" "path" "path/filepath" "strings" + "syscall" + "golang.org/x/sys/windows" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/util/filesystem" "k8s.io/mount-utils" utilpath "k8s.io/utils/path" ) @@ -87,9 +91,28 @@ func (hu *HostUtil) MakeRShared(path string) error { return nil } +func isSystemCannotAccessErr(err error) bool { + if fserr, ok := err.(*fs.PathError); ok { + errno, ok := fserr.Err.(syscall.Errno) + return ok && errno == windows.ERROR_CANT_ACCESS_FILE + } + + return false +} + // GetFileType checks for sockets/block/character devices func (hu *(HostUtil)) GetFileType(pathname string) (FileType, error) { - return getFileType(pathname) + filetype, err := getFileType(pathname) + + // os.Stat will return a 1920 error (windows.ERROR_CANT_ACCESS_FILE) if we use it on a Unix Socket + // on Windows. In this case, we need to use a different method to check if it's a Unix Socket. + if isSystemCannotAccessErr(err) { + if isSocket, errSocket := filesystem.IsUnixDomainSocket(pathname); errSocket == nil && isSocket { + return FileTypeSocket, nil + } + } + + return filetype, err } // PathExists checks whether the path exists diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index bd59e865cbe04..a818357256428 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -506,9 +506,13 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( migrated := getMigratedStatusBySpec(volumeToDetach.VolumeSpec) if err != nil { - // On failure, add volume back to ReportAsAttached list - actualStateOfWorld.AddVolumeToReportAsAttached( - logger, volumeToDetach.VolumeName, volumeToDetach.NodeName) + // On failure, mark the volume as uncertain. Attach() must succeed before adding the volume back + // to node status as attached. 
+ uncertainError := actualStateOfWorld.MarkVolumeAsUncertain( + logger, volumeToDetach.VolumeName, volumeToDetach.VolumeSpec, volumeToDetach.NodeName) + if uncertainError != nil { + klog.Errorf("DetachVolume.MarkVolumeAsUncertain failed to add the volume %q to actual state after detach error: %s", volumeToDetach.VolumeName, uncertainError) + } eventErr, detailedErr := volumeToDetach.GenerateError("DetachVolume.Detach failed", err) return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) } @@ -576,8 +580,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( } // Enforce ReadWriteOncePod access mode if it is the only one present. This is also enforced during scheduling. - if utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod) && - actualStateOfWorld.IsVolumeMountedElsewhere(volumeToMount.VolumeName, volumeToMount.PodName) && + if actualStateOfWorld.IsVolumeMountedElsewhere(volumeToMount.VolumeName, volumeToMount.PodName) && // Because we do not know what access mode the pod intends to use if there are multiple. len(volumeToMount.VolumeSpec.PersistentVolume.Spec.AccessModes) == 1 && v1helper.ContainsAccessMode(volumeToMount.VolumeSpec.PersistentVolume.Spec.AccessModes, v1.ReadWriteOncePod) { @@ -1067,8 +1070,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( migrated := getMigratedStatusBySpec(volumeToMount.VolumeSpec) // Enforce ReadWriteOncePod access mode. This is also enforced during scheduling. - if utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod) && - actualStateOfWorld.IsVolumeMountedElsewhere(volumeToMount.VolumeName, volumeToMount.PodName) && + if actualStateOfWorld.IsVolumeMountedElsewhere(volumeToMount.VolumeName, volumeToMount.PodName) && // Because we do not know what access mode the pod intends to use if there are multiple. len(volumeToMount.VolumeSpec.PersistentVolume.Spec.AccessModes) == 1 && v1helper.ContainsAccessMode(volumeToMount.VolumeSpec.PersistentVolume.Spec.AccessModes, v1.ReadWriteOncePod) { diff --git a/pkg/volume/util/selinux.go b/pkg/volume/util/selinux.go index 22854734f30da..6150ab8dba2eb 100644 --- a/pkg/volume/util/selinux.go +++ b/pkg/volume/util/selinux.go @@ -168,10 +168,6 @@ func SupportsSELinuxContextMount(volumeSpec *volume.Spec, volumePluginMgr *volum // VolumeSupportsSELinuxMount returns true if given volume access mode can support mount with SELinux mount options. func VolumeSupportsSELinuxMount(volumeSpec *volume.Spec) bool { - // Right now, SELinux mount is supported only for ReadWriteOncePod volumes. 
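Returning to the hostutil_windows.go hunk above: os.Stat on a Unix domain socket path returns a *fs.PathError wrapping windows.ERROR_CANT_ACCESS_FILE (error 1920), which is why GetFileType now falls back to filesystem.IsUnixDomainSocket. A functionally similar check written with errors.As is sketched below; it is Windows-only and an illustration of the same idea, not the code in the patch.

//go:build windows

package hostutilsketch

import (
	"errors"
	"syscall"

	"golang.org/x/sys/windows"
)

// isCannotAccessErr reports whether err (typically a *fs.PathError from
// os.Stat) wraps ERROR_CANT_ACCESS_FILE, the errno Windows returns when a
// plain stat is attempted on a Unix domain socket.
func isCannotAccessErr(err error) bool {
	var errno syscall.Errno
	return errors.As(err, &errno) && errno == windows.ERROR_CANT_ACCESS_FILE
}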
- if !utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod) { - return false - } if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) { return false } diff --git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go index dc7a5334cddd9..e35cce690e1eb 100644 --- a/pkg/volume/util/util_test.go +++ b/pkg/volume/util/util_test.go @@ -616,7 +616,6 @@ func TestMakeAbsolutePath(t *testing.T) { } func TestGetPodVolumeNames(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, true)() tests := []struct { name string diff --git a/pkg/volume/vsphere_volume/vsphere_volume.go b/pkg/volume/vsphere_volume/vsphere_volume.go index 9d5dd3a4a7c2f..29936daebe79a 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/pkg/volume/vsphere_volume/vsphere_volume.go @@ -26,7 +26,6 @@ import ( "runtime" "strings" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" "k8s.io/mount-utils" utilstrings "k8s.io/utils/strings" @@ -37,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/types" volumehelpers "k8s.io/cloud-provider/volume/helpers" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -75,7 +73,7 @@ func (plugin *vsphereVolumePlugin) GetPluginName() string { } func (plugin *vsphereVolumePlugin) IsMigratedToCSI() bool { - return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationvSphere) + return true } func (plugin *vsphereVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) { diff --git a/plugin/pkg/admission/certificates/ctbattest/admission.go b/plugin/pkg/admission/certificates/ctbattest/admission.go index 8b2dfea76615e..77d0786ffb8a8 100644 --- a/plugin/pkg/admission/certificates/ctbattest/admission.go +++ b/plugin/pkg/admission/certificates/ctbattest/admission.go @@ -27,7 +27,9 @@ import ( "k8s.io/component-base/featuregate" "k8s.io/klog/v2" api "k8s.io/kubernetes/pkg/apis/certificates" + kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/registry/rbac" "k8s.io/kubernetes/plugin/pkg/admission/certificates" ) @@ -109,6 +111,11 @@ func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, _ admissi return nil } + // Skip the attest check when the semantics of the bundle are unchanged to support storage migration and GC workflows + if a.GetOperation() == admission.Update && rbac.IsOnlyMutatingGCFields(a.GetObject(), a.GetOldObject(), kapihelper.Semantic) { + return nil + } + if !certificates.IsAuthorizedForSignerName(ctx, p.authz, a.GetUserInfo(), "attest", newBundle.Spec.SignerName) { klog.V(4).Infof("user not permitted to attest ClusterTrustBundle %q with signerName %q", newBundle.Name, newBundle.Spec.SignerName) return admission.NewForbidden(a, fmt.Errorf("user not permitted to attest for signerName %q", newBundle.Spec.SignerName)) diff --git a/plugin/pkg/admission/certificates/ctbattest/admission_test.go b/plugin/pkg/admission/certificates/ctbattest/admission_test.go index e8f844c346450..2afd9d4ce6c62 100644 --- a/plugin/pkg/admission/certificates/ctbattest/admission_test.go +++ b/plugin/pkg/admission/certificates/ctbattest/admission_test.go @@ -22,6 +22,7 @@ import ( "fmt" "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" @@ -206,6 +207,51 @@ func TestPluginValidate(t *testing.T) { }, allowed: false, }, + { + description: "should always allow no-op update", + clusterTrustBundleFeatureEnabled: true, + authzErr: errors.New("broken"), + attributes: &testAttributes{ + resource: certificatesapi.Resource("clustertrustbundles"), + oldObj: &certificatesapi.ClusterTrustBundle{ + Spec: certificatesapi.ClusterTrustBundleSpec{ + SignerName: "panda.com/foo", + }, + }, + obj: &certificatesapi.ClusterTrustBundle{ + Spec: certificatesapi.ClusterTrustBundleSpec{ + SignerName: "panda.com/foo", + }, + }, + operation: admission.Update, + }, + allowed: true, + }, + { + description: "should always allow finalizer update", + clusterTrustBundleFeatureEnabled: true, + authzErr: errors.New("broken"), + attributes: &testAttributes{ + resource: certificatesapi.Resource("clustertrustbundles"), + oldObj: &certificatesapi.ClusterTrustBundle{ + Spec: certificatesapi.ClusterTrustBundleSpec{ + SignerName: "panda.com/foo", + }, + }, + obj: &certificatesapi.ClusterTrustBundle{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {APIVersion: "something"}, + }, + }, + Spec: certificatesapi.ClusterTrustBundleSpec{ + SignerName: "panda.com/foo", + }, + }, + operation: admission.Update, + }, + allowed: true, + }, } for _, tc := range tests { diff --git a/plugin/pkg/admission/gc/gc_admission.go b/plugin/pkg/admission/gc/gc_admission.go index db0f3c12c102d..e69c9a67ea7d5 100644 --- a/plugin/pkg/admission/gc/gc_admission.go +++ b/plugin/pkg/admission/gc/gc_admission.go @@ -238,7 +238,7 @@ func (a *gcPermissionsEnforcement) ownerRefToDeleteAttributeRecords(ref metav1.O func blockingOwnerRefs(refs []metav1.OwnerReference) []metav1.OwnerReference { var ret []metav1.OwnerReference for _, ref := range refs { - if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion == true { + if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion { ret = append(ret, ref) } } diff --git a/plugin/pkg/admission/imagepolicy/config_test.go b/plugin/pkg/admission/imagepolicy/config_test.go index 8567011ce1639..c9bd13411e13c 100644 --- a/plugin/pkg/admission/imagepolicy/config_test.go +++ b/plugin/pkg/admission/imagepolicy/config_test.go @@ -120,10 +120,10 @@ func TestConfigNormalization(t *testing.T) { } for _, tt := range tests { err := normalizeWebhookConfig(&tt.config) - if err == nil && tt.wantErr == true { + if err == nil && tt.wantErr { t.Errorf("%s: expected error from normalization and didn't have one", tt.test) } - if err != nil && tt.wantErr == false { + if err != nil && !tt.wantErr { t.Errorf("%s: unexpected error from normalization: %v", tt.test, err) } if err == nil && !reflect.DeepEqual(tt.config, tt.normalizedConfig) { diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index 14f2fceb256c1..dff3c889147b0 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -186,9 +186,7 @@ func (l *LimitRanger) GetLimitRanges(a admission.Attributes) ([]*corev1.LimitRan } lruEntry := lruItemObj.(liveLookupEntry) - for i := range lruEntry.items { - items = append(items, lruEntry.items[i]) - } + items = append(items, lruEntry.items...) 
} diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index 70716a9b34834..983d41886e154 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -140,7 +140,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) }, } if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { - role.Rules = append(role.Rules, rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie()) + role.Rules = append(role.Rules, rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie()) } return role }()) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml index 5493b907811ac..aa886f7017ff9 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml @@ -436,6 +436,7 @@ items: - pods/status verbs: - patch + - update - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/staging/publishing/import-restrictions.yaml b/staging/publishing/import-restrictions.yaml index 0808db2176e42..0c1c12cd810df 100644 --- a/staging/publishing/import-restrictions.yaml +++ b/staging/publishing/import-restrictions.yaml @@ -301,12 +301,7 @@ - baseImportPath: "./vendor/k8s.io/kms/" allowedImports: - - k8s.io/api - - k8s.io/apimachinery - - k8s.io/client-go - - k8s.io/klog - k8s.io/kms - - k8s.io/utils - baseImportPath: "./vendor/k8s.io/endpointslice/" allowedImports: diff --git a/staging/publishing/rules.yaml b/staging/publishing/rules.yaml index e84f273fb8a4e..892b8b89066ff 100644 --- a/staging/publishing/rules.yaml +++ b/staging/publishing/rules.yaml @@ -4,53 +4,63 @@ rules: - name: master source: branch: master - dir: staging/src/k8s.io/code-generator + dirs: + - staging/src/k8s.io/code-generator - name: release-1.25 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.25 - dir: staging/src/k8s.io/code-generator + dirs: + - staging/src/k8s.io/code-generator - name: release-1.26 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.26 - dir: staging/src/k8s.io/code-generator + dirs: + - staging/src/k8s.io/code-generator - name: release-1.27 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.27 - dir: staging/src/k8s.io/code-generator + dirs: + - staging/src/k8s.io/code-generator - name: release-1.28 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.28 - dir: staging/src/k8s.io/code-generator + dirs: + - staging/src/k8s.io/code-generator - destination: apimachinery branches: - name: master source: branch: master - dir: staging/src/k8s.io/apimachinery + dirs: + - staging/src/k8s.io/apimachinery - name: release-1.25 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.25 - dir: staging/src/k8s.io/apimachinery + dirs: + - staging/src/k8s.io/apimachinery - name: release-1.26 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.26 - dir: staging/src/k8s.io/apimachinery + dirs: + - staging/src/k8s.io/apimachinery - name: release-1.27 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.27 - dir: staging/src/k8s.io/apimachinery + dirs: + - staging/src/k8s.io/apimachinery - name: release-1.28 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.28 - dir: 
staging/src/k8s.io/apimachinery + dirs: + - staging/src/k8s.io/apimachinery library: true - destination: api branches: @@ -60,39 +70,44 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/api + dirs: + - staging/src/k8s.io/api - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/api + dirs: + - staging/src/k8s.io/api - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/api + dirs: + - staging/src/k8s.io/api - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/api + dirs: + - staging/src/k8s.io/api - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/api + dirs: + - staging/src/k8s.io/api library: true - destination: client-go branches: @@ -104,13 +119,14 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/client-go + dirs: + - staging/src/k8s.io/client-go smoke-test: | # assumes GO111MODULE=on go build -mod=mod ./... go test -mod=mod ./... - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -118,13 +134,14 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/client-go + dirs: + - staging/src/k8s.io/client-go smoke-test: | # assumes GO111MODULE=on go build -mod=mod ./... go test -mod=mod ./... - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -132,13 +149,14 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/client-go + dirs: + - staging/src/k8s.io/client-go smoke-test: | # assumes GO111MODULE=on go build -mod=mod ./... go test -mod=mod ./... - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -146,13 +164,14 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/client-go + dirs: + - staging/src/k8s.io/client-go smoke-test: | # assumes GO111MODULE=on go build -mod=mod ./... go test -mod=mod ./... - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -160,7 +179,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/client-go + dirs: + - staging/src/k8s.io/client-go smoke-test: | # assumes GO111MODULE=on go build -mod=mod ./... 
@@ -178,9 +198,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/component-base + dirs: + - staging/src/k8s.io/component-base - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -190,9 +211,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/component-base + dirs: + - staging/src/k8s.io/component-base - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -202,9 +224,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/component-base + dirs: + - staging/src/k8s.io/component-base - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -214,9 +237,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/component-base + dirs: + - staging/src/k8s.io/component-base - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -226,7 +250,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/component-base + dirs: + - staging/src/k8s.io/component-base library: true - destination: component-helpers branches: @@ -240,9 +265,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/component-helpers + dirs: + - staging/src/k8s.io/component-helpers - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -252,9 +278,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/component-helpers + dirs: + - staging/src/k8s.io/component-helpers - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -264,9 +291,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/component-helpers + dirs: + - staging/src/k8s.io/component-helpers - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -276,9 +304,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/component-helpers + dirs: + - staging/src/k8s.io/component-helpers - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -288,7 +317,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/component-helpers + dirs: + - staging/src/k8s.io/component-helpers library: true - destination: kms branches: @@ -296,20 +326,18 @@ rules: dependencies: - repository: apimachinery branch: master - - repository: api - branch: master - - repository: client-go - branch: master source: branch: master - dir: staging/src/k8s.io/kms + dirs: + - staging/src/k8s.io/kms - name: release-1.26 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.26 - dir: staging/src/k8s.io/kms + dirs: + - staging/src/k8s.io/kms - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -319,9 +347,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/kms + dirs: + - staging/src/k8s.io/kms - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -331,7 +360,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/kms + dirs: + - staging/src/k8s.io/kms library: true - 
destination: apiserver branches: @@ -349,9 +379,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/apiserver + dirs: + - staging/src/k8s.io/apiserver - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -363,9 +394,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/apiserver + dirs: + - staging/src/k8s.io/apiserver - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -379,9 +411,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/apiserver + dirs: + - staging/src/k8s.io/apiserver - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -395,9 +428,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/apiserver + dirs: + - staging/src/k8s.io/apiserver - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -411,7 +445,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/apiserver + dirs: + - staging/src/k8s.io/apiserver library: true - destination: kube-aggregator branches: @@ -433,9 +468,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/kube-aggregator + dirs: + - staging/src/k8s.io/kube-aggregator - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -451,9 +487,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/kube-aggregator + dirs: + - staging/src/k8s.io/kube-aggregator - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -471,9 +508,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/kube-aggregator + dirs: + - staging/src/k8s.io/kube-aggregator - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -491,9 +529,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/kube-aggregator + dirs: + - staging/src/k8s.io/kube-aggregator - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -511,7 +550,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/kube-aggregator + dirs: + - staging/src/k8s.io/kube-aggregator - destination: sample-apiserver branches: - name: master @@ -532,14 +572,15 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/sample-apiserver + dirs: + - staging/src/k8s.io/sample-apiserver required-packages: - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build -mod=mod . - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -555,14 +596,15 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/sample-apiserver + dirs: + - staging/src/k8s.io/sample-apiserver required-packages: - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build -mod=mod . 
- name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -580,14 +622,15 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/sample-apiserver + dirs: + - staging/src/k8s.io/sample-apiserver required-packages: - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build -mod=mod . - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -605,14 +648,15 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/sample-apiserver + dirs: + - staging/src/k8s.io/sample-apiserver required-packages: - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build -mod=mod . - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -630,7 +674,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/sample-apiserver + dirs: + - staging/src/k8s.io/sample-apiserver required-packages: - k8s.io/code-generator smoke-test: | @@ -650,14 +695,15 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/sample-controller + dirs: + - staging/src/k8s.io/sample-controller required-packages: - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build -mod=mod . - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -669,14 +715,15 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/sample-controller + dirs: + - staging/src/k8s.io/sample-controller required-packages: - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build -mod=mod . - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -688,14 +735,15 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/sample-controller + dirs: + - staging/src/k8s.io/sample-controller required-packages: - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build -mod=mod . - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -707,14 +755,15 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/sample-controller + dirs: + - staging/src/k8s.io/sample-controller required-packages: - k8s.io/code-generator smoke-test: | # assumes GO111MODULE=on go build -mod=mod . 
- name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -726,7 +775,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/sample-controller + dirs: + - staging/src/k8s.io/sample-controller required-packages: - k8s.io/code-generator smoke-test: | @@ -752,11 +802,12 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/apiextensions-apiserver + dirs: + - staging/src/k8s.io/apiextensions-apiserver required-packages: - k8s.io/code-generator - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -772,11 +823,12 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/apiextensions-apiserver + dirs: + - staging/src/k8s.io/apiextensions-apiserver required-packages: - k8s.io/code-generator - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -794,11 +846,12 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/apiextensions-apiserver + dirs: + - staging/src/k8s.io/apiextensions-apiserver required-packages: - k8s.io/code-generator - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -816,11 +869,12 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/apiextensions-apiserver + dirs: + - staging/src/k8s.io/apiextensions-apiserver required-packages: - k8s.io/code-generator - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -838,7 +892,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/apiextensions-apiserver + dirs: + - staging/src/k8s.io/apiextensions-apiserver required-packages: - k8s.io/code-generator - destination: metrics @@ -855,9 +910,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/metrics + dirs: + - staging/src/k8s.io/metrics - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -869,9 +925,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/metrics + dirs: + - staging/src/k8s.io/metrics - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -883,9 +940,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/metrics + dirs: + - staging/src/k8s.io/metrics - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -897,9 +955,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/metrics + dirs: + - staging/src/k8s.io/metrics - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -911,7 +970,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/metrics + dirs: + - staging/src/k8s.io/metrics library: true - destination: cli-runtime branches: @@ -925,9 +985,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/cli-runtime + dirs: + - staging/src/k8s.io/cli-runtime - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.25 @@ -937,9 +998,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/cli-runtime + dirs: + - 
staging/src/k8s.io/cli-runtime - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.26 @@ -949,9 +1011,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/cli-runtime + dirs: + - staging/src/k8s.io/cli-runtime - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.27 @@ -961,9 +1024,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/cli-runtime + dirs: + - staging/src/k8s.io/cli-runtime - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -973,7 +1037,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/cli-runtime + dirs: + - staging/src/k8s.io/cli-runtime library: true - destination: sample-cli-plugin branches: @@ -989,9 +1054,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/sample-cli-plugin + dirs: + - staging/src/k8s.io/sample-cli-plugin - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.25 @@ -1003,9 +1069,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/sample-cli-plugin + dirs: + - staging/src/k8s.io/sample-cli-plugin - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.26 @@ -1017,9 +1084,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/sample-cli-plugin + dirs: + - staging/src/k8s.io/sample-cli-plugin - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.27 @@ -1031,9 +1099,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/sample-cli-plugin + dirs: + - staging/src/k8s.io/sample-cli-plugin - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -1045,7 +1114,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/sample-cli-plugin + dirs: + - staging/src/k8s.io/sample-cli-plugin - destination: kube-proxy branches: - name: master @@ -1060,9 +1130,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/kube-proxy + dirs: + - staging/src/k8s.io/kube-proxy - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -1074,9 +1145,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/kube-proxy + dirs: + - staging/src/k8s.io/kube-proxy - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -1088,9 +1160,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/kube-proxy + dirs: + - staging/src/k8s.io/kube-proxy - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -1102,9 +1175,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/kube-proxy + dirs: + - staging/src/k8s.io/kube-proxy - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -1116,34 +1190,40 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/kube-proxy + dirs: + - staging/src/k8s.io/kube-proxy library: true - destination: cri-api branches: - name: master source: branch: master - dir: staging/src/k8s.io/cri-api + dirs: + - 
staging/src/k8s.io/cri-api - name: release-1.25 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.25 - dir: staging/src/k8s.io/cri-api + dirs: + - staging/src/k8s.io/cri-api - name: release-1.26 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.26 - dir: staging/src/k8s.io/cri-api + dirs: + - staging/src/k8s.io/cri-api - name: release-1.27 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.27 - dir: staging/src/k8s.io/cri-api + dirs: + - staging/src/k8s.io/cri-api - name: release-1.28 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.28 - dir: staging/src/k8s.io/cri-api + dirs: + - staging/src/k8s.io/cri-api library: true - destination: kubelet branches: @@ -1165,9 +1245,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/kubelet + dirs: + - staging/src/k8s.io/kubelet - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -1179,9 +1260,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/kubelet + dirs: + - staging/src/k8s.io/kubelet - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -1193,9 +1275,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/kubelet + dirs: + - staging/src/k8s.io/kubelet - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -1207,9 +1290,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/kubelet + dirs: + - staging/src/k8s.io/kubelet - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -1227,7 +1311,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/kubelet + dirs: + - staging/src/k8s.io/kubelet library: true - destination: kube-scheduler branches: @@ -1243,9 +1328,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/kube-scheduler + dirs: + - staging/src/k8s.io/kube-scheduler - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -1257,9 +1343,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/kube-scheduler + dirs: + - staging/src/k8s.io/kube-scheduler - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -1271,9 +1358,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/kube-scheduler + dirs: + - staging/src/k8s.io/kube-scheduler - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -1285,9 +1373,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/kube-scheduler + dirs: + - staging/src/k8s.io/kube-scheduler - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -1299,7 +1388,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/kube-scheduler + dirs: + - staging/src/k8s.io/kube-scheduler library: true - destination: controller-manager branches: @@ -1319,9 +1409,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/controller-manager + dirs: + - staging/src/k8s.io/controller-manager - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.25 @@ -1335,9 +1426,10 @@ rules: branch: release-1.25 
source: branch: release-1.25 - dir: staging/src/k8s.io/controller-manager + dirs: + - staging/src/k8s.io/controller-manager - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.26 @@ -1353,9 +1445,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/controller-manager + dirs: + - staging/src/k8s.io/controller-manager - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.27 @@ -1371,9 +1464,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/controller-manager + dirs: + - staging/src/k8s.io/controller-manager - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -1389,7 +1483,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/controller-manager + dirs: + - staging/src/k8s.io/controller-manager library: true - destination: cloud-provider branches: @@ -1413,9 +1508,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/cloud-provider + dirs: + - staging/src/k8s.io/cloud-provider - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.25 @@ -1433,9 +1529,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/cloud-provider + dirs: + - staging/src/k8s.io/cloud-provider - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.26 @@ -1455,9 +1552,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/cloud-provider + dirs: + - staging/src/k8s.io/cloud-provider - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.27 @@ -1477,9 +1575,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/cloud-provider + dirs: + - staging/src/k8s.io/cloud-provider - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -1499,7 +1598,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/cloud-provider + dirs: + - staging/src/k8s.io/cloud-provider library: true - destination: kube-controller-manager branches: @@ -1525,9 +1625,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/kube-controller-manager + dirs: + - staging/src/k8s.io/kube-controller-manager - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -1547,9 +1648,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/kube-controller-manager + dirs: + - staging/src/k8s.io/kube-controller-manager - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -1571,9 +1673,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/kube-controller-manager + dirs: + - staging/src/k8s.io/kube-controller-manager - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -1595,9 +1698,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/kube-controller-manager + dirs: + - staging/src/k8s.io/kube-controller-manager - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -1619,7 +1723,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: 
staging/src/k8s.io/kube-controller-manager + dirs: + - staging/src/k8s.io/kube-controller-manager library: true - destination: cluster-bootstrap branches: @@ -1631,9 +1736,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/cluster-bootstrap + dirs: + - staging/src/k8s.io/cluster-bootstrap - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.25 @@ -1641,9 +1747,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/cluster-bootstrap + dirs: + - staging/src/k8s.io/cluster-bootstrap - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -1651,9 +1758,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/cluster-bootstrap + dirs: + - staging/src/k8s.io/cluster-bootstrap - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -1661,9 +1769,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/cluster-bootstrap + dirs: + - staging/src/k8s.io/cluster-bootstrap - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -1671,7 +1780,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/cluster-bootstrap + dirs: + - staging/src/k8s.io/cluster-bootstrap library: true - destination: csi-translation-lib branches: @@ -1683,9 +1793,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/csi-translation-lib + dirs: + - staging/src/k8s.io/csi-translation-lib - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.25 @@ -1693,9 +1804,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/csi-translation-lib + dirs: + - staging/src/k8s.io/csi-translation-lib - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.26 @@ -1703,9 +1815,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/csi-translation-lib + dirs: + - staging/src/k8s.io/csi-translation-lib - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.27 @@ -1713,9 +1826,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/csi-translation-lib + dirs: + - staging/src/k8s.io/csi-translation-lib - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -1723,34 +1837,40 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/csi-translation-lib + dirs: + - staging/src/k8s.io/csi-translation-lib library: true - destination: mount-utils branches: - name: master source: branch: master - dir: staging/src/k8s.io/mount-utils + dirs: + - staging/src/k8s.io/mount-utils - name: release-1.25 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.25 - dir: staging/src/k8s.io/mount-utils + dirs: + - staging/src/k8s.io/mount-utils - name: release-1.26 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.26 - dir: staging/src/k8s.io/mount-utils + dirs: + - staging/src/k8s.io/mount-utils - name: release-1.27 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.27 - dir: staging/src/k8s.io/mount-utils + dirs: + - staging/src/k8s.io/mount-utils - name: release-1.28 - go: 1.20.8 + go: 1.20.10 source: branch: release-1.28 - dir: staging/src/k8s.io/mount-utils + dirs: + - 
staging/src/k8s.io/mount-utils library: true - destination: legacy-cloud-providers branches: @@ -1776,9 +1896,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/legacy-cloud-providers + dirs: + - staging/src/k8s.io/legacy-cloud-providers - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.25 @@ -1802,9 +1923,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/legacy-cloud-providers + dirs: + - staging/src/k8s.io/legacy-cloud-providers - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.26 @@ -1830,9 +1952,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/legacy-cloud-providers + dirs: + - staging/src/k8s.io/legacy-cloud-providers - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.27 @@ -1854,9 +1977,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/legacy-cloud-providers + dirs: + - staging/src/k8s.io/legacy-cloud-providers - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -1878,7 +2002,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/legacy-cloud-providers + dirs: + - staging/src/k8s.io/legacy-cloud-providers library: true - destination: kubectl branches: @@ -1902,9 +2027,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/kubectl + dirs: + - staging/src/k8s.io/kubectl - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.25 @@ -1924,9 +2050,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/kubectl + dirs: + - staging/src/k8s.io/kubectl - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.26 @@ -1946,9 +2073,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/kubectl + dirs: + - staging/src/k8s.io/kubectl - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.27 @@ -1968,9 +2096,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/kubectl + dirs: + - staging/src/k8s.io/kubectl - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -1990,7 +2119,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/kubectl + dirs: + - staging/src/k8s.io/kubectl library: true - destination: pod-security-admission branches: @@ -2010,9 +2140,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/pod-security-admission + dirs: + - staging/src/k8s.io/pod-security-admission - name: release-1.25 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.25 @@ -2026,9 +2157,10 @@ rules: branch: release-1.25 source: branch: release-1.25 - dir: staging/src/k8s.io/pod-security-admission + dirs: + - staging/src/k8s.io/pod-security-admission - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.26 @@ -2044,9 +2176,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/pod-security-admission + dirs: + - staging/src/k8s.io/pod-security-admission - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.27 @@ -2062,9 +2195,10 @@ rules: branch: release-1.27 
source: branch: release-1.27 - dir: staging/src/k8s.io/pod-security-admission + dirs: + - staging/src/k8s.io/pod-security-admission - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -2080,7 +2214,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/pod-security-admission + dirs: + - staging/src/k8s.io/pod-security-admission library: true - destination: dynamic-resource-allocation branches: @@ -2102,9 +2237,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/dynamic-resource-allocation + dirs: + - staging/src/k8s.io/dynamic-resource-allocation - name: release-1.26 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.26 @@ -2118,9 +2254,10 @@ rules: branch: release-1.26 source: branch: release-1.26 - dir: staging/src/k8s.io/dynamic-resource-allocation + dirs: + - staging/src/k8s.io/dynamic-resource-allocation - name: release-1.27 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.27 @@ -2134,9 +2271,10 @@ rules: branch: release-1.27 source: branch: release-1.27 - dir: staging/src/k8s.io/dynamic-resource-allocation + dirs: + - staging/src/k8s.io/dynamic-resource-allocation - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: apimachinery branch: release-1.28 @@ -2154,7 +2292,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/dynamic-resource-allocation + dirs: + - staging/src/k8s.io/dynamic-resource-allocation - destination: endpointslice branches: - name: master @@ -2169,9 +2308,10 @@ rules: branch: master source: branch: master - dir: staging/src/k8s.io/endpointslice + dirs: + - staging/src/k8s.io/endpointslice - name: release-1.28 - go: 1.20.8 + go: 1.20.10 dependencies: - repository: api branch: release-1.28 @@ -2183,7 +2323,8 @@ rules: branch: release-1.28 source: branch: release-1.28 - dir: staging/src/k8s.io/endpointslice + dirs: + - staging/src/k8s.io/endpointslice recursive-delete-patterns: - '*/.gitattributes' -default-go-version: 1.21.1 +default-go-version: 1.21.3 diff --git a/staging/src/k8s.io/api/admissionregistration/v1alpha1/types.go b/staging/src/k8s.io/api/admissionregistration/v1alpha1/types.go index 575456c83866d..bd6b17e15880f 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/staging/src/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -226,7 +226,7 @@ type ValidatingAdmissionPolicySpec struct { // +listType=map // +listMapKey=name // +optional - Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` } type MatchCondition v1.MatchCondition diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go b/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go index c199702fbd022..12c680dc972d6 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/types.go @@ -242,7 +242,7 @@ type ValidatingAdmissionPolicySpec struct { // +listType=map // +listMapKey=name // +optional - Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" 
protobuf:"bytes,7,rep,name=variables"` } // ParamKind is a tuple of Group Kind and Version. diff --git a/staging/src/k8s.io/api/batch/v1/generated.proto b/staging/src/k8s.io/api/batch/v1/generated.proto index 4f0822440faa5..0fe2a89f17b49 100644 --- a/staging/src/k8s.io/api/batch/v1/generated.proto +++ b/staging/src/k8s.io/api/batch/v1/generated.proto @@ -229,8 +229,8 @@ message JobSpec { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional int32 backoffLimitPerIndex = 12; @@ -242,8 +242,8 @@ message JobSpec { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional int32 maxFailedIndexes = 13; @@ -398,8 +398,8 @@ message JobStatus { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional string failedIndexes = 10; @@ -512,8 +512,8 @@ message PodFailurePolicyRule { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is alpha-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the diff --git a/staging/src/k8s.io/api/batch/v1/types.go b/staging/src/k8s.io/api/batch/v1/types.go index 8a28614c0b4d4..3ebc61e39c876 100644 --- a/staging/src/k8s.io/api/batch/v1/types.go +++ b/staging/src/k8s.io/api/batch/v1/types.go @@ -218,8 +218,8 @@ type PodFailurePolicyRule struct { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is alpha-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. 
// - Count: indicates that the pod is handled in the default way - the @@ -303,8 +303,8 @@ type JobSpec struct { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` @@ -316,8 +316,8 @@ type JobSpec struct { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` @@ -477,8 +477,8 @@ type JobStatus struct { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` @@ -535,6 +535,25 @@ const ( JobFailureTarget JobConditionType = "FailureTarget" ) +const ( + // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to + // a failed pod matching a pod failure policy rule + // https://kep.k8s.io/3329 + // This is currently a beta field. + JobReasonPodFailurePolicy string = "PodFailurePolicy" + // JobReasonBackoffLimitExceeded reason indicates that pods within a job have failed a number of + // times higher than backOffLimit times. + JobReasonBackoffLimitExceeded string = "BackoffLimitExceeded" + // JobReasonDeadlineExceeded means job duration is past ActiveDeadline + JobReasonDeadlineExceeded string = "DeadlineExceeded" + // JobReasonMaxFailedIndexesExceeded indicates that an index of a job failed + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonMaxFailedIndexesExceeded string = "MaxFailedIndexesExceeded" + // JobReasonFailedIndexes means Job has failed indexes. + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonFailedIndexes string = "FailedIndexes" +) + // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed.
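The hunks above promote the `JobBackoffLimitPerIndex` documentation in batch/v1 from alpha to beta and add the Job failure reason constants. As an illustration only (not part of this patch), the following is a minimal, hypothetical Indexed Job manifest exercising the fields documented above; the Job name, image, and exit-code value are invented for the example, and it assumes a cluster where the `JobBackoffLimitPerIndex` feature gate is enabled, which the updated comments describe as the new default.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: per-index-backoff-example   # illustrative name
spec:
  completions: 10
  parallelism: 3
  completionMode: Indexed            # backoffLimitPerIndex requires Indexed completion mode
  backoffLimitPerIndex: 1            # beta field: retries allowed per index
  maxFailedIndexes: 5                # fail the whole Job once more than 5 indexes fail
  podFailurePolicy:
    rules:
    - action: FailIndex              # beta-level action gated by JobBackoffLimitPerIndex
      onExitCodes:
        operator: In
        values: [42]                 # illustrative exit code
  template:
    spec:
      restartPolicy: Never           # required when backoffLimitPerIndex is set
      containers:
      - name: worker
        image: busybox:1.36          # illustrative image
        command: ["sh", "-c", "exit 0"]
```

With these settings each index is retried at most once, a container exiting with code 42 fails its index immediately through the `FailIndex` action, and once more than five indexes fail the Job as a whole fails, which is where reasons such as `MaxFailedIndexesExceeded` and `FailedIndexes` from the new constant block would appear in the Job's conditions.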
diff --git a/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 43b4e1e7d9441..a21bd77456594 100644 --- a/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -117,8 +117,8 @@ var map_JobSpec = map[string]string{ "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", - "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", - "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. 
It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", @@ -142,7 +142,7 @@ var map_JobStatus = map[string]string{ "failed": "The number of pods which reached phase Failed.", "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default).", "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", } @@ -193,7 +193,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { var map_PodFailurePolicyRule = map[string]string{ "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. 
At most 20 elements are allowed.", } diff --git a/staging/src/k8s.io/api/core/v1/generated.pb.go b/staging/src/k8s.io/api/core/v1/generated.pb.go index 200a368ccdc4a..7cded1c22d74a 100644 --- a/staging/src/k8s.io/api/core/v1/generated.pb.go +++ b/staging/src/k8s.io/api/core/v1/generated.pb.go @@ -5593,10 +5593,38 @@ func (m *SessionAffinityConfig) XXX_DiscardUnknown() { var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo +func (m *SleepAction) Reset() { *m = SleepAction{} } +func (*SleepAction) ProtoMessage() {} +func (*SleepAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{198} +} +func (m *SleepAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SleepAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SleepAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_SleepAction.Merge(m, src) +} +func (m *SleepAction) XXX_Size() int { + return m.Size() +} +func (m *SleepAction) XXX_DiscardUnknown() { + xxx_messageInfo_SleepAction.DiscardUnknown(m) +} + +var xxx_messageInfo_SleepAction proto.InternalMessageInfo + func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5652,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5680,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5708,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5736,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5764,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - 
return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5792,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5820,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5848,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +5876,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_83c10c24ec417dc9, []int{208} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +5904,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{208} + return fileDescriptor_83c10c24ec417dc9, []int{209} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +5932,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{209} + return fileDescriptor_83c10c24ec417dc9, []int{210} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +5960,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{210} + return fileDescriptor_83c10c24ec417dc9, []int{211} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +5988,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) 
ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{211} + return fileDescriptor_83c10c24ec417dc9, []int{212} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6016,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{212} + return fileDescriptor_83c10c24ec417dc9, []int{213} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6044,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{213} + return fileDescriptor_83c10c24ec417dc9, []int{214} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +6072,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} } func (*VolumeResourceRequirements) ProtoMessage() {} func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{214} + return fileDescriptor_83c10c24ec417dc9, []int{215} } func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6072,7 +6100,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{215} + return fileDescriptor_83c10c24ec417dc9, []int{216} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6100,7 +6128,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{216} + return fileDescriptor_83c10c24ec417dc9, []int{217} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6128,7 +6156,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{217} + return fileDescriptor_83c10c24ec417dc9, []int{218} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6156,7 +6184,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{218} + return fileDescriptor_83c10c24ec417dc9, []int{219} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6410,6 
+6438,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*SleepAction)(nil), "k8s.io.api.core.v1.SleepAction") proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") @@ -6440,951 +6469,959 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 15095 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x70, 0x5c, 0xd9, - 0x75, 0x18, 0xac, 0xd7, 0x8d, 0xad, 0x0f, 0xf6, 0x0b, 0x92, 0x03, 0x62, 0x48, 0x36, 0xe7, 0xcd, - 0x0c, 0x87, 0xb3, 0x81, 0xe2, 0x2c, 0xd2, 0x68, 0x66, 0x34, 0x16, 0x56, 0x12, 0x43, 0x00, 0xec, - 0xb9, 0x0d, 0x92, 0xd2, 0x68, 0xa4, 0x4f, 0x0f, 0xdd, 0x17, 0xc0, 0x13, 0x1a, 0xef, 0xf5, 0xbc, - 0xf7, 0x1a, 0x24, 0xf8, 0x49, 0x65, 0x5b, 0xfe, 0x2c, 0x5b, 0xb2, 0xbf, 0xaf, 0x54, 0x5f, 0xf9, - 0x5b, 0x4a, 0x76, 0xb9, 0xbe, 0xb2, 0xfd, 0xc5, 0x76, 0x64, 0x27, 0x51, 0xe4, 0xd8, 0x8e, 0xe5, - 0x2d, 0x5b, 0xc5, 0x4e, 0xa5, 0x1c, 0xc7, 0x55, 0xb1, 0x5c, 0xe5, 0x0a, 0x6c, 0xd1, 0xa9, 0x72, - 0x5c, 0x95, 0xd8, 0xce, 0xf2, 0x23, 0x41, 0x9c, 0x38, 0x75, 0xd7, 0x77, 0xef, 0x5b, 0xba, 0x1b, - 0x1c, 0x10, 0x1a, 0xa9, 0xe6, 0x5f, 0xf7, 0x39, 0xe7, 0x9e, 0x7b, 0xdf, 0x5d, 0xcf, 0x3d, 0xe7, - 0xdc, 0x73, 0xe0, 0x95, 0xed, 0x97, 0xc2, 0x69, 0xd7, 0xbf, 0xb4, 0xdd, 0x5a, 0x27, 0x81, 0x47, - 0x22, 0x12, 0x5e, 0xda, 0x25, 0x5e, 0xdd, 0x0f, 0x2e, 0x09, 0x84, 0xd3, 0x74, 0x2f, 0xd5, 0xfc, - 0x80, 0x5c, 0xda, 0xbd, 0x7c, 0x69, 0x93, 0x78, 0x24, 0x70, 0x22, 0x52, 0x9f, 0x6e, 0x06, 0x7e, - 0xe4, 0x23, 0xc4, 0x69, 0xa6, 0x9d, 0xa6, 0x3b, 0x4d, 0x69, 0xa6, 0x77, 0x2f, 0x4f, 0x3d, 0xbb, - 0xe9, 0x46, 0x5b, 0xad, 0xf5, 0xe9, 0x9a, 0xbf, 0x73, 0x69, 0xd3, 0xdf, 0xf4, 0x2f, 0x31, 0xd2, - 0xf5, 0xd6, 0x06, 0xfb, 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc5, 0xd4, 0x0b, 0x71, 0x35, 0x3b, 0x4e, - 0x6d, 0xcb, 0xf5, 0x48, 0xb0, 0x77, 0xa9, 0xb9, 0xbd, 0xc9, 0xea, 0x0d, 0x48, 0xe8, 0xb7, 0x82, - 0x1a, 0x49, 0x56, 0xdc, 0xb6, 0x54, 0x78, 0x69, 0x87, 0x44, 0x4e, 0x46, 0x73, 0xa7, 0x2e, 0xe5, - 0x95, 0x0a, 0x5a, 0x5e, 0xe4, 0xee, 0xa4, 0xab, 0xf9, 0x40, 0xa7, 0x02, 0x61, 0x6d, 0x8b, 0xec, - 0x38, 0xa9, 0x72, 0xcf, 0xe7, 0x95, 0x6b, 0x45, 0x6e, 0xe3, 0x92, 0xeb, 0x45, 0x61, 0x14, 0x24, - 0x0b, 0xd9, 0xdf, 0xb0, 0xe0, 0xfc, 0xcc, 0xad, 0xea, 0x42, 0xc3, 0x09, 0x23, 0xb7, 0x36, 0xdb, - 0xf0, 0x6b, 0xdb, 0xd5, 0xc8, 0x0f, 0xc8, 0x4d, 0xbf, 0xd1, 0xda, 0x21, 0x55, 0xd6, 0x11, 0xe8, - 0x19, 0x18, 0xd8, 0x65, 0xff, 0x97, 0xe6, 0x27, 0xad, 0xf3, 0xd6, 0xc5, 0xd2, 0xec, 0xd8, 0x6f, - 0xed, 0x97, 0xdf, 0x77, 0x6f, 0xbf, 0x3c, 0x70, 0x53, 0xc0, 0xb1, 0xa2, 0x40, 0x17, 0xa0, 0x6f, - 0x23, 0x5c, 0xdb, 0x6b, 0x92, 0xc9, 0x02, 0xa3, 0x1d, 0x11, 0xb4, 0x7d, 0x8b, 0x55, 0x0a, 0xc5, - 0x02, 0x8b, 0x2e, 0x41, 0xa9, 0xe9, 0x04, 0x91, 0x1b, 0xb9, 0xbe, 0x37, 0x59, 0x3c, 0x6f, 0x5d, - 0xec, 0x9d, 0x1d, 0x17, 0xa4, 0xa5, 0x8a, 0x44, 0xe0, 0x98, 0x86, 0x36, 0x23, 0x20, 0x4e, 0xfd, - 0xba, 0xd7, 0xd8, 0x9b, 0xec, 0x39, 0x6f, 0x5d, 0x1c, 0x88, 0x9b, 0x81, 0x05, 0x1c, 0x2b, 0x0a, - 0xfb, 0xcb, 0x05, 0x18, 0x98, 0xd9, 0xd8, 0x70, 0x3d, 0x37, 0xda, 0x43, 0x37, 0x61, 0xc8, 0xf3, - 0xeb, 0x44, 0xfe, 0x67, 0x5f, 
0x31, 0xf8, 0xdc, 0xf9, 0xe9, 0xf4, 0x54, 0x9a, 0x5e, 0xd5, 0xe8, - 0x66, 0xc7, 0xee, 0xed, 0x97, 0x87, 0x74, 0x08, 0x36, 0xf8, 0x20, 0x0c, 0x83, 0x4d, 0xbf, 0xae, - 0xd8, 0x16, 0x18, 0xdb, 0x72, 0x16, 0xdb, 0x4a, 0x4c, 0x36, 0x3b, 0x7a, 0x6f, 0xbf, 0x3c, 0xa8, - 0x01, 0xb0, 0xce, 0x04, 0xad, 0xc3, 0x28, 0xfd, 0xeb, 0x45, 0xae, 0xe2, 0x5b, 0x64, 0x7c, 0x1f, - 0xcd, 0xe3, 0xab, 0x91, 0xce, 0x4e, 0xdc, 0xdb, 0x2f, 0x8f, 0x26, 0x80, 0x38, 0xc9, 0xd0, 0xbe, - 0x0b, 0x23, 0x33, 0x51, 0xe4, 0xd4, 0xb6, 0x48, 0x9d, 0x8f, 0x20, 0x7a, 0x01, 0x7a, 0x3c, 0x67, - 0x87, 0x88, 0xf1, 0x3d, 0x2f, 0x3a, 0xb6, 0x67, 0xd5, 0xd9, 0x21, 0x07, 0xfb, 0xe5, 0xb1, 0x1b, - 0x9e, 0xfb, 0x76, 0x4b, 0xcc, 0x0a, 0x0a, 0xc3, 0x8c, 0x1a, 0x3d, 0x07, 0x50, 0x27, 0xbb, 0x6e, - 0x8d, 0x54, 0x9c, 0x68, 0x4b, 0x8c, 0x37, 0x12, 0x65, 0x61, 0x5e, 0x61, 0xb0, 0x46, 0x65, 0xdf, - 0x81, 0xd2, 0xcc, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x3d, 0x44, 0xdb, 0x30, 0xda, 0x0c, 0xc8, 0x06, - 0x09, 0x14, 0x68, 0xd2, 0x3a, 0x5f, 0xbc, 0x38, 0xf8, 0xdc, 0xc5, 0xcc, 0x8f, 0x35, 0x49, 0x17, - 0xbc, 0x28, 0xd8, 0x9b, 0x7d, 0x48, 0xd4, 0x37, 0x9a, 0xc0, 0xe2, 0x24, 0x67, 0xfb, 0x1f, 0x17, - 0xe0, 0xe4, 0xcc, 0xdd, 0x56, 0x40, 0xe6, 0xdd, 0x70, 0x3b, 0x39, 0xc3, 0xeb, 0x6e, 0xb8, 0xbd, - 0x1a, 0xf7, 0x80, 0x9a, 0x5a, 0xf3, 0x02, 0x8e, 0x15, 0x05, 0x7a, 0x16, 0xfa, 0xe9, 0xef, 0x1b, - 0x78, 0x49, 0x7c, 0xf2, 0x84, 0x20, 0x1e, 0x9c, 0x77, 0x22, 0x67, 0x9e, 0xa3, 0xb0, 0xa4, 0x41, - 0x2b, 0x30, 0x58, 0x63, 0x0b, 0x72, 0x73, 0xc5, 0xaf, 0x13, 0x36, 0x98, 0xa5, 0xd9, 0xa7, 0x29, - 0xf9, 0x5c, 0x0c, 0x3e, 0xd8, 0x2f, 0x4f, 0xf2, 0xb6, 0x09, 0x16, 0x1a, 0x0e, 0xeb, 0xe5, 0x91, - 0xad, 0xd6, 0x57, 0x0f, 0xe3, 0x04, 0x19, 0x6b, 0xeb, 0xa2, 0xb6, 0x54, 0x7a, 0xd9, 0x52, 0x19, - 0xca, 0x5e, 0x26, 0xe8, 0x32, 0xf4, 0x6c, 0xbb, 0x5e, 0x7d, 0xb2, 0x8f, 0xf1, 0x3a, 0x4b, 0xc7, - 0xfc, 0x9a, 0xeb, 0xd5, 0x0f, 0xf6, 0xcb, 0xe3, 0x46, 0x73, 0x28, 0x10, 0x33, 0x52, 0xfb, 0x3f, - 0x59, 0x50, 0x66, 0xb8, 0x45, 0xb7, 0x41, 0x2a, 0x24, 0x08, 0xdd, 0x30, 0x22, 0x5e, 0x64, 0x74, - 0xe8, 0x73, 0x00, 0x21, 0xa9, 0x05, 0x24, 0xd2, 0xba, 0x54, 0x4d, 0x8c, 0xaa, 0xc2, 0x60, 0x8d, - 0x8a, 0x6e, 0x08, 0xe1, 0x96, 0x13, 0xb0, 0xf9, 0x25, 0x3a, 0x56, 0x6d, 0x08, 0x55, 0x89, 0xc0, - 0x31, 0x8d, 0xb1, 0x21, 0x14, 0x3b, 0x6d, 0x08, 0xe8, 0xc3, 0x30, 0x1a, 0x57, 0x16, 0x36, 0x9d, - 0x9a, 0xec, 0x40, 0xb6, 0x64, 0xaa, 0x26, 0x0a, 0x27, 0x69, 0xed, 0xbf, 0x69, 0x89, 0xc9, 0x43, - 0xbf, 0xfa, 0x5d, 0xfe, 0xad, 0xf6, 0x2f, 0x5b, 0xd0, 0x3f, 0xeb, 0x7a, 0x75, 0xd7, 0xdb, 0x44, - 0x9f, 0x82, 0x01, 0x7a, 0x36, 0xd5, 0x9d, 0xc8, 0x11, 0xfb, 0xde, 0xfb, 0xb5, 0xb5, 0xa5, 0x8e, - 0x8a, 0xe9, 0xe6, 0xf6, 0x26, 0x05, 0x84, 0xd3, 0x94, 0x9a, 0xae, 0xb6, 0xeb, 0xeb, 0x9f, 0x26, - 0xb5, 0x68, 0x85, 0x44, 0x4e, 0xfc, 0x39, 0x31, 0x0c, 0x2b, 0xae, 0xe8, 0x1a, 0xf4, 0x45, 0x4e, - 0xb0, 0x49, 0x22, 0xb1, 0x01, 0x66, 0x6e, 0x54, 0xbc, 0x24, 0xa6, 0x2b, 0x92, 0x78, 0x35, 0x12, - 0x1f, 0x0b, 0x6b, 0xac, 0x28, 0x16, 0x2c, 0xec, 0xff, 0xde, 0x0f, 0xa7, 0xe7, 0xaa, 0x4b, 0x39, - 0xf3, 0xea, 0x02, 0xf4, 0xd5, 0x03, 0x77, 0x97, 0x04, 0xa2, 0x9f, 0x15, 0x97, 0x79, 0x06, 0xc5, - 0x02, 0x8b, 0x5e, 0x82, 0x21, 0x7e, 0x20, 0x5d, 0x75, 0xbc, 0x7a, 0x43, 0x76, 0xf1, 0x09, 0x41, - 0x3d, 0x74, 0x53, 0xc3, 0x61, 0x83, 0xf2, 0x90, 0x93, 0xea, 0x42, 0x62, 0x31, 0xe6, 0x1d, 0x76, - 0x5f, 0xb0, 0x60, 0x8c, 0x57, 0x33, 0x13, 0x45, 0x81, 0xbb, 0xde, 0x8a, 0x48, 0x38, 0xd9, 0xcb, - 0x76, 0xba, 0xb9, 0xac, 0xde, 0xca, 0xed, 0x81, 0xe9, 0x9b, 0x09, 0x2e, 0x7c, 0x13, 0x9c, 0x14, - 0xf5, 0x8e, 0x25, 0xd1, 0x38, 0x55, 0x2d, 0xfa, 0x3e, 
0x0b, 0xa6, 0x6a, 0xbe, 0x17, 0x05, 0x7e, - 0xa3, 0x41, 0x82, 0x4a, 0x6b, 0xbd, 0xe1, 0x86, 0x5b, 0x7c, 0x9e, 0x62, 0xb2, 0xc1, 0x76, 0x82, - 0x9c, 0x31, 0x54, 0x44, 0x62, 0x0c, 0xcf, 0xdd, 0xdb, 0x2f, 0x4f, 0xcd, 0xe5, 0xb2, 0xc2, 0x6d, - 0xaa, 0x41, 0xdb, 0x80, 0xe8, 0x51, 0x5a, 0x8d, 0x9c, 0x4d, 0x12, 0x57, 0xde, 0xdf, 0x7d, 0xe5, - 0xa7, 0xee, 0xed, 0x97, 0xd1, 0x6a, 0x8a, 0x05, 0xce, 0x60, 0x8b, 0xde, 0x86, 0x13, 0x14, 0x9a, - 0xfa, 0xd6, 0x81, 0xee, 0xab, 0x9b, 0xbc, 0xb7, 0x5f, 0x3e, 0xb1, 0x9a, 0xc1, 0x04, 0x67, 0xb2, - 0x46, 0xdf, 0x63, 0xc1, 0xe9, 0xf8, 0xf3, 0x17, 0xee, 0x34, 0x1d, 0xaf, 0x1e, 0x57, 0x5c, 0xea, - 0xbe, 0x62, 0xba, 0x27, 0x9f, 0x9e, 0xcb, 0xe3, 0x84, 0xf3, 0x2b, 0x41, 0x1e, 0x4c, 0xd0, 0xa6, - 0x25, 0xeb, 0x86, 0xee, 0xeb, 0x7e, 0xe8, 0xde, 0x7e, 0x79, 0x62, 0x35, 0xcd, 0x03, 0x67, 0x31, - 0x9e, 0x9a, 0x83, 0x93, 0x99, 0xb3, 0x13, 0x8d, 0x41, 0x71, 0x9b, 0x70, 0xa9, 0xab, 0x84, 0xe9, - 0x4f, 0x74, 0x02, 0x7a, 0x77, 0x9d, 0x46, 0x4b, 0x2c, 0x4c, 0xcc, 0xff, 0xbc, 0x5c, 0x78, 0xc9, - 0xb2, 0xff, 0x49, 0x11, 0x46, 0xe7, 0xaa, 0x4b, 0xf7, 0xb5, 0xea, 0xf5, 0x63, 0xaf, 0xd0, 0xf6, - 0xd8, 0x8b, 0x0f, 0xd1, 0x62, 0xee, 0x21, 0xfa, 0xdd, 0x19, 0x4b, 0xb6, 0x87, 0x2d, 0xd9, 0x0f, - 0xe5, 0x2c, 0xd9, 0x23, 0x5e, 0xa8, 0xbb, 0x39, 0xb3, 0xb6, 0x97, 0x0d, 0x60, 0xa6, 0x84, 0xb4, - 0xec, 0xd7, 0x9c, 0x46, 0x72, 0xab, 0x3d, 0xe4, 0xd4, 0x3d, 0x9a, 0x71, 0xac, 0xc1, 0xd0, 0x9c, - 0xd3, 0x74, 0xd6, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f, 0x40, 0xd1, 0xa9, 0xd7, 0x99, 0x74, - 0x57, 0x9a, 0x3d, 0x79, 0x6f, 0xbf, 0x5c, 0x9c, 0xa9, 0x53, 0x31, 0x03, 0x14, 0xd5, 0x1e, 0xa6, - 0x14, 0xe8, 0x29, 0xe8, 0xa9, 0x07, 0x7e, 0x73, 0xb2, 0xc0, 0x28, 0xe9, 0x2a, 0xef, 0x99, 0x0f, - 0xfc, 0x66, 0x82, 0x94, 0xd1, 0xd8, 0xbf, 0x59, 0x80, 0x33, 0x73, 0xa4, 0xb9, 0xb5, 0x58, 0xcd, - 0x39, 0x2f, 0x2e, 0xc2, 0xc0, 0x8e, 0xef, 0xb9, 0x91, 0x1f, 0x84, 0xa2, 0x6a, 0x36, 0x23, 0x56, - 0x04, 0x0c, 0x2b, 0x2c, 0x3a, 0x0f, 0x3d, 0xcd, 0x58, 0x88, 0x1d, 0x92, 0x02, 0x30, 0x13, 0x5f, - 0x19, 0x86, 0x52, 0xb4, 0x42, 0x12, 0x88, 0x19, 0xa3, 0x28, 0x6e, 0x84, 0x24, 0xc0, 0x0c, 0x13, - 0x4b, 0x02, 0x54, 0x46, 0x10, 0x27, 0x42, 0x42, 0x12, 0xa0, 0x18, 0xac, 0x51, 0xa1, 0x0a, 0x94, - 0xc2, 0xc4, 0xc8, 0x76, 0xb5, 0x34, 0x87, 0x99, 0xa8, 0xa0, 0x46, 0x32, 0x66, 0x62, 0x9c, 0x60, - 0x7d, 0x1d, 0x45, 0x85, 0xaf, 0x17, 0x00, 0xf1, 0x2e, 0xfc, 0x36, 0xeb, 0xb8, 0x1b, 0xe9, 0x8e, - 0xeb, 0x7e, 0x49, 0x1c, 0x55, 0xef, 0xfd, 0x67, 0x0b, 0xce, 0xcc, 0xb9, 0x5e, 0x9d, 0x04, 0x39, - 0x13, 0xf0, 0xc1, 0xdc, 0x9d, 0x0f, 0x27, 0xa4, 0x18, 0x53, 0xac, 0xe7, 0x08, 0xa6, 0x98, 0xfd, - 0x17, 0x16, 0x20, 0xfe, 0xd9, 0xef, 0xba, 0x8f, 0xbd, 0x91, 0xfe, 0xd8, 0x23, 0x98, 0x16, 0xf6, - 0xdf, 0xb6, 0x60, 0x70, 0xae, 0xe1, 0xb8, 0x3b, 0xe2, 0x53, 0xe7, 0x60, 0x5c, 0x2a, 0x8a, 0x18, - 0x58, 0x93, 0xfd, 0xe9, 0xe6, 0x36, 0x8e, 0x93, 0x48, 0x9c, 0xa6, 0x47, 0x1f, 0x87, 0xd3, 0x06, - 0x70, 0x8d, 0xec, 0x34, 0x1b, 0x4e, 0xa4, 0xdf, 0x0a, 0xd8, 0xe9, 0x8f, 0xf3, 0x88, 0x70, 0x7e, - 0x79, 0x7b, 0x19, 0x46, 0xe6, 0x1a, 0x2e, 0xf1, 0xa2, 0xa5, 0xca, 0x9c, 0xef, 0x6d, 0xb8, 0x9b, - 0xe8, 0x65, 0x18, 0x89, 0xdc, 0x1d, 0xe2, 0xb7, 0xa2, 0x2a, 0xa9, 0xf9, 0x1e, 0xbb, 0x6b, 0x5b, - 0x17, 0x7b, 0x67, 0xd1, 0xbd, 0xfd, 0xf2, 0xc8, 0x9a, 0x81, 0xc1, 0x09, 0x4a, 0xfb, 0x0f, 0xe9, - 0x88, 0xfb, 0x3b, 0x4d, 0xdf, 0x23, 0x5e, 0x34, 0xe7, 0x7b, 0x75, 0xae, 0x93, 0x79, 0x19, 0x7a, - 0x22, 0x3a, 0x82, 0xfc, 0xcb, 0x2f, 0xc8, 0xa5, 0x4d, 0xc7, 0xed, 0x60, 0xbf, 0x7c, 0x2a, 0x5d, - 0x82, 0x8d, 0x2c, 0x2b, 0x83, 0x3e, 0x04, 0x7d, 0x61, 0xe4, 0x44, 0xad, 0x50, 
0x7c, 0xea, 0x23, - 0x72, 0xfc, 0xab, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0x55, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x49, - 0xe8, 0xdf, 0x21, 0x61, 0xe8, 0x6c, 0xca, 0xf3, 0x7b, 0x54, 0x94, 0xed, 0x5f, 0xe1, 0x60, 0x2c, - 0xf1, 0xe8, 0x51, 0xe8, 0x25, 0x41, 0xe0, 0x07, 0x62, 0x57, 0x19, 0x16, 0x84, 0xbd, 0x0b, 0x14, - 0x88, 0x39, 0xce, 0xfe, 0x17, 0x16, 0x8c, 0xaa, 0xb6, 0xf2, 0xba, 0x8e, 0xe1, 0xde, 0xf4, 0x26, - 0x40, 0x4d, 0x7e, 0x60, 0xc8, 0xce, 0xbb, 0xc1, 0xe7, 0x2e, 0x64, 0x8a, 0x16, 0xa9, 0x6e, 0x8c, - 0x39, 0x2b, 0x50, 0x88, 0x35, 0x6e, 0xf6, 0xaf, 0x59, 0x30, 0x91, 0xf8, 0xa2, 0x65, 0x37, 0x8c, - 0xd0, 0x5b, 0xa9, 0xaf, 0x9a, 0xee, 0xee, 0xab, 0x68, 0x69, 0xf6, 0x4d, 0x6a, 0xf1, 0x49, 0x88, - 0xf6, 0x45, 0x57, 0xa1, 0xd7, 0x8d, 0xc8, 0x8e, 0xfc, 0x98, 0x47, 0xdb, 0x7e, 0x0c, 0x6f, 0x55, - 0x3c, 0x22, 0x4b, 0xb4, 0x24, 0xe6, 0x0c, 0xec, 0xdf, 0x2c, 0x42, 0x89, 0x4f, 0xdb, 0x15, 0xa7, - 0x79, 0x0c, 0x63, 0xf1, 0x34, 0x94, 0xdc, 0x9d, 0x9d, 0x56, 0xe4, 0xac, 0x8b, 0x03, 0x68, 0x80, - 0x6f, 0x06, 0x4b, 0x12, 0x88, 0x63, 0x3c, 0x5a, 0x82, 0x1e, 0xd6, 0x14, 0xfe, 0x95, 0x4f, 0x64, - 0x7f, 0xa5, 0x68, 0xfb, 0xf4, 0xbc, 0x13, 0x39, 0x5c, 0xf6, 0x53, 0x27, 0x1f, 0x05, 0x61, 0xc6, - 0x02, 0x39, 0x00, 0xeb, 0xae, 0xe7, 0x04, 0x7b, 0x14, 0x36, 0x59, 0x64, 0x0c, 0x9f, 0x6d, 0xcf, - 0x70, 0x56, 0xd1, 0x73, 0xb6, 0xea, 0xc3, 0x62, 0x04, 0xd6, 0x98, 0x4e, 0x7d, 0x10, 0x4a, 0x8a, - 0xf8, 0x30, 0x22, 0xdc, 0xd4, 0x87, 0x61, 0x34, 0x51, 0x57, 0xa7, 0xe2, 0x43, 0xba, 0x04, 0xf8, - 0x2b, 0x6c, 0xcb, 0x10, 0xad, 0x5e, 0xf0, 0x76, 0xc5, 0xce, 0x79, 0x17, 0x4e, 0x34, 0x32, 0xf6, - 0x5e, 0x31, 0xae, 0xdd, 0xef, 0xd5, 0x67, 0xc4, 0x67, 0x9f, 0xc8, 0xc2, 0xe2, 0xcc, 0x3a, 0xa8, - 0x54, 0xe3, 0x37, 0xe9, 0x02, 0x71, 0x1a, 0xfa, 0x05, 0xe1, 0xba, 0x80, 0x61, 0x85, 0xa5, 0xfb, - 0xdd, 0x09, 0xd5, 0xf8, 0x6b, 0x64, 0xaf, 0x4a, 0x1a, 0xa4, 0x16, 0xf9, 0xc1, 0xb7, 0xb4, 0xf9, - 0x67, 0x79, 0xef, 0xf3, 0xed, 0x72, 0x50, 0x30, 0x28, 0x5e, 0x23, 0x7b, 0x7c, 0x28, 0xf4, 0xaf, - 0x2b, 0xb6, 0xfd, 0xba, 0xaf, 0x5a, 0x30, 0xac, 0xbe, 0xee, 0x18, 0xf6, 0x85, 0x59, 0x73, 0x5f, - 0x38, 0xdb, 0x76, 0x82, 0xe7, 0xec, 0x08, 0x5f, 0x2f, 0xc0, 0x69, 0x45, 0x43, 0x6f, 0x33, 0xfc, - 0x8f, 0x98, 0x55, 0x97, 0xa0, 0xe4, 0x29, 0xbd, 0x9e, 0x65, 0x2a, 0xd4, 0x62, 0xad, 0x5e, 0x4c, - 0x43, 0x85, 0x52, 0x2f, 0x3e, 0x66, 0x87, 0x74, 0x85, 0xb7, 0x50, 0x6e, 0xcf, 0x42, 0xb1, 0xe5, - 0xd6, 0xc5, 0x01, 0xf3, 0x7e, 0xd9, 0xdb, 0x37, 0x96, 0xe6, 0x0f, 0xf6, 0xcb, 0x8f, 0xe4, 0x19, - 0x5b, 0xe8, 0xc9, 0x16, 0x4e, 0xdf, 0x58, 0x9a, 0xc7, 0xb4, 0x30, 0x9a, 0x81, 0x51, 0x79, 0x42, - 0xdf, 0xa4, 0x02, 0xa2, 0xef, 0x89, 0x73, 0x48, 0x69, 0xad, 0xb1, 0x89, 0xc6, 0x49, 0x7a, 0x34, - 0x0f, 0x63, 0xdb, 0xad, 0x75, 0xd2, 0x20, 0x11, 0xff, 0xe0, 0x6b, 0x84, 0xeb, 0x74, 0x4b, 0xf1, - 0x5d, 0xf2, 0x5a, 0x02, 0x8f, 0x53, 0x25, 0xec, 0xbf, 0x66, 0xe7, 0x81, 0xe8, 0xbd, 0x4a, 0xe0, - 0xd3, 0x89, 0x45, 0xb9, 0x7f, 0x2b, 0xa7, 0x73, 0x37, 0xb3, 0xe2, 0x1a, 0xd9, 0x5b, 0xf3, 0xe9, - 0x5d, 0x22, 0x7b, 0x56, 0x18, 0x73, 0xbe, 0xa7, 0xed, 0x9c, 0xff, 0x85, 0x02, 0x9c, 0x54, 0x3d, - 0x60, 0x88, 0xad, 0xdf, 0xee, 0x7d, 0x70, 0x19, 0x06, 0xeb, 0x64, 0xc3, 0x69, 0x35, 0x22, 0x65, - 0x60, 0xe8, 0xe5, 0x46, 0xa6, 0xf9, 0x18, 0x8c, 0x75, 0x9a, 0x43, 0x74, 0xdb, 0xcf, 0x0f, 0xb3, - 0x83, 0x38, 0x72, 0xe8, 0x1c, 0x57, 0xab, 0xc6, 0xca, 0x5d, 0x35, 0x8f, 0x42, 0xaf, 0xbb, 0x43, - 0x05, 0xb3, 0x82, 0x29, 0x6f, 0x2d, 0x51, 0x20, 0xe6, 0x38, 0xf4, 0x38, 0xf4, 0xd7, 0xfc, 0x9d, - 0x1d, 0xc7, 0xab, 0xb3, 0x23, 0xaf, 0x34, 0x3b, 0x48, 0x65, 0xb7, 0x39, 0x0e, 0xc2, 0x12, 0x87, - 0xce, 
0x40, 0x8f, 0x13, 0x6c, 0x72, 0xad, 0x4b, 0x69, 0x76, 0x80, 0xd6, 0x34, 0x13, 0x6c, 0x86, - 0x98, 0x41, 0xe9, 0xa5, 0xf1, 0xb6, 0x1f, 0x6c, 0xbb, 0xde, 0xe6, 0xbc, 0x1b, 0x88, 0x25, 0xa1, - 0xce, 0xc2, 0x5b, 0x0a, 0x83, 0x35, 0x2a, 0xb4, 0x08, 0xbd, 0x4d, 0x3f, 0x88, 0xc2, 0xc9, 0x3e, - 0xd6, 0xdd, 0x8f, 0xe4, 0x6c, 0x44, 0xfc, 0x6b, 0x2b, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff, 0x85, - 0x98, 0x17, 0x47, 0xcb, 0xd0, 0x4f, 0xbc, 0xdd, 0xc5, 0xc0, 0xdf, 0x99, 0x9c, 0xc8, 0xe7, 0xb4, - 0xc0, 0x49, 0xf8, 0x34, 0x8b, 0x65, 0x54, 0x01, 0xc6, 0x92, 0x05, 0xfa, 0x10, 0x14, 0x89, 0xb7, - 0x3b, 0xd9, 0xcf, 0x38, 0x4d, 0xe5, 0x70, 0xba, 0xe9, 0x04, 0xf1, 0x9e, 0xbf, 0xe0, 0xed, 0x62, - 0x5a, 0x06, 0x7d, 0x0c, 0x4a, 0x72, 0xc3, 0x08, 0x85, 0x3a, 0x33, 0x73, 0xc2, 0xca, 0x6d, 0x06, - 0x93, 0xb7, 0x5b, 0x6e, 0x40, 0x76, 0x88, 0x17, 0x85, 0xf1, 0x0e, 0x29, 0xb1, 0x21, 0x8e, 0xb9, - 0xa1, 0x1a, 0x0c, 0x05, 0x24, 0x74, 0xef, 0x92, 0x8a, 0xdf, 0x70, 0x6b, 0x7b, 0x93, 0x0f, 0xb1, - 0xe6, 0x3d, 0xd9, 0xb6, 0xcb, 0xb0, 0x56, 0x20, 0x56, 0xb7, 0xeb, 0x50, 0x6c, 0x30, 0x45, 0x6f, - 0xc0, 0x70, 0x40, 0xc2, 0xc8, 0x09, 0x22, 0x51, 0xcb, 0xa4, 0x32, 0x8f, 0x0d, 0x63, 0x1d, 0xc1, - 0xaf, 0x13, 0x71, 0x35, 0x31, 0x06, 0x9b, 0x1c, 0xd0, 0xc7, 0xa4, 0xee, 0x7f, 0xc5, 0x6f, 0x79, - 0x51, 0x38, 0x59, 0x62, 0xed, 0xce, 0xb4, 0xca, 0xde, 0x8c, 0xe9, 0x92, 0xc6, 0x01, 0x5e, 0x18, - 0x1b, 0xac, 0xd0, 0x27, 0x60, 0x98, 0xff, 0xe7, 0xb6, 0xcd, 0x70, 0xf2, 0x24, 0xe3, 0x7d, 0x3e, - 0x9f, 0x37, 0x27, 0x9c, 0x3d, 0x29, 0x98, 0x0f, 0xeb, 0xd0, 0x10, 0x9b, 0xdc, 0x10, 0x86, 0xe1, - 0x86, 0xbb, 0x4b, 0x3c, 0x12, 0x86, 0x95, 0xc0, 0x5f, 0x27, 0x42, 0x55, 0x7b, 0x3a, 0xdb, 0x16, - 0xea, 0xaf, 0x93, 0xd9, 0x71, 0xca, 0x73, 0x59, 0x2f, 0x83, 0x4d, 0x16, 0xe8, 0x06, 0x8c, 0xd0, - 0xbb, 0xb1, 0x1b, 0x33, 0x1d, 0xec, 0xc4, 0x94, 0xdd, 0x07, 0xb1, 0x51, 0x08, 0x27, 0x98, 0xa0, - 0xeb, 0x30, 0xc4, 0xfa, 0xbc, 0xd5, 0xe4, 0x4c, 0x4f, 0x75, 0x62, 0xca, 0x4c, 0xe9, 0x55, 0xad, - 0x08, 0x36, 0x18, 0xa0, 0xd7, 0xa1, 0xd4, 0x70, 0x37, 0x48, 0x6d, 0xaf, 0xd6, 0x20, 0x93, 0x43, - 0x8c, 0x5b, 0xe6, 0x66, 0xb8, 0x2c, 0x89, 0xb8, 0x7c, 0xae, 0xfe, 0xe2, 0xb8, 0x38, 0xba, 0x09, - 0xa7, 0x22, 0x12, 0xec, 0xb8, 0x9e, 0x43, 0x37, 0x31, 0x71, 0x25, 0x64, 0x26, 0xea, 0x61, 0x36, - 0xbb, 0xce, 0x89, 0xd1, 0x38, 0xb5, 0x96, 0x49, 0x85, 0x73, 0x4a, 0xa3, 0x3b, 0x30, 0x99, 0x81, - 0xe1, 0xf3, 0xf6, 0x04, 0xe3, 0xfc, 0xaa, 0xe0, 0x3c, 0xb9, 0x96, 0x43, 0x77, 0xd0, 0x06, 0x87, - 0x73, 0xb9, 0xa3, 0xeb, 0x30, 0xca, 0x76, 0xce, 0x4a, 0xab, 0xd1, 0x10, 0x15, 0x8e, 0xb0, 0x0a, - 0x1f, 0x97, 0x72, 0xc4, 0x92, 0x89, 0x3e, 0xd8, 0x2f, 0x43, 0xfc, 0x0f, 0x27, 0x4b, 0xa3, 0x75, - 0x66, 0x0d, 0x6d, 0x05, 0x6e, 0xb4, 0x47, 0x57, 0x15, 0xb9, 0x13, 0x4d, 0x8e, 0xb6, 0xd5, 0x0c, - 0xe9, 0xa4, 0xca, 0x64, 0xaa, 0x03, 0x71, 0x92, 0x21, 0x3d, 0x0a, 0xc2, 0xa8, 0xee, 0x7a, 0x93, - 0x63, 0xfc, 0x3e, 0x25, 0x77, 0xd2, 0x2a, 0x05, 0x62, 0x8e, 0x63, 0x96, 0x50, 0xfa, 0xe3, 0x3a, - 0x3d, 0x71, 0xc7, 0x19, 0x61, 0x6c, 0x09, 0x95, 0x08, 0x1c, 0xd3, 0x50, 0x21, 0x38, 0x8a, 0xf6, - 0x26, 0x11, 0x23, 0x55, 0x1b, 0xe2, 0xda, 0xda, 0xc7, 0x30, 0x85, 0xdb, 0xeb, 0x30, 0xa2, 0xb6, - 0x09, 0xd6, 0x27, 0xa8, 0x0c, 0xbd, 0x4c, 0xec, 0x13, 0x7a, 0xcc, 0x12, 0x6d, 0x02, 0x13, 0x09, - 0x31, 0x87, 0xb3, 0x26, 0xb8, 0x77, 0xc9, 0xec, 0x5e, 0x44, 0xb8, 0x2e, 0xa2, 0xa8, 0x35, 0x41, - 0x22, 0x70, 0x4c, 0x63, 0xff, 0x0f, 0x2e, 0x3e, 0xc7, 0xa7, 0x44, 0x17, 0xe7, 0xe2, 0x33, 0x30, - 0xb0, 0xe5, 0x87, 0x11, 0xa5, 0x66, 0x75, 0xf4, 0xc6, 0x02, 0xf3, 0x55, 0x01, 0xc7, 0x8a, 0x02, - 0xbd, 0x02, 0xc3, 0x35, 0xbd, 
0x02, 0x71, 0xa8, 0xab, 0x6d, 0xc4, 0xa8, 0x1d, 0x9b, 0xb4, 0xe8, - 0x25, 0x18, 0x60, 0xde, 0x3d, 0x35, 0xbf, 0x21, 0xa4, 0x4d, 0x29, 0x99, 0x0c, 0x54, 0x04, 0xfc, - 0x40, 0xfb, 0x8d, 0x15, 0x35, 0xba, 0x00, 0x7d, 0xb4, 0x09, 0x4b, 0x15, 0x71, 0x9c, 0x2a, 0x95, - 0xdc, 0x55, 0x06, 0xc5, 0x02, 0x6b, 0xff, 0x9a, 0xc5, 0x64, 0xa9, 0xf4, 0x9e, 0x8f, 0xae, 0xb2, - 0x43, 0x83, 0x9d, 0x20, 0x9a, 0x4a, 0xec, 0x31, 0xed, 0x24, 0x50, 0xb8, 0x83, 0xc4, 0x7f, 0x6c, - 0x94, 0x44, 0x6f, 0x26, 0x4f, 0x06, 0x2e, 0x50, 0xbc, 0x20, 0xbb, 0x20, 0x79, 0x3a, 0x3c, 0x1c, - 0x1f, 0x71, 0xb4, 0x3d, 0xed, 0x8e, 0x08, 0xfb, 0xff, 0x2c, 0x68, 0xb3, 0xa4, 0x1a, 0x39, 0x11, - 0x41, 0x15, 0xe8, 0xbf, 0xed, 0xb8, 0x91, 0xeb, 0x6d, 0x0a, 0xb9, 0xaf, 0xfd, 0x41, 0xc7, 0x0a, - 0xdd, 0xe2, 0x05, 0xb8, 0xf4, 0x22, 0xfe, 0x60, 0xc9, 0x86, 0x72, 0x0c, 0x5a, 0x9e, 0x47, 0x39, - 0x16, 0xba, 0xe5, 0x88, 0x79, 0x01, 0xce, 0x51, 0xfc, 0xc1, 0x92, 0x0d, 0x7a, 0x0b, 0x40, 0xee, - 0x10, 0xa4, 0x2e, 0xbc, 0x82, 0x9e, 0xe9, 0xcc, 0x74, 0x4d, 0x95, 0x99, 0x1d, 0xa1, 0xb2, 0x51, - 0xfc, 0x1f, 0x6b, 0xfc, 0xec, 0x48, 0x1b, 0x53, 0xbd, 0x31, 0xe8, 0xe3, 0x74, 0x89, 0x3a, 0x41, - 0x44, 0xea, 0x33, 0x91, 0xe8, 0x9c, 0xa7, 0xba, 0xbb, 0x1c, 0xae, 0xb9, 0x3b, 0x44, 0x5f, 0xce, - 0x82, 0x09, 0x8e, 0xf9, 0xd9, 0xbf, 0x54, 0x84, 0xc9, 0xbc, 0xe6, 0xd2, 0x45, 0x43, 0xee, 0xb8, - 0xd1, 0x1c, 0x15, 0x6b, 0x2d, 0x73, 0xd1, 0x2c, 0x08, 0x38, 0x56, 0x14, 0x74, 0xf6, 0x86, 0xee, - 0xa6, 0xbc, 0xdb, 0xf7, 0xc6, 0xb3, 0xb7, 0xca, 0xa0, 0x58, 0x60, 0x29, 0x5d, 0x40, 0x9c, 0x50, - 0xb8, 0x9d, 0x69, 0xb3, 0x1c, 0x33, 0x28, 0x16, 0x58, 0x5d, 0xcb, 0xd8, 0xd3, 0x41, 0xcb, 0x68, - 0x74, 0x51, 0xef, 0xd1, 0x76, 0x11, 0xfa, 0x24, 0xc0, 0x86, 0xeb, 0xb9, 0xe1, 0x16, 0xe3, 0xde, - 0x77, 0x68, 0xee, 0x4a, 0x28, 0x5e, 0x54, 0x5c, 0xb0, 0xc6, 0x11, 0xbd, 0x08, 0x83, 0x6a, 0x03, - 0x59, 0x9a, 0x67, 0x36, 0x78, 0xcd, 0xa7, 0x29, 0xde, 0x4d, 0xe7, 0xb1, 0x4e, 0x67, 0x7f, 0x3a, - 0x39, 0x5f, 0xc4, 0x0a, 0xd0, 0xfa, 0xd7, 0xea, 0xb6, 0x7f, 0x0b, 0xed, 0xfb, 0xd7, 0xfe, 0x66, - 0x1f, 0x8c, 0x1a, 0x95, 0xb5, 0xc2, 0x2e, 0xf6, 0xdc, 0x2b, 0xf4, 0x00, 0x72, 0x22, 0x22, 0xd6, - 0x9f, 0xdd, 0x79, 0xa9, 0xe8, 0x87, 0x14, 0x5d, 0x01, 0xbc, 0x3c, 0xfa, 0x24, 0x94, 0x1a, 0x4e, - 0xc8, 0x34, 0x96, 0x44, 0xac, 0xbb, 0x6e, 0x98, 0xc5, 0x17, 0x42, 0x27, 0x8c, 0xb4, 0x53, 0x9f, - 0xf3, 0x8e, 0x59, 0xd2, 0x93, 0x92, 0xca, 0x57, 0xd2, 0xaf, 0x51, 0x35, 0x82, 0x0a, 0x61, 0x7b, - 0x98, 0xe3, 0xd0, 0x4b, 0x6c, 0x6b, 0xa5, 0xb3, 0x62, 0x8e, 0x4a, 0xa3, 0x6c, 0x9a, 0xf5, 0x1a, - 0x42, 0xb6, 0xc2, 0x61, 0x83, 0x32, 0xbe, 0x93, 0xf5, 0xb5, 0xb9, 0x93, 0x3d, 0x09, 0xfd, 0xec, - 0x87, 0x9a, 0x01, 0x6a, 0x34, 0x96, 0x38, 0x18, 0x4b, 0x7c, 0x72, 0xc2, 0x0c, 0x74, 0x37, 0x61, - 0xe8, 0xad, 0x4f, 0x4c, 0x6a, 0xe6, 0xff, 0x30, 0xc0, 0x77, 0x39, 0x31, 0xe5, 0xb1, 0xc4, 0xa1, - 0x9f, 0xb6, 0x00, 0x39, 0x0d, 0x7a, 0x5b, 0xa6, 0x60, 0x75, 0xb9, 0x01, 0x26, 0x6a, 0xbf, 0xd2, - 0xb1, 0xdb, 0x5b, 0xe1, 0xf4, 0x4c, 0xaa, 0x34, 0xd7, 0x94, 0xbe, 0x2c, 0x9a, 0x88, 0xd2, 0x04, - 0xfa, 0x61, 0xb4, 0xec, 0x86, 0xd1, 0xe7, 0xfe, 0x28, 0x71, 0x38, 0x65, 0x34, 0x09, 0xdd, 0xd0, - 0x2f, 0x5f, 0x83, 0x87, 0xbc, 0x7c, 0x0d, 0xe7, 0x5d, 0xbc, 0xa6, 0x5a, 0xf0, 0x50, 0xce, 0x17, - 0x64, 0xe8, 0x5f, 0xe7, 0x75, 0xfd, 0x6b, 0x07, 0xad, 0xdd, 0xb4, 0xac, 0x63, 0xfa, 0x8d, 0x96, - 0xe3, 0x45, 0x6e, 0xb4, 0xa7, 0xeb, 0x6b, 0x9f, 0x82, 0x91, 0x79, 0x87, 0xec, 0xf8, 0xde, 0x82, - 0x57, 0x6f, 0xfa, 0xae, 0x17, 0xa1, 0x49, 0xe8, 0x61, 0xc2, 0x07, 0xdf, 0x7a, 0x7b, 0x68, 0xef, - 0x61, 0x06, 0xb1, 0x37, 0xe1, 0xe4, 0xbc, 0x7f, 0xdb, 
0xbb, 0xed, 0x04, 0xf5, 0x99, 0xca, 0x92, - 0xa6, 0x4f, 0x5a, 0x95, 0xfa, 0x0c, 0x2b, 0xff, 0xb6, 0xa8, 0x95, 0xe4, 0xd7, 0xa1, 0x45, 0xb7, - 0x41, 0x72, 0xb4, 0x7e, 0xff, 0x4f, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x76, 0x67, 0x2b, 0xd7, 0xee, - 0xfc, 0x06, 0x0c, 0x6c, 0xb8, 0xa4, 0x51, 0xc7, 0x64, 0x43, 0xf4, 0xce, 0x13, 0xf9, 0x9e, 0x69, - 0x8b, 0x94, 0x52, 0x6a, 0x79, 0xb9, 0x36, 0x64, 0x51, 0x14, 0xc6, 0x8a, 0x0d, 0xda, 0x86, 0x31, - 0xd9, 0x87, 0x12, 0x2b, 0xf6, 0x83, 0x27, 0xdb, 0x0d, 0xbc, 0xc9, 0xfc, 0xc4, 0xbd, 0xfd, 0xf2, - 0x18, 0x4e, 0xb0, 0xc1, 0x29, 0xc6, 0xe8, 0x0c, 0xf4, 0xec, 0xd0, 0x93, 0xaf, 0x87, 0x75, 0x3f, - 0x53, 0x7f, 0x30, 0x4d, 0x0e, 0x83, 0xda, 0x3f, 0x66, 0xc1, 0x43, 0xa9, 0x9e, 0x11, 0x1a, 0xad, - 0x23, 0x1e, 0x85, 0xa4, 0x86, 0xa9, 0xd0, 0x59, 0xc3, 0x64, 0xff, 0x9c, 0x05, 0x27, 0x16, 0x76, - 0x9a, 0xd1, 0xde, 0xbc, 0x6b, 0x1a, 0x89, 0x3f, 0x08, 0x7d, 0x3b, 0xa4, 0xee, 0xb6, 0x76, 0xc4, - 0xc8, 0x95, 0xe5, 0xe9, 0xb0, 0xc2, 0xa0, 0x07, 0xfb, 0xe5, 0xe1, 0x6a, 0xe4, 0x07, 0xce, 0x26, - 0xe1, 0x00, 0x2c, 0xc8, 0xd9, 0x19, 0xeb, 0xde, 0x25, 0xcb, 0xee, 0x8e, 0x1b, 0xdd, 0xdf, 0x6c, - 0x17, 0xf6, 0x5d, 0xc9, 0x04, 0xc7, 0xfc, 0xec, 0x6f, 0x58, 0x30, 0x2a, 0xe7, 0xfd, 0x4c, 0xbd, - 0x1e, 0x90, 0x30, 0x44, 0x53, 0x50, 0x70, 0x9b, 0xa2, 0x95, 0x20, 0x5a, 0x59, 0x58, 0xaa, 0xe0, - 0x82, 0xdb, 0x94, 0xe2, 0x3c, 0x3b, 0x80, 0x8a, 0xa6, 0xa9, 0xfb, 0xaa, 0x80, 0x63, 0x45, 0x81, - 0x2e, 0xc2, 0x80, 0xe7, 0xd7, 0xb9, 0x44, 0xcc, 0x45, 0x09, 0x36, 0xc1, 0x56, 0x05, 0x0c, 0x2b, - 0x2c, 0xaa, 0x40, 0x89, 0x3b, 0x42, 0xc6, 0x93, 0xb6, 0x2b, 0x77, 0x4a, 0xf6, 0x65, 0x6b, 0xb2, - 0x24, 0x8e, 0x99, 0xd8, 0xbf, 0x61, 0xc1, 0x90, 0xfc, 0xb2, 0x2e, 0xef, 0x2a, 0x74, 0x69, 0xc5, - 0xf7, 0x94, 0x78, 0x69, 0xd1, 0xbb, 0x06, 0xc3, 0x18, 0x57, 0x8c, 0xe2, 0xa1, 0xae, 0x18, 0x97, - 0x61, 0xd0, 0x69, 0x36, 0x2b, 0xe6, 0xfd, 0x84, 0x4d, 0xa5, 0x99, 0x18, 0x8c, 0x75, 0x1a, 0xfb, - 0x47, 0x0b, 0x30, 0x22, 0xbf, 0xa0, 0xda, 0x5a, 0x0f, 0x49, 0x84, 0xd6, 0xa0, 0xe4, 0xf0, 0x51, - 0x22, 0x72, 0x92, 0x3f, 0x9a, 0xad, 0x37, 0x33, 0x86, 0x34, 0x16, 0xb4, 0x66, 0x64, 0x69, 0x1c, - 0x33, 0x42, 0x0d, 0x18, 0xf7, 0xfc, 0x88, 0x1d, 0xba, 0x0a, 0xdf, 0xce, 0x94, 0x99, 0xe4, 0x7e, - 0x5a, 0x70, 0x1f, 0x5f, 0x4d, 0x72, 0xc1, 0x69, 0xc6, 0x68, 0x41, 0xea, 0x22, 0x8b, 0xf9, 0x4a, - 0x24, 0x7d, 0xe0, 0xb2, 0x55, 0x91, 0xf6, 0xaf, 0x5a, 0x50, 0x92, 0x64, 0xc7, 0x61, 0xb5, 0x5e, - 0x81, 0xfe, 0x90, 0x0d, 0x82, 0xec, 0x1a, 0xbb, 0x5d, 0xc3, 0xf9, 0x78, 0xc5, 0xb2, 0x04, 0xff, - 0x1f, 0x62, 0xc9, 0x83, 0x99, 0xa2, 0x54, 0xf3, 0xdf, 0x25, 0xa6, 0x28, 0xd5, 0x9e, 0x9c, 0x43, - 0xe9, 0x4f, 0x59, 0x9b, 0x35, 0xdd, 0x2e, 0x15, 0x79, 0x9b, 0x01, 0xd9, 0x70, 0xef, 0x24, 0x45, - 0xde, 0x0a, 0x83, 0x62, 0x81, 0x45, 0x6f, 0xc1, 0x50, 0x4d, 0xda, 0x20, 0xe2, 0x15, 0x7e, 0xa1, - 0xad, 0x3d, 0x4c, 0x99, 0x4e, 0xb9, 0x0e, 0x6d, 0x4e, 0x2b, 0x8f, 0x0d, 0x6e, 0xa6, 0xa3, 0x4f, - 0xb1, 0x93, 0xa3, 0x4f, 0xcc, 0x37, 0xdf, 0xed, 0xe5, 0xc7, 0x2d, 0xe8, 0xe3, 0xba, 0xe7, 0xee, - 0x54, 0xff, 0x9a, 0x25, 0x39, 0xee, 0xbb, 0x9b, 0x14, 0x28, 0x24, 0x0d, 0xb4, 0x02, 0x25, 0xf6, - 0x83, 0xe9, 0xce, 0x8b, 0xf9, 0xef, 0x70, 0x78, 0xad, 0x7a, 0x03, 0x6f, 0xca, 0x62, 0x38, 0xe6, - 0x60, 0xff, 0x48, 0x91, 0xee, 0x6e, 0x31, 0xa9, 0x71, 0xe8, 0x5b, 0x0f, 0xee, 0xd0, 0x2f, 0x3c, - 0xa8, 0x43, 0x7f, 0x13, 0x46, 0x6b, 0x9a, 0xdd, 0x39, 0x1e, 0xc9, 0x8b, 0x6d, 0x27, 0x89, 0x66, - 0xa2, 0xe6, 0xda, 0xb9, 0x39, 0x93, 0x09, 0x4e, 0x72, 0x45, 0x1f, 0x87, 0x21, 0x3e, 0xce, 0xa2, - 0x16, 0xee, 0x2b, 0xf5, 0x78, 0xfe, 0x7c, 0xd1, 0xab, 0xe0, 0xda, 0x5c, 0xad, 
0x38, 0x36, 0x98, - 0xd9, 0x7f, 0x69, 0x01, 0x5a, 0x68, 0x6e, 0x91, 0x1d, 0x12, 0x38, 0x8d, 0xd8, 0x7c, 0xf4, 0x45, - 0x0b, 0x26, 0x49, 0x0a, 0x3c, 0xe7, 0xef, 0xec, 0x88, 0xcb, 0x62, 0x8e, 0x3e, 0x63, 0x21, 0xa7, - 0x8c, 0x7a, 0xa8, 0x34, 0x99, 0x47, 0x81, 0x73, 0xeb, 0x43, 0x2b, 0x30, 0xc1, 0x4f, 0x49, 0x85, - 0xd0, 0xfc, 0xae, 0x1e, 0x16, 0x8c, 0x27, 0xd6, 0xd2, 0x24, 0x38, 0xab, 0x9c, 0xfd, 0xab, 0xc3, - 0x90, 0xdb, 0x8a, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, 0xde, 0xb3, 0x9b, 0xbd, 0x67, 0x37, 0x7b, - 0xcf, 0x6e, 0xf6, 0x9e, 0xdd, 0xec, 0x5d, 0x6a, 0x37, 0xfb, 0xbf, 0x2c, 0x38, 0xa9, 0x8e, 0x2f, - 0xe3, 0xc2, 0xfe, 0x19, 0x98, 0xe0, 0xcb, 0xcd, 0xf0, 0x31, 0x16, 0xc7, 0xf5, 0xe5, 0xcc, 0x99, - 0x9b, 0xf0, 0x85, 0x37, 0x0a, 0xf2, 0x47, 0x45, 0x19, 0x08, 0x9c, 0x55, 0x8d, 0xfd, 0x4b, 0x03, - 0xd0, 0xbb, 0xb0, 0x4b, 0xbc, 0xe8, 0x18, 0xae, 0x36, 0x35, 0x18, 0x71, 0xbd, 0x5d, 0xbf, 0xb1, - 0x4b, 0xea, 0x1c, 0x7f, 0x98, 0x1b, 0xf8, 0x29, 0xc1, 0x7a, 0x64, 0xc9, 0x60, 0x81, 0x13, 0x2c, - 0x1f, 0x84, 0xf5, 0xe1, 0x0a, 0xf4, 0xf1, 0xc3, 0x47, 0x98, 0x1e, 0x32, 0xf7, 0x6c, 0xd6, 0x89, - 0xe2, 0x48, 0x8d, 0x2d, 0x23, 0xfc, 0x70, 0x13, 0xc5, 0xd1, 0xa7, 0x61, 0x64, 0xc3, 0x0d, 0xc2, - 0x68, 0xcd, 0xdd, 0xa1, 0x47, 0xc3, 0x4e, 0xf3, 0x3e, 0xac, 0x0d, 0xaa, 0x1f, 0x16, 0x0d, 0x4e, - 0x38, 0xc1, 0x19, 0x6d, 0xc2, 0x70, 0xc3, 0xd1, 0xab, 0xea, 0x3f, 0x74, 0x55, 0xea, 0x74, 0x58, - 0xd6, 0x19, 0x61, 0x93, 0x2f, 0x5d, 0x4e, 0x35, 0xa6, 0x30, 0x1f, 0x60, 0xea, 0x0c, 0xb5, 0x9c, - 0xb8, 0xa6, 0x9c, 0xe3, 0xa8, 0x80, 0xc6, 0x1c, 0xd9, 0x4b, 0xa6, 0x80, 0xa6, 0xb9, 0xab, 0x7f, - 0x0a, 0x4a, 0x84, 0x76, 0x21, 0x65, 0x2c, 0x0e, 0x98, 0x4b, 0xdd, 0xb5, 0x75, 0xc5, 0xad, 0x05, - 0xbe, 0x69, 0xe7, 0x59, 0x90, 0x9c, 0x70, 0xcc, 0x14, 0xcd, 0x41, 0x5f, 0x48, 0x02, 0x57, 0xe9, - 0x92, 0xdb, 0x0c, 0x23, 0x23, 0xe3, 0xaf, 0xd6, 0xf8, 0x6f, 0x2c, 0x8a, 0xd2, 0xe9, 0xe5, 0x30, - 0x55, 0x2c, 0x3b, 0x0c, 0xb4, 0xe9, 0x35, 0xc3, 0xa0, 0x58, 0x60, 0xd1, 0xeb, 0xd0, 0x1f, 0x90, - 0x06, 0x33, 0x24, 0x0e, 0x77, 0x3f, 0xc9, 0xb9, 0x5d, 0x92, 0x97, 0xc3, 0x92, 0x01, 0xba, 0x06, - 0x28, 0x20, 0x54, 0xc0, 0x73, 0xbd, 0x4d, 0xe5, 0xde, 0x2d, 0x36, 0x5a, 0x25, 0x48, 0xe3, 0x98, - 0x42, 0x3e, 0x58, 0xc4, 0x19, 0xc5, 0xd0, 0x15, 0x18, 0x57, 0xd0, 0x25, 0x2f, 0x8c, 0x1c, 0xba, - 0xc1, 0x8d, 0x32, 0x5e, 0x4a, 0xbf, 0x82, 0x93, 0x04, 0x38, 0x5d, 0xc6, 0xfe, 0x59, 0x0b, 0x78, - 0x3f, 0x1f, 0x83, 0x56, 0xe1, 0x35, 0x53, 0xab, 0x70, 0x3a, 0x77, 0xe4, 0x72, 0x34, 0x0a, 0x3f, - 0x6b, 0xc1, 0xa0, 0x36, 0xb2, 0xf1, 0x9c, 0xb5, 0xda, 0xcc, 0xd9, 0x16, 0x8c, 0xd1, 0x99, 0x7e, - 0x7d, 0x3d, 0x24, 0xc1, 0x2e, 0xa9, 0xb3, 0x89, 0x59, 0xb8, 0xbf, 0x89, 0xa9, 0x5c, 0x49, 0x97, - 0x13, 0x0c, 0x71, 0xaa, 0x0a, 0xfb, 0x53, 0xb2, 0xa9, 0xca, 0xf3, 0xb6, 0xa6, 0xc6, 0x3c, 0xe1, - 0x79, 0xab, 0x46, 0x15, 0xc7, 0x34, 0x74, 0xa9, 0x6d, 0xf9, 0x61, 0x94, 0xf4, 0xbc, 0xbd, 0xea, - 0x87, 0x11, 0x66, 0x18, 0xfb, 0x79, 0x80, 0x85, 0x3b, 0xa4, 0xc6, 0x67, 0xac, 0x7e, 0xe9, 0xb1, - 0xf2, 0x2f, 0x3d, 0xf6, 0xef, 0x59, 0x30, 0xb2, 0x38, 0x67, 0x9c, 0x5c, 0xd3, 0x00, 0xfc, 0xa6, - 0x76, 0xeb, 0xd6, 0xaa, 0x74, 0xff, 0xe0, 0x16, 0x70, 0x05, 0xc5, 0x1a, 0x05, 0x3a, 0x0d, 0xc5, - 0x46, 0xcb, 0x13, 0x6a, 0xcf, 0x7e, 0x7a, 0x3c, 0x2e, 0xb7, 0x3c, 0x4c, 0x61, 0xda, 0x63, 0xa5, - 0x62, 0xd7, 0x8f, 0x95, 0x3a, 0x06, 0x29, 0x41, 0x65, 0xe8, 0xbd, 0x7d, 0xdb, 0xad, 0xf3, 0xa7, - 0xe0, 0xc2, 0x35, 0xe5, 0xd6, 0xad, 0xa5, 0xf9, 0x10, 0x73, 0xb8, 0xfd, 0xa5, 0x22, 0x4c, 0x2d, - 0x36, 0xc8, 0x9d, 0x77, 0xf8, 0x1c, 0xbe, 0xdb, 0xa7, 0x56, 0x87, 0x53, 0x20, 0x1d, 0xf6, 0x39, - 0x5d, 
0xe7, 0xfe, 0xd8, 0x80, 0x7e, 0xee, 0x78, 0x2a, 0x1f, 0xc7, 0x67, 0x9a, 0xfb, 0xf2, 0x3b, - 0x64, 0x9a, 0x3b, 0xb0, 0x0a, 0x73, 0x9f, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x53, 0x2f, 0xc3, - 0x90, 0x4e, 0x79, 0xa8, 0x87, 0xad, 0xdf, 0x5b, 0x84, 0x31, 0xda, 0x82, 0x07, 0x3a, 0x10, 0x37, - 0xd2, 0x03, 0x71, 0xd4, 0x8f, 0x1b, 0x3b, 0x8f, 0xc6, 0x5b, 0xc9, 0xd1, 0xb8, 0x9c, 0x37, 0x1a, - 0xc7, 0x3d, 0x06, 0xdf, 0x67, 0xc1, 0xc4, 0x62, 0xc3, 0xaf, 0x6d, 0x27, 0x1e, 0x20, 0xbe, 0x08, - 0x83, 0x74, 0x3b, 0x0e, 0x8d, 0x58, 0x1c, 0x46, 0x74, 0x16, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, - 0xc6, 0x8d, 0xa5, 0xf9, 0xac, 0xa0, 0x2e, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0x77, 0x2c, 0x38, 0x7b, - 0x65, 0x6e, 0x21, 0x9e, 0x8a, 0xa9, 0xb8, 0x32, 0x17, 0xa0, 0xaf, 0x59, 0xd7, 0x9a, 0x12, 0xab, - 0x85, 0xe7, 0x59, 0x2b, 0x04, 0xf6, 0xdd, 0x12, 0x33, 0xe9, 0x06, 0xc0, 0x15, 0x5c, 0x99, 0x13, - 0xfb, 0xae, 0xb4, 0x02, 0x59, 0xb9, 0x56, 0xa0, 0xc7, 0xa1, 0x9f, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, - 0x9b, 0x1b, 0xf4, 0x39, 0x08, 0x4b, 0x9c, 0xfd, 0x33, 0x16, 0x4c, 0x5c, 0x71, 0x23, 0x7a, 0x68, - 0x27, 0x03, 0xa7, 0xd0, 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x06, 0x4e, 0xc1, 0x0a, 0x83, - 0x35, 0x2a, 0xfe, 0x41, 0xbb, 0x2e, 0x7b, 0x49, 0x51, 0x30, 0xed, 0x6e, 0x58, 0xc0, 0xb1, 0xa2, - 0xa0, 0xfd, 0x55, 0x77, 0x03, 0xa6, 0xb2, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x79, 0x89, 0xc0, - 0x31, 0x8d, 0xfd, 0xe7, 0x16, 0x94, 0xaf, 0x34, 0x5a, 0x61, 0x44, 0x82, 0x8d, 0x30, 0x67, 0xd3, - 0x7d, 0x1e, 0x4a, 0x44, 0x1a, 0x08, 0xe4, 0x93, 0x4f, 0x29, 0x88, 0x2a, 0xcb, 0x01, 0x8f, 0xdf, - 0xa2, 0xe8, 0xba, 0x78, 0x25, 0x7d, 0xb8, 0x67, 0xae, 0x8b, 0x80, 0x88, 0x5e, 0x97, 0x1e, 0xd0, - 0x86, 0x45, 0xc6, 0x58, 0x48, 0x61, 0x71, 0x46, 0x09, 0xfb, 0xc7, 0x2c, 0x38, 0xa9, 0x3e, 0xf8, - 0x5d, 0xf7, 0x99, 0xf6, 0xd7, 0x0a, 0x30, 0x7c, 0x75, 0x6d, 0xad, 0x72, 0x85, 0x44, 0xda, 0xac, - 0x6c, 0x6f, 0xf6, 0xc7, 0x9a, 0xf5, 0xb2, 0xdd, 0x1d, 0xb1, 0x15, 0xb9, 0x8d, 0x69, 0x1e, 0x17, - 0x6d, 0x7a, 0xc9, 0x8b, 0xae, 0x07, 0xd5, 0x28, 0x70, 0xbd, 0xcd, 0xcc, 0x99, 0x2e, 0x65, 0x96, - 0x62, 0x9e, 0xcc, 0x82, 0x9e, 0x87, 0x3e, 0x16, 0x98, 0x4d, 0x0e, 0xc2, 0xc3, 0xea, 0x8a, 0xc5, - 0xa0, 0x07, 0xfb, 0xe5, 0xd2, 0x0d, 0xbc, 0xc4, 0xff, 0x60, 0x41, 0x8a, 0x6e, 0xc0, 0xe0, 0x56, - 0x14, 0x35, 0xaf, 0x12, 0xa7, 0x4e, 0x02, 0xb9, 0xcb, 0x9e, 0xcb, 0xda, 0x65, 0x69, 0x27, 0x70, - 0xb2, 0x78, 0x63, 0x8a, 0x61, 0x21, 0xd6, 0xf9, 0xd8, 0x55, 0x80, 0x18, 0x77, 0x44, 0x86, 0x1b, - 0x7b, 0x0d, 0x4a, 0xf4, 0x73, 0x67, 0x1a, 0xae, 0xd3, 0xde, 0x34, 0xfe, 0x34, 0x94, 0xa4, 0xe1, - 0x3b, 0x14, 0x51, 0x1c, 0xd8, 0x89, 0x24, 0xed, 0xe2, 0x21, 0x8e, 0xf1, 0xf6, 0x63, 0x20, 0x7c, - 0x4b, 0xdb, 0xb1, 0xb4, 0x37, 0xe0, 0x04, 0x73, 0x92, 0x75, 0xa2, 0x2d, 0x63, 0x8e, 0x76, 0x9e, - 0x0c, 0xcf, 0x88, 0x7b, 0x1d, 0xff, 0xb2, 0x49, 0xed, 0x71, 0xf2, 0x90, 0xe4, 0x18, 0xdf, 0xf1, - 0xec, 0x3f, 0xeb, 0x81, 0x87, 0x97, 0xaa, 0xf9, 0xe1, 0x87, 0x5e, 0x82, 0x21, 0x2e, 0x2e, 0xd2, - 0xa9, 0xe1, 0x34, 0x44, 0xbd, 0x4a, 0x03, 0xba, 0xa6, 0xe1, 0xb0, 0x41, 0x89, 0xce, 0x42, 0xd1, - 0x7d, 0xdb, 0x4b, 0x3e, 0xdd, 0x5b, 0x7a, 0x63, 0x15, 0x53, 0x38, 0x45, 0x53, 0xc9, 0x93, 0x6f, - 0xe9, 0x0a, 0xad, 0xa4, 0xcf, 0xd7, 0x60, 0xc4, 0x0d, 0x6b, 0xa1, 0xbb, 0xe4, 0xd1, 0x75, 0xaa, - 0xad, 0x74, 0xa5, 0x73, 0xa0, 0x8d, 0x56, 0x58, 0x9c, 0xa0, 0xd6, 0xce, 0x97, 0xde, 0xae, 0xa5, - 0xd7, 0x8e, 0xc1, 0x0f, 0xe8, 0xf6, 0xdf, 0x64, 0x5f, 0x17, 0x32, 0x15, 0xbc, 0xd8, 0xfe, 0xf9, - 0x07, 0x87, 0x58, 0xe2, 0xe8, 0x85, 0xae, 0xb6, 0xe5, 0x34, 0x67, 0x5a, 0xd1, 0xd6, 0xbc, 0x1b, - 0xd6, 0xfc, 0x5d, 0x12, 0xec, 
0xb1, 0xbb, 0xf8, 0x40, 0x7c, 0xa1, 0x53, 0x88, 0xb9, 0xab, 0x33, - 0x15, 0x4a, 0x89, 0xd3, 0x65, 0xd0, 0x0c, 0x8c, 0x4a, 0x60, 0x95, 0x84, 0xec, 0x08, 0x18, 0x64, - 0x6c, 0xd4, 0x63, 0x3a, 0x01, 0x56, 0x4c, 0x92, 0xf4, 0xa6, 0x80, 0x0b, 0x47, 0x21, 0xe0, 0x7e, - 0x10, 0x86, 0x5d, 0xcf, 0x8d, 0x5c, 0x27, 0xf2, 0xb9, 0xfd, 0x88, 0x5f, 0xbb, 0x99, 0x82, 0x79, - 0x49, 0x47, 0x60, 0x93, 0xce, 0xfe, 0x37, 0x3d, 0x30, 0xce, 0x86, 0xed, 0xbd, 0x19, 0xf6, 0x9d, - 0x34, 0xc3, 0x6e, 0xa4, 0x67, 0xd8, 0x51, 0x48, 0xee, 0xf7, 0x3d, 0xcd, 0x3e, 0x0d, 0x25, 0xf5, - 0x7e, 0x50, 0x3e, 0x20, 0xb6, 0x72, 0x1e, 0x10, 0x77, 0x3e, 0xbd, 0xa5, 0x4b, 0x5a, 0x31, 0xd3, - 0x25, 0xed, 0x2b, 0x16, 0xc4, 0x86, 0x05, 0xf4, 0x06, 0x94, 0x9a, 0x3e, 0xf3, 0x70, 0x0d, 0xa4, - 0xdb, 0xf8, 0x63, 0x6d, 0x2d, 0x13, 0x3c, 0x02, 0x5b, 0xc0, 0x7b, 0xa1, 0x22, 0x8b, 0xe2, 0x98, - 0x0b, 0xba, 0x06, 0xfd, 0xcd, 0x80, 0x54, 0x23, 0x16, 0x1e, 0xa8, 0x7b, 0x86, 0x7c, 0xd6, 0xf0, - 0x82, 0x58, 0x72, 0xb0, 0xff, 0x9d, 0x05, 0x63, 0x49, 0x52, 0xf4, 0x2a, 0xf4, 0x90, 0x3b, 0xa4, - 0x26, 0xda, 0x9b, 0x79, 0x14, 0xc7, 0xaa, 0x09, 0xde, 0x01, 0xf4, 0x3f, 0x66, 0xa5, 0xd0, 0x55, - 0xe8, 0xa7, 0xe7, 0xf0, 0x15, 0x15, 0x0a, 0xef, 0x91, 0xbc, 0xb3, 0x5c, 0x09, 0x34, 0xbc, 0x71, - 0x02, 0x84, 0x65, 0x71, 0xe6, 0x07, 0x56, 0x6b, 0x56, 0xe9, 0x15, 0x27, 0x6a, 0x77, 0x13, 0x5f, - 0x9b, 0xab, 0x70, 0x22, 0xc1, 0x8d, 0xfb, 0x81, 0x49, 0x20, 0x8e, 0x99, 0xd8, 0xbf, 0x60, 0x01, - 0x70, 0xb7, 0x37, 0xc7, 0xdb, 0x24, 0xc7, 0xa0, 0x4d, 0x9f, 0x87, 0x9e, 0xb0, 0x49, 0x6a, 0xed, - 0x9c, 0xaf, 0xe3, 0xf6, 0x54, 0x9b, 0xa4, 0x16, 0xcf, 0x38, 0xfa, 0x0f, 0xb3, 0xd2, 0xf6, 0xf7, - 0x03, 0x8c, 0xc4, 0x64, 0x4b, 0x11, 0xd9, 0x41, 0xcf, 0x1a, 0x41, 0x47, 0x4e, 0x27, 0x82, 0x8e, - 0x94, 0x18, 0xb5, 0xa6, 0xb8, 0xfd, 0x34, 0x14, 0x77, 0x9c, 0x3b, 0x42, 0x33, 0xf7, 0x74, 0xfb, - 0x66, 0x50, 0xfe, 0xd3, 0x2b, 0xce, 0x1d, 0x7e, 0x79, 0x7d, 0x5a, 0xae, 0x90, 0x15, 0xe7, 0x4e, - 0x47, 0x07, 0x61, 0x5a, 0x09, 0xab, 0xcb, 0xf5, 0x84, 0x47, 0x57, 0x57, 0x75, 0xb9, 0x5e, 0xb2, - 0x2e, 0xd7, 0xeb, 0xa2, 0x2e, 0xd7, 0x43, 0x77, 0xa1, 0x5f, 0x38, 0x5c, 0x8a, 0xb0, 0x64, 0x97, - 0xba, 0xa8, 0x4f, 0xf8, 0x6b, 0xf2, 0x3a, 0x2f, 0xc9, 0xcb, 0xb9, 0x80, 0x76, 0xac, 0x57, 0x56, - 0x88, 0xfe, 0x6f, 0x0b, 0x46, 0xc4, 0x6f, 0x4c, 0xde, 0x6e, 0x91, 0x30, 0x12, 0xc2, 0xeb, 0x07, - 0xba, 0x6f, 0x83, 0x28, 0xc8, 0x9b, 0xf2, 0x01, 0x79, 0xce, 0x98, 0xc8, 0x8e, 0x2d, 0x4a, 0xb4, - 0x02, 0xfd, 0x2d, 0x0b, 0x4e, 0xec, 0x38, 0x77, 0x78, 0x8d, 0x1c, 0x86, 0x9d, 0xc8, 0xf5, 0x85, - 0xe3, 0xc2, 0xab, 0xdd, 0x0d, 0x7f, 0xaa, 0x38, 0x6f, 0xa4, 0xb4, 0x52, 0x9e, 0xc8, 0x22, 0xe9, - 0xd8, 0xd4, 0xcc, 0x76, 0x4d, 0x6d, 0xc0, 0x80, 0x9c, 0x6f, 0x0f, 0xd2, 0xbb, 0x9b, 0xd5, 0x23, - 0xe6, 0xda, 0x03, 0xad, 0xe7, 0xd3, 0x30, 0xa4, 0xcf, 0xb1, 0x07, 0x5a, 0xd7, 0xdb, 0x30, 0x91, - 0x31, 0x97, 0x1e, 0x68, 0x95, 0xb7, 0xe1, 0x74, 0xee, 0xfc, 0x78, 0xa0, 0xde, 0xf9, 0x5f, 0xb3, - 0xf4, 0x7d, 0xf0, 0x18, 0x4c, 0x1a, 0x73, 0xa6, 0x49, 0xe3, 0x5c, 0xfb, 0x95, 0x93, 0x63, 0xd7, - 0x78, 0x4b, 0x6f, 0x34, 0xdd, 0xd5, 0xd1, 0xeb, 0xd0, 0xd7, 0xa0, 0x10, 0xe9, 0xb6, 0x6b, 0x77, - 0x5e, 0x91, 0xb1, 0x30, 0xc9, 0xe0, 0x21, 0x16, 0x1c, 0xec, 0x5f, 0xb6, 0xa0, 0xe7, 0x18, 0x7a, - 0x02, 0x9b, 0x3d, 0xf1, 0x6c, 0x2e, 0x6b, 0x11, 0xa1, 0x7d, 0x1a, 0x3b, 0xb7, 0x17, 0xee, 0x44, - 0xc4, 0x0b, 0xd9, 0x89, 0x9c, 0xd9, 0x31, 0xfb, 0x16, 0x4c, 0x2c, 0xfb, 0x4e, 0x7d, 0xd6, 0x69, - 0x38, 0x5e, 0x8d, 0x04, 0x4b, 0xde, 0xe6, 0xa1, 0x7c, 0xce, 0x0b, 0x1d, 0x7d, 0xce, 0x5f, 0x82, - 0x3e, 0xb7, 0xa9, 0x45, 0x9c, 0x3e, 0x4f, 0x3b, 0x70, 
0xa9, 0x22, 0x82, 0x4d, 0x23, 0xa3, 0x72, - 0x06, 0xc5, 0x82, 0x9e, 0x8e, 0x3c, 0x77, 0xf6, 0xea, 0xc9, 0x1f, 0x79, 0x2a, 0x83, 0x27, 0x03, - 0x38, 0x19, 0x6e, 0xc9, 0x5b, 0x60, 0x54, 0x21, 0xde, 0x6c, 0x61, 0xe8, 0x77, 0xf9, 0x97, 0x8a, - 0xe1, 0x7f, 0x22, 0x5b, 0x36, 0x4e, 0x75, 0x8c, 0xf6, 0x1a, 0x89, 0x03, 0xb0, 0x64, 0x64, 0xbf, - 0x04, 0x99, 0x01, 0x37, 0x3a, 0xeb, 0x3d, 0xec, 0x8f, 0xc1, 0x38, 0x2b, 0x79, 0x48, 0x9d, 0x82, - 0x9d, 0xd0, 0xd6, 0x66, 0x04, 0x0f, 0xb5, 0xbf, 0x60, 0xc1, 0xe8, 0x6a, 0x22, 0xa6, 0xe2, 0x05, - 0x66, 0xdf, 0xcd, 0x30, 0x12, 0x54, 0x19, 0x14, 0x0b, 0xec, 0x91, 0x2b, 0xd1, 0xfe, 0xda, 0x82, - 0x38, 0x06, 0xce, 0x31, 0x08, 0x7e, 0x73, 0x86, 0xe0, 0x97, 0x29, 0x02, 0xab, 0xe6, 0xe4, 0xc9, - 0x7d, 0xe8, 0x9a, 0x8a, 0x0e, 0xd7, 0x46, 0xfa, 0x8d, 0xd9, 0xf0, 0xa9, 0x38, 0x62, 0x86, 0x90, - 0x93, 0xf1, 0xe2, 0xec, 0xdf, 0x2f, 0x00, 0x52, 0xb4, 0x5d, 0x47, 0xaf, 0x4b, 0x97, 0x38, 0x9a, - 0xe8, 0x75, 0xbb, 0x80, 0x98, 0x87, 0x42, 0xe0, 0x78, 0x21, 0x67, 0xeb, 0x0a, 0xb5, 0xe1, 0xe1, - 0xdc, 0x1f, 0xa6, 0xe4, 0x73, 0xb6, 0xe5, 0x14, 0x37, 0x9c, 0x51, 0x83, 0xe6, 0x79, 0xd2, 0xdb, - 0xad, 0xe7, 0x49, 0x5f, 0x87, 0x77, 0x99, 0x5f, 0xb5, 0x60, 0x58, 0x75, 0xd3, 0xbb, 0xc4, 0x7b, - 0x5f, 0xb5, 0x27, 0x67, 0xeb, 0xad, 0x68, 0x4d, 0x66, 0x47, 0xd2, 0x77, 0xb1, 0xf7, 0xb5, 0x4e, - 0xc3, 0xbd, 0x4b, 0x54, 0xb4, 0xd3, 0xb2, 0x78, 0x2f, 0x2b, 0xa0, 0x07, 0xfb, 0xe5, 0x61, 0xf5, - 0x8f, 0x47, 0x73, 0x8f, 0x8b, 0xd8, 0x3f, 0x45, 0x17, 0xbb, 0x39, 0x15, 0xd1, 0x8b, 0xd0, 0xdb, - 0xdc, 0x72, 0x42, 0x92, 0x78, 0xe5, 0xd4, 0x5b, 0xa1, 0xc0, 0x83, 0xfd, 0xf2, 0x88, 0x2a, 0xc0, - 0x20, 0x98, 0x53, 0x77, 0x1f, 0x13, 0x30, 0x3d, 0x39, 0x3b, 0xc6, 0x04, 0xfc, 0x4b, 0x0b, 0x7a, - 0x56, 0xe9, 0x06, 0xff, 0xe0, 0xb7, 0x80, 0xd7, 0x8c, 0x2d, 0xe0, 0x4c, 0x5e, 0xa2, 0x8d, 0xdc, - 0xd5, 0xbf, 0x98, 0x58, 0xfd, 0xe7, 0x72, 0x39, 0xb4, 0x5f, 0xf8, 0x3b, 0x30, 0xc8, 0xd2, 0x77, - 0x88, 0x17, 0x5d, 0xcf, 0x1b, 0x0b, 0xbe, 0x9c, 0x58, 0xf0, 0xa3, 0x1a, 0xa9, 0xb6, 0xd2, 0x9f, - 0x84, 0x7e, 0xf1, 0x44, 0x28, 0xf9, 0x4c, 0x59, 0xd0, 0x62, 0x89, 0xb7, 0x7f, 0xbc, 0x08, 0x46, - 0xba, 0x10, 0xf4, 0xab, 0x16, 0x4c, 0x07, 0xdc, 0x75, 0xb8, 0x3e, 0xdf, 0x0a, 0x5c, 0x6f, 0xb3, - 0x5a, 0xdb, 0x22, 0xf5, 0x56, 0xc3, 0xf5, 0x36, 0x97, 0x36, 0x3d, 0x5f, 0x81, 0x17, 0xee, 0x90, - 0x5a, 0x8b, 0x99, 0xf5, 0x3a, 0xe4, 0x26, 0x51, 0x2e, 0xf8, 0xcf, 0xdd, 0xdb, 0x2f, 0x4f, 0xe3, - 0x43, 0xf1, 0xc6, 0x87, 0x6c, 0x0b, 0xfa, 0x1d, 0x0b, 0x2e, 0xf1, 0x2c, 0x1a, 0xdd, 0xb7, 0xbf, - 0xcd, 0x3d, 0xbb, 0x22, 0x59, 0xc5, 0x4c, 0xd6, 0x48, 0xb0, 0x33, 0xfb, 0x41, 0xd1, 0xa1, 0x97, - 0x2a, 0x87, 0xab, 0x0b, 0x1f, 0xb6, 0x71, 0xf6, 0x3f, 0x28, 0xc2, 0xb0, 0x88, 0x1d, 0x27, 0xce, - 0x80, 0x17, 0x8d, 0x29, 0xf1, 0x48, 0x62, 0x4a, 0x8c, 0x1b, 0xc4, 0x47, 0xb3, 0xfd, 0x87, 0x30, - 0x4e, 0x37, 0xe7, 0xab, 0xc4, 0x09, 0xa2, 0x75, 0xe2, 0x70, 0x87, 0xb2, 0xe2, 0xa1, 0x77, 0x7f, - 0xa5, 0xd9, 0x5c, 0x4e, 0x32, 0xc3, 0x69, 0xfe, 0xdf, 0x49, 0x67, 0x8e, 0x07, 0x63, 0xa9, 0xf0, - 0x7f, 0x6f, 0x42, 0x49, 0xbd, 0x6f, 0x11, 0x9b, 0x4e, 0xfb, 0x28, 0x9a, 0x49, 0x0e, 0x5c, 0x71, - 0x16, 0xbf, 0xad, 0x8a, 0xd9, 0xd9, 0x7f, 0xa7, 0x60, 0x54, 0xc8, 0x07, 0x71, 0x15, 0x06, 0x9c, - 0x30, 0x74, 0x37, 0x3d, 0x52, 0x6f, 0xa7, 0xdb, 0x4c, 0x55, 0xc3, 0xde, 0x18, 0xcd, 0x88, 0x92, - 0x58, 0xf1, 0x40, 0x57, 0xb9, 0xdb, 0xde, 0x2e, 0x69, 0xa7, 0xd8, 0x4c, 0x71, 0x03, 0xe9, 0xd8, - 0xb7, 0x4b, 0xb0, 0x28, 0x8f, 0x3e, 0xc1, 0xfd, 0x2a, 0xaf, 0x79, 0xfe, 0x6d, 0xef, 0x8a, 0xef, - 0xcb, 0x38, 0x21, 0xdd, 0x31, 0x1c, 0x97, 0xde, 0x94, 0xaa, 0x38, 0x36, 0xb9, 
0x75, 0x17, 0x4f, - 0xf7, 0x33, 0xc0, 0xb2, 0x06, 0x98, 0xcf, 0xc9, 0x43, 0x44, 0x60, 0x54, 0x04, 0x26, 0x94, 0x30, - 0xd1, 0x77, 0x99, 0x97, 0x40, 0xb3, 0x74, 0xac, 0x82, 0xbf, 0x66, 0xb2, 0xc0, 0x49, 0x9e, 0xf6, - 0x4f, 0x5b, 0xc0, 0x9e, 0xd6, 0x1e, 0x83, 0x3c, 0xf2, 0x61, 0x53, 0x1e, 0x99, 0xcc, 0xeb, 0xe4, - 0x1c, 0x51, 0xe4, 0x05, 0x3e, 0xb3, 0x2a, 0x81, 0x7f, 0x67, 0x4f, 0x38, 0xc3, 0x74, 0xbe, 0x7f, - 0xd8, 0xff, 0xcd, 0xe2, 0x9b, 0x58, 0x1c, 0x88, 0xe0, 0xb3, 0x30, 0x50, 0x73, 0x9a, 0x4e, 0x8d, - 0xe7, 0xb6, 0xca, 0xd5, 0x05, 0x1a, 0x85, 0xa6, 0xe7, 0x44, 0x09, 0xae, 0xdb, 0x92, 0x01, 0x2e, - 0x07, 0x24, 0xb8, 0xa3, 0x3e, 0x4b, 0x55, 0x39, 0xb5, 0x0d, 0xc3, 0x06, 0xb3, 0x07, 0xaa, 0x08, - 0xf9, 0x2c, 0x3f, 0x62, 0x55, 0x40, 0xd6, 0x1d, 0x18, 0xf7, 0xb4, 0xff, 0xf4, 0x40, 0x91, 0x97, - 0xcb, 0xc7, 0x3a, 0x1d, 0xa2, 0xec, 0xf4, 0xd1, 0x5e, 0xed, 0x26, 0xd8, 0xe0, 0x34, 0x67, 0xfb, - 0x27, 0x2c, 0x78, 0x48, 0x27, 0xd4, 0x1e, 0x06, 0x75, 0x32, 0xaf, 0xcc, 0xc3, 0x80, 0xdf, 0x24, - 0x81, 0x13, 0xf9, 0x81, 0x38, 0x35, 0x2e, 0xca, 0x4e, 0xbf, 0x2e, 0xe0, 0x07, 0x22, 0x53, 0x83, - 0xe4, 0x2e, 0xe1, 0x58, 0x95, 0xa4, 0xb7, 0x4f, 0xd6, 0x19, 0xa1, 0x78, 0x02, 0xc6, 0xf6, 0x00, - 0x66, 0xa9, 0x0f, 0xb1, 0xc0, 0xd8, 0x7f, 0x66, 0xf1, 0x89, 0xa5, 0x37, 0x1d, 0xbd, 0x0d, 0x63, - 0x3b, 0x4e, 0x54, 0xdb, 0x5a, 0xb8, 0xd3, 0x0c, 0xb8, 0xb1, 0x4a, 0xf6, 0xd3, 0xd3, 0x9d, 0xfa, - 0x49, 0xfb, 0xc8, 0xd8, 0x55, 0x74, 0x25, 0xc1, 0x0c, 0xa7, 0xd8, 0xa3, 0x75, 0x18, 0x64, 0x30, - 0xf6, 0xba, 0x31, 0x6c, 0x27, 0x1a, 0xe4, 0xd5, 0xa6, 0x9c, 0x1d, 0x56, 0x62, 0x3e, 0x58, 0x67, - 0x6a, 0x7f, 0xa5, 0xc8, 0x57, 0x3b, 0x13, 0xe5, 0x9f, 0x84, 0xfe, 0xa6, 0x5f, 0x9f, 0x5b, 0x9a, - 0xc7, 0x62, 0x14, 0xd4, 0x31, 0x52, 0xe1, 0x60, 0x2c, 0xf1, 0xe8, 0x22, 0x0c, 0x88, 0x9f, 0xd2, - 0xb8, 0xc8, 0xf6, 0x66, 0x41, 0x17, 0x62, 0x85, 0x45, 0xcf, 0x01, 0x34, 0x03, 0x7f, 0xd7, 0xad, - 0xb3, 0x68, 0x27, 0x45, 0xd3, 0x4f, 0xa9, 0xa2, 0x30, 0x58, 0xa3, 0x42, 0xaf, 0xc0, 0x70, 0xcb, - 0x0b, 0xb9, 0x38, 0xa2, 0xc5, 0x94, 0x56, 0x1e, 0x34, 0x37, 0x74, 0x24, 0x36, 0x69, 0xd1, 0x0c, - 0xf4, 0x45, 0x0e, 0xf3, 0xbb, 0xe9, 0xcd, 0x77, 0x27, 0x5e, 0xa3, 0x14, 0x7a, 0x1a, 0x25, 0x5a, - 0x00, 0x8b, 0x82, 0xe8, 0x4d, 0xf9, 0xd0, 0x98, 0x6f, 0xec, 0xc2, 0x8f, 0xbf, 0xbb, 0x43, 0x40, - 0x7b, 0x66, 0x2c, 0xde, 0x07, 0x18, 0xbc, 0xd0, 0xcb, 0x00, 0xe4, 0x4e, 0x44, 0x02, 0xcf, 0x69, - 0x28, 0x6f, 0x39, 0x25, 0x17, 0xcc, 0xfb, 0xab, 0x7e, 0x74, 0x23, 0x24, 0x0b, 0x8a, 0x02, 0x6b, - 0xd4, 0xf6, 0xef, 0x94, 0x00, 0x62, 0xb9, 0x1d, 0xdd, 0x4d, 0x6d, 0x5c, 0xcf, 0xb4, 0x97, 0xf4, - 0x8f, 0x6e, 0xd7, 0x42, 0x9f, 0xb7, 0x60, 0x50, 0x04, 0x75, 0x61, 0x23, 0x54, 0x68, 0xbf, 0x71, - 0x9a, 0xb1, 0x65, 0x68, 0x09, 0xde, 0x84, 0xe7, 0xe5, 0x0c, 0xd5, 0x30, 0x1d, 0x5b, 0xa1, 0x57, - 0x8c, 0xde, 0x2f, 0xaf, 0x8a, 0x45, 0xa3, 0x2b, 0xd5, 0x55, 0xb1, 0xc4, 0xce, 0x08, 0xfd, 0x96, - 0x78, 0xc3, 0xb8, 0x25, 0xf6, 0xe4, 0xbf, 0xa4, 0x34, 0xc4, 0xd7, 0x4e, 0x17, 0x44, 0x54, 0xd1, - 0xa3, 0x2a, 0xf4, 0xe6, 0x3f, 0xff, 0xd3, 0xee, 0x49, 0x1d, 0x22, 0x2a, 0x7c, 0x1a, 0x46, 0xeb, - 0xa6, 0x10, 0x20, 0x66, 0xe2, 0x13, 0x79, 0x7c, 0x13, 0x32, 0x43, 0x7c, 0xec, 0x27, 0x10, 0x38, - 0xc9, 0x18, 0x55, 0x78, 0x90, 0x8d, 0x25, 0x6f, 0xc3, 0x17, 0x6f, 0x49, 0xec, 0xdc, 0xb1, 0xdc, - 0x0b, 0x23, 0xb2, 0x43, 0x29, 0xe3, 0xd3, 0x7d, 0x55, 0x94, 0xc5, 0x8a, 0x0b, 0x7a, 0x1d, 0xfa, - 0xd8, 0xfb, 0xaf, 0x70, 0x72, 0x20, 0x5f, 0x57, 0x6d, 0x46, 0x1b, 0x8c, 0x17, 0x24, 0xfb, 0x1b, - 0x62, 0xc1, 0x01, 0x5d, 0x95, 0xaf, 0x2b, 0xc3, 0x25, 0xef, 0x46, 0x48, 0xd8, 0xeb, 0xca, 0xd2, - 0xec, 
0x63, 0xf1, 0xc3, 0x49, 0x0e, 0xcf, 0x4c, 0xb6, 0x68, 0x94, 0xa4, 0x52, 0x94, 0xf8, 0x2f, - 0x73, 0x38, 0x8a, 0xd8, 0x48, 0x99, 0xcd, 0x33, 0xf3, 0x3c, 0xc6, 0xdd, 0x79, 0xd3, 0x64, 0x81, - 0x93, 0x3c, 0xa9, 0x44, 0xca, 0x57, 0xbd, 0x78, 0x8d, 0xd2, 0x69, 0xef, 0xe0, 0x17, 0x71, 0x76, - 0x1a, 0x71, 0x08, 0x16, 0xe5, 0x8f, 0x55, 0x3c, 0x98, 0xf2, 0x60, 0x2c, 0xb9, 0x44, 0x1f, 0xa8, - 0x38, 0xf2, 0x27, 0x3d, 0x30, 0x62, 0x4e, 0x29, 0x74, 0x09, 0x4a, 0x82, 0x89, 0xca, 0x83, 0xa2, - 0x56, 0xc9, 0x8a, 0x44, 0xe0, 0x98, 0x86, 0xa5, 0xbf, 0x61, 0xc5, 0x35, 0xf7, 0xe3, 0x38, 0xfd, - 0x8d, 0xc2, 0x60, 0x8d, 0x8a, 0x5e, 0xac, 0xd6, 0x7d, 0x3f, 0x52, 0x07, 0x92, 0x9a, 0x77, 0xb3, - 0x0c, 0x8a, 0x05, 0x96, 0x1e, 0x44, 0xdb, 0x24, 0xf0, 0x48, 0xc3, 0x8c, 0x3f, 0xae, 0x0e, 0xa2, - 0x6b, 0x3a, 0x12, 0x9b, 0xb4, 0xf4, 0x38, 0xf5, 0x43, 0x36, 0x91, 0xc5, 0xf5, 0x2d, 0x76, 0xe7, - 0xae, 0xf2, 0x87, 0xe9, 0x12, 0x8f, 0x3e, 0x06, 0x0f, 0xa9, 0x58, 0x5f, 0x98, 0xdb, 0x41, 0x64, - 0x8d, 0x7d, 0x86, 0xb6, 0xe5, 0xa1, 0xb9, 0x6c, 0x32, 0x9c, 0x57, 0x1e, 0xbd, 0x06, 0x23, 0x42, - 0xc4, 0x97, 0x1c, 0xfb, 0x4d, 0xdf, 0xa4, 0x6b, 0x06, 0x16, 0x27, 0xa8, 0x65, 0x04, 0x75, 0x26, - 0x65, 0x4b, 0x0e, 0x03, 0xe9, 0x08, 0xea, 0x3a, 0x1e, 0xa7, 0x4a, 0xa0, 0x19, 0x18, 0xe5, 0x32, - 0x98, 0xeb, 0x6d, 0xf2, 0x31, 0x11, 0x8f, 0xc5, 0xd4, 0x92, 0xba, 0x6e, 0xa2, 0x71, 0x92, 0x1e, - 0xbd, 0x04, 0x43, 0x4e, 0x50, 0xdb, 0x72, 0x23, 0x52, 0x8b, 0x5a, 0x01, 0x7f, 0x45, 0xa6, 0x39, - 0x77, 0xcd, 0x68, 0x38, 0x6c, 0x50, 0xda, 0x77, 0x61, 0x22, 0x23, 0x62, 0x05, 0x9d, 0x38, 0x4e, - 0xd3, 0x95, 0xdf, 0x94, 0xf0, 0xa0, 0x9e, 0xa9, 0x2c, 0xc9, 0xaf, 0xd1, 0xa8, 0xe8, 0xec, 0x64, - 0x91, 0x2d, 0xb4, 0x94, 0xad, 0x6a, 0x76, 0x2e, 0x4a, 0x04, 0x8e, 0x69, 0xec, 0xff, 0x58, 0x80, - 0xd1, 0x0c, 0xdb, 0x0a, 0x4b, 0x1b, 0x9a, 0xb8, 0xa4, 0xc4, 0x59, 0x42, 0xcd, 0x80, 0xfc, 0x85, - 0x43, 0x04, 0xe4, 0x2f, 0x76, 0x0a, 0xc8, 0xdf, 0xf3, 0x4e, 0x02, 0xf2, 0x9b, 0x3d, 0xd6, 0xdb, - 0x55, 0x8f, 0x65, 0x04, 0xf1, 0xef, 0x3b, 0x64, 0x10, 0x7f, 0xa3, 0xd3, 0xfb, 0xbb, 0xe8, 0xf4, - 0x1f, 0x29, 0xc0, 0x58, 0xd2, 0x09, 0xf5, 0x18, 0xf4, 0xb6, 0xaf, 0x1b, 0x7a, 0xdb, 0x8b, 0xdd, - 0x3c, 0xee, 0xcd, 0xd5, 0xe1, 0xe2, 0x84, 0x0e, 0xf7, 0xa9, 0xae, 0xb8, 0xb5, 0xd7, 0xe7, 0xfe, - 0x64, 0x01, 0x4e, 0x66, 0xbe, 0x2e, 0x3e, 0x86, 0xbe, 0xb9, 0x6e, 0xf4, 0xcd, 0xb3, 0x5d, 0x3f, - 0x7c, 0xce, 0xed, 0xa0, 0x5b, 0x89, 0x0e, 0xba, 0xd4, 0x3d, 0xcb, 0xf6, 0xbd, 0xf4, 0x8d, 0x22, - 0x9c, 0xcb, 0x2c, 0x17, 0xab, 0x3d, 0x17, 0x0d, 0xb5, 0xe7, 0x73, 0x09, 0xb5, 0xa7, 0xdd, 0xbe, - 0xf4, 0xd1, 0xe8, 0x41, 0xc5, 0x03, 0x60, 0x16, 0xc6, 0xe0, 0x3e, 0x75, 0xa0, 0xc6, 0x03, 0x60, - 0xc5, 0x08, 0x9b, 0x7c, 0xbf, 0x93, 0x74, 0x9f, 0xbf, 0x6d, 0xc1, 0xe9, 0xcc, 0xb1, 0x39, 0x06, - 0x5d, 0xd7, 0xaa, 0xa9, 0xeb, 0x7a, 0xb2, 0xeb, 0xd9, 0x9a, 0xa3, 0xfc, 0xfa, 0xb9, 0xde, 0x9c, - 0x6f, 0x61, 0x37, 0xf9, 0xeb, 0x30, 0xe8, 0xd4, 0x6a, 0x24, 0x0c, 0x57, 0xfc, 0xba, 0x8a, 0xdd, - 0xfd, 0x2c, 0xbb, 0x67, 0xc5, 0xe0, 0x83, 0xfd, 0xf2, 0x54, 0x92, 0x45, 0x8c, 0xc6, 0x3a, 0x07, - 0xf4, 0x09, 0x18, 0x08, 0xc5, 0xb9, 0x29, 0xc6, 0xfe, 0xf9, 0x2e, 0x3b, 0xc7, 0x59, 0x27, 0x0d, - 0x33, 0x48, 0x94, 0xd2, 0x54, 0x28, 0x96, 0xe8, 0x7f, 0xd1, 0x03, 0xca, 0xa4, 0xa5, 0xca, 0x44, - 0x78, 0x93, 0xfb, 0x08, 0x2b, 0xf3, 0x1c, 0xc0, 0xae, 0xba, 0x12, 0x24, 0xb5, 0x10, 0xda, 0x65, - 0x41, 0xa3, 0x42, 0x1f, 0x81, 0xb1, 0x90, 0xc7, 0x52, 0x9c, 0x6b, 0x38, 0x21, 0x7b, 0xad, 0x23, - 0xe6, 0x22, 0x0b, 0x47, 0x55, 0x4d, 0xe0, 0x70, 0x8a, 0x1a, 0x2d, 0xca, 0x5a, 0x99, 0x27, 0x09, - 0x9f, 0x9e, 0x17, 0xe2, 0x1a, 
0x85, 0x37, 0xc9, 0x89, 0xe4, 0x20, 0xb0, 0xee, 0xd7, 0x4a, 0xa2, - 0x4f, 0x00, 0xd0, 0x49, 0x24, 0xb4, 0x11, 0xfd, 0xf9, 0x5b, 0x28, 0xdd, 0x5b, 0xea, 0x99, 0xce, - 0xd1, 0xec, 0xe5, 0xee, 0xbc, 0x62, 0x82, 0x35, 0x86, 0xc8, 0x81, 0xe1, 0xf8, 0x5f, 0x9c, 0xd9, - 0xf7, 0x62, 0x6e, 0x0d, 0x49, 0xe6, 0x4c, 0xf1, 0x3d, 0xaf, 0xb3, 0xc0, 0x26, 0x47, 0xfb, 0xdf, - 0x0e, 0xc0, 0xc3, 0x6d, 0x36, 0x63, 0x34, 0x63, 0x1a, 0x7c, 0x9f, 0x4e, 0xde, 0xe2, 0xa7, 0x32, - 0x0b, 0x1b, 0xd7, 0xfa, 0xc4, 0x9c, 0x2f, 0xbc, 0xe3, 0x39, 0xff, 0x43, 0x96, 0xa6, 0x5f, 0xe1, - 0x4e, 0xa9, 0x1f, 0x3e, 0xe4, 0x21, 0x73, 0x84, 0x0a, 0x97, 0x8d, 0x0c, 0xad, 0xc5, 0x73, 0x5d, - 0x37, 0xa7, 0x7b, 0x35, 0xc6, 0xd7, 0xb2, 0x03, 0x08, 0x73, 0x85, 0xc6, 0x95, 0xc3, 0x7e, 0xff, - 0x71, 0x05, 0x13, 0xfe, 0x7d, 0x0b, 0x4e, 0xa7, 0xc0, 0xbc, 0x0d, 0x24, 0x14, 0x31, 0xae, 0x56, - 0xdf, 0x71, 0xe3, 0x25, 0x43, 0xfe, 0x0d, 0x57, 0xc5, 0x37, 0x9c, 0xce, 0xa5, 0x4b, 0x36, 0xfd, - 0x8b, 0x7f, 0x54, 0x9e, 0x60, 0x15, 0x98, 0x84, 0x38, 0xbf, 0xe9, 0xc7, 0x7b, 0xfd, 0xff, 0xd6, - 0xc4, 0x4e, 0x9e, 0x5a, 0x86, 0x73, 0xed, 0xbb, 0xfa, 0x50, 0xcf, 0x9b, 0x7f, 0xcf, 0x82, 0xb3, - 0x6d, 0x63, 0xe8, 0x7c, 0x1b, 0x4a, 0xbb, 0xf6, 0xe7, 0x2c, 0x78, 0x24, 0xb3, 0x84, 0xe1, 0x23, - 0x77, 0x09, 0x4a, 0xb5, 0x44, 0x3e, 0xd5, 0x38, 0x9a, 0x84, 0xca, 0xa5, 0x1a, 0xd3, 0x18, 0xae, - 0x70, 0x85, 0x8e, 0xae, 0x70, 0xbf, 0x61, 0x41, 0xea, 0xac, 0x3a, 0x06, 0xd1, 0x69, 0xc9, 0x14, - 0x9d, 0x1e, 0xeb, 0xa6, 0x37, 0x73, 0xa4, 0xa6, 0xbf, 0x18, 0x85, 0x53, 0x39, 0xaf, 0x13, 0x77, - 0x61, 0x7c, 0xb3, 0x46, 0xcc, 0xe7, 0xe8, 0xed, 0xc2, 0x34, 0xb5, 0x7d, 0xbb, 0xce, 0xd3, 0xd8, - 0xa6, 0x48, 0x70, 0xba, 0x0a, 0xf4, 0x39, 0x0b, 0x4e, 0x38, 0xb7, 0xc3, 0x05, 0x2a, 0x02, 0xbb, - 0xb5, 0xd9, 0x86, 0x5f, 0xdb, 0xa6, 0x92, 0x85, 0x5c, 0x56, 0x2f, 0x64, 0xaa, 0x25, 0x6f, 0x55, - 0x53, 0xf4, 0x46, 0xf5, 0x2c, 0x69, 0x79, 0x16, 0x15, 0xce, 0xac, 0x0b, 0x61, 0x91, 0x5f, 0x85, - 0x5e, 0xb0, 0xdb, 0x04, 0x4c, 0xc8, 0x7a, 0x46, 0xca, 0x65, 0x3a, 0x89, 0xc1, 0x8a, 0x0f, 0xfa, - 0x14, 0x94, 0x36, 0xe5, 0xdb, 0xe8, 0x0c, 0x99, 0x31, 0xee, 0xc8, 0xf6, 0x2f, 0xc6, 0xb9, 0x6f, - 0x81, 0x22, 0xc2, 0x31, 0x53, 0xf4, 0x1a, 0x14, 0xbd, 0x8d, 0xb0, 0x5d, 0xde, 0xef, 0x84, 0x13, - 0x29, 0x0f, 0x4b, 0xb2, 0xba, 0x58, 0xc5, 0xb4, 0x20, 0xba, 0x0a, 0xc5, 0x60, 0xbd, 0x2e, 0x74, - 0xea, 0x99, 0x8b, 0x14, 0xcf, 0xce, 0xe7, 0xb4, 0x8a, 0x71, 0xc2, 0xb3, 0xf3, 0x98, 0xb2, 0x40, - 0x15, 0xe8, 0x65, 0x4f, 0xfa, 0x84, 0x6c, 0x96, 0x79, 0x17, 0x6d, 0xf3, 0x34, 0x96, 0xc7, 0x2e, - 0x61, 0x04, 0x98, 0x33, 0x42, 0x6b, 0xd0, 0x57, 0x63, 0x39, 0xa2, 0x85, 0x30, 0xf6, 0xfe, 0x4c, - 0xed, 0x79, 0x9b, 0xe4, 0xd9, 0x42, 0x99, 0xcc, 0x28, 0xb0, 0xe0, 0xc5, 0xb8, 0x92, 0xe6, 0xd6, - 0x46, 0xc8, 0xb4, 0x6f, 0x79, 0x5c, 0xdb, 0xe4, 0x84, 0x17, 0x5c, 0x19, 0x05, 0x16, 0xbc, 0xd0, - 0xcb, 0x50, 0xd8, 0xa8, 0x89, 0xe7, 0x7a, 0x99, 0x6a, 0x74, 0x33, 0xb2, 0xcc, 0x6c, 0xdf, 0xbd, - 0xfd, 0x72, 0x61, 0x71, 0x0e, 0x17, 0x36, 0x6a, 0x68, 0x15, 0xfa, 0x37, 0x78, 0x2c, 0x0a, 0xa1, - 0x29, 0x7f, 0x22, 0x3b, 0x4c, 0x46, 0x2a, 0x5c, 0x05, 0x7f, 0xfa, 0x25, 0x10, 0x58, 0x32, 0x61, - 0xe9, 0x3e, 0x54, 0x4c, 0x0d, 0x11, 0xd2, 0x6f, 0xfa, 0x70, 0x71, 0x50, 0xb8, 0xac, 0x1c, 0x47, - 0xe6, 0xc0, 0x1a, 0x47, 0x3a, 0xab, 0x9d, 0xbb, 0xad, 0x80, 0xc5, 0x7b, 0x17, 0xb1, 0x9f, 0x32, - 0x67, 0xf5, 0x8c, 0x24, 0x6a, 0x37, 0xab, 0x15, 0x11, 0x8e, 0x99, 0xa2, 0x6d, 0x18, 0xde, 0x0d, - 0x9b, 0x5b, 0x44, 0x2e, 0x69, 0x16, 0x0a, 0x2a, 0x47, 0xd6, 0xbb, 0x29, 0x08, 0xdd, 0x20, 0x6a, - 0x39, 0x8d, 0xd4, 0x2e, 0xc4, 0xe4, 0xf2, 0x9b, 0x3a, 
0x33, 0x6c, 0xf2, 0xa6, 0xdd, 0xff, 0x76, - 0xcb, 0x5f, 0xdf, 0x8b, 0x88, 0x88, 0xc4, 0x97, 0xd9, 0xfd, 0x6f, 0x70, 0x92, 0x74, 0xf7, 0x0b, - 0x04, 0x96, 0x4c, 0xd0, 0x4d, 0xd1, 0x3d, 0x6c, 0xf7, 0x1c, 0xcb, 0x0f, 0xf3, 0x3b, 0x23, 0x89, - 0x72, 0x3a, 0x85, 0xed, 0x96, 0x31, 0x2b, 0xb6, 0x4b, 0x36, 0xb7, 0xfc, 0xc8, 0xf7, 0x12, 0x3b, - 0xf4, 0x78, 0xfe, 0x2e, 0x59, 0xc9, 0xa0, 0x4f, 0xef, 0x92, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0x54, - 0x87, 0x91, 0xa6, 0x1f, 0x44, 0xb7, 0xfd, 0x40, 0xce, 0x2f, 0xd4, 0x46, 0xd3, 0x67, 0x50, 0x8a, - 0x1a, 0x59, 0x90, 0x4b, 0x13, 0x83, 0x13, 0x3c, 0xd1, 0x47, 0xa1, 0x3f, 0xac, 0x39, 0x0d, 0xb2, - 0x74, 0x7d, 0x72, 0x22, 0xff, 0xf8, 0xa9, 0x72, 0x92, 0x9c, 0xd9, 0xc5, 0x43, 0x89, 0x70, 0x12, - 0x2c, 0xd9, 0xa1, 0x45, 0xe8, 0x65, 0x69, 0x34, 0x59, 0xd8, 0xc8, 0x9c, 0x68, 0xc5, 0x29, 0x97, - 0x7e, 0xbe, 0x37, 0x31, 0x30, 0xe6, 0xc5, 0xe9, 0x1a, 0x10, 0x57, 0x5d, 0x3f, 0x9c, 0x3c, 0x99, - 0xbf, 0x06, 0xc4, 0x0d, 0xf9, 0x7a, 0xb5, 0xdd, 0x1a, 0x50, 0x44, 0x38, 0x66, 0x4a, 0x77, 0x66, - 0xba, 0x9b, 0x9e, 0x6a, 0xe3, 0x8b, 0x96, 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee, 0xa4, 0x94, 0x85, - 0xfd, 0xc7, 0xfd, 0x69, 0x99, 0x85, 0xa9, 0x48, 0xfe, 0x37, 0x2b, 0x65, 0x3d, 0xff, 0x40, 0xb7, - 0x1a, 0xdb, 0x23, 0xbc, 0xd6, 0x7d, 0xce, 0x82, 0x53, 0xcd, 0xcc, 0x0f, 0x11, 0x02, 0x40, 0x77, - 0x8a, 0x5f, 0xfe, 0xe9, 0x2a, 0xc4, 0x68, 0x36, 0x1e, 0xe7, 0xd4, 0x94, 0xbc, 0x3a, 0x17, 0xdf, - 0xf1, 0xd5, 0x79, 0x05, 0x06, 0x6a, 0xfc, 0x9e, 0x23, 0x43, 0x63, 0x77, 0x15, 0x20, 0x8f, 0x89, - 0x12, 0xe2, 0x82, 0xb4, 0x81, 0x15, 0x0b, 0xf4, 0xc3, 0x16, 0x9c, 0x4d, 0x36, 0x1d, 0x13, 0x86, - 0x16, 0x71, 0x49, 0xb9, 0x5e, 0x66, 0x51, 0x7c, 0x7f, 0x4a, 0xfe, 0x37, 0x88, 0x0f, 0x3a, 0x11, - 0xe0, 0xf6, 0x95, 0xa1, 0xf9, 0x0c, 0xc5, 0x50, 0x9f, 0x69, 0x12, 0xeb, 0x42, 0x39, 0xf4, 0x02, - 0x0c, 0xed, 0xf8, 0x2d, 0x2f, 0x12, 0xae, 0x6b, 0xc2, 0x8d, 0x86, 0xb9, 0x8f, 0xac, 0x68, 0x70, - 0x6c, 0x50, 0x25, 0x54, 0x4a, 0x03, 0xf7, 0xad, 0x52, 0x7a, 0x0b, 0x86, 0x3c, 0xcd, 0xd7, 0x5a, - 0xc8, 0x03, 0x17, 0xf2, 0x95, 0x6e, 0xba, 0x67, 0x36, 0x6f, 0xa5, 0x0e, 0xc1, 0x06, 0xb7, 0xe3, - 0xf5, 0x69, 0xfb, 0xf9, 0x42, 0x86, 0x50, 0xcf, 0xd5, 0x4a, 0xaf, 0x9a, 0x6a, 0xa5, 0x0b, 0x49, - 0xb5, 0x52, 0xca, 0x1c, 0x62, 0x68, 0x94, 0xba, 0x4f, 0xb1, 0xd5, 0x75, 0x5c, 0xd2, 0xef, 0xb5, - 0xe0, 0x21, 0xa6, 0x5f, 0xa7, 0x15, 0xbc, 0x63, 0x9d, 0xfa, 0xc3, 0xf7, 0xf6, 0xcb, 0x0f, 0x2d, - 0x67, 0xb3, 0xc3, 0x79, 0xf5, 0xd8, 0x0d, 0x38, 0xdf, 0xe9, 0x68, 0x64, 0x7e, 0x94, 0x75, 0x65, - 0x80, 0x8f, 0xfd, 0x28, 0xeb, 0x4b, 0xf3, 0x98, 0x61, 0xba, 0x8d, 0xba, 0x65, 0xff, 0x7b, 0x0b, - 0x8a, 0x15, 0xbf, 0x7e, 0x0c, 0x97, 0xee, 0x0f, 0x1b, 0x97, 0xee, 0x87, 0xb3, 0x0f, 0xe5, 0x7a, - 0xae, 0x41, 0x69, 0x21, 0x61, 0x50, 0x3a, 0x9b, 0xc7, 0xa0, 0xbd, 0xf9, 0xe8, 0xa7, 0x8a, 0x30, - 0x58, 0xf1, 0xeb, 0xea, 0x11, 0xc3, 0x3f, 0xba, 0x9f, 0x47, 0x0c, 0xb9, 0x49, 0x53, 0x34, 0xce, - 0xcc, 0xfd, 0x52, 0xbe, 0xfc, 0xfe, 0x36, 0x7b, 0xcb, 0x70, 0x8b, 0xb8, 0x9b, 0x5b, 0x11, 0xa9, - 0x27, 0x3f, 0xe7, 0xf8, 0xde, 0x32, 0xfc, 0x71, 0x01, 0x46, 0x13, 0xb5, 0xa3, 0x06, 0x0c, 0x37, - 0x74, 0x73, 0x85, 0x98, 0xa7, 0xf7, 0x65, 0xe9, 0x10, 0xbe, 0xe0, 0x1a, 0x08, 0x9b, 0xcc, 0xd1, - 0x34, 0x80, 0xb2, 0xdf, 0x4b, 0x75, 0x35, 0xbb, 0x79, 0x28, 0x03, 0x7f, 0x88, 0x35, 0x0a, 0xf4, - 0x22, 0x0c, 0x46, 0x7e, 0xd3, 0x6f, 0xf8, 0x9b, 0x7b, 0xd7, 0x88, 0x0c, 0xc8, 0xa6, 0x3c, 0x3c, - 0xd7, 0x62, 0x14, 0xd6, 0xe9, 0xd0, 0x1d, 0x18, 0x57, 0x4c, 0xaa, 0x47, 0x60, 0xc2, 0x61, 0x9a, - 0x8d, 0xd5, 0x24, 0x47, 0x9c, 0xae, 0xc4, 0xfe, 0x99, 0x22, 0xef, 0x62, 0x2f, 
0x72, 0xdf, 0x5b, - 0x0d, 0xef, 0xee, 0xd5, 0xf0, 0x0d, 0x0b, 0xc6, 0x68, 0xed, 0xcc, 0x7d, 0x4d, 0x8a, 0x1a, 0x2a, - 0x92, 0xba, 0xd5, 0x26, 0x92, 0xfa, 0x05, 0xba, 0x6b, 0xd6, 0xfd, 0x56, 0x24, 0xf4, 0x87, 0xda, - 0xb6, 0x48, 0xa1, 0x58, 0x60, 0x05, 0x1d, 0x09, 0x02, 0xf1, 0xe4, 0x56, 0xa7, 0x23, 0x41, 0x80, - 0x05, 0x56, 0x06, 0x5a, 0xef, 0xc9, 0x0e, 0xb4, 0xce, 0xe3, 0xe5, 0x0a, 0x47, 0x27, 0x21, 0xf4, - 0x69, 0xf1, 0x72, 0xa5, 0x07, 0x54, 0x4c, 0x63, 0x7f, 0xad, 0x08, 0x43, 0x15, 0xbf, 0x1e, 0xdb, - 0xee, 0x5f, 0x30, 0x6c, 0xf7, 0xe7, 0x13, 0xb6, 0xfb, 0x31, 0x9d, 0xf6, 0x3d, 0x4b, 0xfd, 0xb7, - 0xca, 0x52, 0xff, 0xeb, 0x16, 0x1b, 0xb5, 0xf9, 0xd5, 0x2a, 0xf7, 0x86, 0x44, 0x97, 0x61, 0x90, - 0x6d, 0x30, 0xec, 0x8d, 0xb7, 0x34, 0x68, 0xb3, 0xc4, 0x67, 0xab, 0x31, 0x18, 0xeb, 0x34, 0xe8, - 0x22, 0x0c, 0x84, 0xc4, 0x09, 0x6a, 0x5b, 0x6a, 0x77, 0x15, 0xd6, 0x67, 0x0e, 0xc3, 0x0a, 0x8b, - 0xde, 0x88, 0x43, 0xb5, 0x16, 0xf3, 0xdf, 0x8c, 0xea, 0xed, 0xe1, 0x4b, 0x24, 0x3f, 0x3e, 0xab, - 0x7d, 0x0b, 0x50, 0x9a, 0xbe, 0x8b, 0x60, 0x82, 0x65, 0x33, 0x98, 0x60, 0x29, 0x15, 0x48, 0xf0, - 0xaf, 0x2c, 0x18, 0xa9, 0xf8, 0x75, 0xba, 0x74, 0xbf, 0x93, 0xd6, 0xa9, 0x1e, 0xa7, 0xba, 0xaf, - 0x4d, 0x9c, 0xea, 0x47, 0xa1, 0xb7, 0xe2, 0xd7, 0x3b, 0x04, 0x3c, 0xfc, 0xff, 0x2d, 0xe8, 0xaf, - 0xf8, 0xf5, 0x63, 0x30, 0x4d, 0xbc, 0x6a, 0x9a, 0x26, 0x1e, 0xca, 0x99, 0x37, 0x39, 0xd6, 0x88, - 0xff, 0xaf, 0x07, 0x86, 0x69, 0x3b, 0xfd, 0x4d, 0x39, 0x94, 0x46, 0xb7, 0x59, 0x5d, 0x74, 0x1b, - 0x95, 0xc2, 0xfd, 0x46, 0xc3, 0xbf, 0x9d, 0x1c, 0xd6, 0x45, 0x06, 0xc5, 0x02, 0x8b, 0x9e, 0x81, - 0x81, 0x66, 0x40, 0x76, 0x5d, 0x5f, 0x88, 0xb7, 0x9a, 0xa1, 0xa7, 0x22, 0xe0, 0x58, 0x51, 0xd0, - 0xab, 0x69, 0xe8, 0x7a, 0xf4, 0x28, 0xaf, 0xf9, 0x5e, 0x9d, 0x6b, 0xef, 0x8b, 0x22, 0x99, 0x8a, - 0x06, 0xc7, 0x06, 0x15, 0xba, 0x05, 0x25, 0xf6, 0x9f, 0x6d, 0x3b, 0x87, 0x4f, 0xe3, 0x2c, 0xd2, - 0x4b, 0x0a, 0x06, 0x38, 0xe6, 0x85, 0x9e, 0x03, 0x88, 0x64, 0x42, 0x82, 0x50, 0x04, 0xbe, 0x53, - 0x57, 0x01, 0x95, 0xaa, 0x20, 0xc4, 0x1a, 0x15, 0x7a, 0x1a, 0x4a, 0x91, 0xe3, 0x36, 0x96, 0x5d, - 0x8f, 0xd9, 0x7f, 0x69, 0xfb, 0x45, 0x96, 0x47, 0x01, 0xc4, 0x31, 0x9e, 0x8a, 0x62, 0x2c, 0x28, - 0x0a, 0x4f, 0x62, 0x3f, 0xc0, 0xa8, 0x99, 0x28, 0xb6, 0xac, 0xa0, 0x58, 0xa3, 0x40, 0x5b, 0x70, - 0xc6, 0xf5, 0x58, 0xe2, 0x11, 0x52, 0xdd, 0x76, 0x9b, 0x6b, 0xcb, 0xd5, 0x9b, 0x24, 0x70, 0x37, - 0xf6, 0x66, 0x9d, 0xda, 0x36, 0xf1, 0x64, 0x82, 0x5e, 0x99, 0xb7, 0xfd, 0xcc, 0x52, 0x1b, 0x5a, - 0xdc, 0x96, 0x93, 0xfd, 0x3c, 0x9b, 0xef, 0xd7, 0xab, 0xe8, 0x29, 0x63, 0xeb, 0x38, 0xa5, 0x6f, - 0x1d, 0x07, 0xfb, 0xe5, 0xbe, 0xeb, 0x55, 0x2d, 0x32, 0xc7, 0x4b, 0x70, 0xb2, 0xe2, 0xd7, 0x2b, - 0x7e, 0x10, 0x2d, 0xfa, 0xc1, 0x6d, 0x27, 0xa8, 0xcb, 0xe9, 0x55, 0x96, 0xb1, 0x49, 0xe8, 0xfe, - 0xd9, 0xcb, 0x77, 0x17, 0x23, 0xee, 0xc8, 0xf3, 0x4c, 0x62, 0x3b, 0xe4, 0x8b, 0xba, 0x1a, 0x93, - 0x1d, 0x54, 0xea, 0x9e, 0x2b, 0x4e, 0x44, 0xd0, 0x75, 0x96, 0x82, 0x3f, 0x3e, 0x46, 0x45, 0xf1, - 0x27, 0xb5, 0x14, 0xfc, 0x31, 0x32, 0xf3, 0xdc, 0x35, 0xcb, 0xdb, 0x9f, 0x15, 0x95, 0x70, 0x3d, - 0x00, 0xf7, 0x5a, 0xec, 0x26, 0x87, 0xb5, 0xcc, 0xed, 0x51, 0xc8, 0x4f, 0x0a, 0xc1, 0x2d, 0xaf, - 0x6d, 0x73, 0x7b, 0xd8, 0xdf, 0x0d, 0xa7, 0x92, 0xd5, 0x77, 0x9d, 0x48, 0x7b, 0x0e, 0xc6, 0x03, - 0xbd, 0xa0, 0x96, 0x28, 0xed, 0x24, 0xcf, 0xc7, 0x90, 0x40, 0xe2, 0x34, 0xbd, 0xfd, 0x22, 0x8c, - 0xd3, 0xbb, 0xa7, 0x12, 0xe4, 0x58, 0x2f, 0x77, 0x0e, 0xd2, 0xf2, 0x1f, 0x7a, 0xd9, 0x41, 0x94, - 0xc8, 0x9a, 0x83, 0x3e, 0x09, 0x23, 0x21, 0x59, 0x76, 0xbd, 0xd6, 0x1d, 0xa9, 0x7d, 0x6a, 0xf3, - 0x94, 
0xb4, 0xba, 0xa0, 0x53, 0x72, 0x1d, 0xb6, 0x09, 0xc3, 0x09, 0x6e, 0x68, 0x07, 0x46, 0x6e, - 0xbb, 0x5e, 0xdd, 0xbf, 0x1d, 0x4a, 0xfe, 0x03, 0xf9, 0xaa, 0xec, 0x5b, 0x9c, 0x32, 0xd1, 0x46, - 0xa3, 0xba, 0x5b, 0x06, 0x33, 0x9c, 0x60, 0x4e, 0x17, 0x7b, 0xd0, 0xf2, 0x66, 0xc2, 0x1b, 0x21, - 0xe1, 0x8f, 0x03, 0xc5, 0x62, 0xc7, 0x12, 0x88, 0x63, 0x3c, 0x5d, 0xec, 0xec, 0xcf, 0x95, 0xc0, - 0x6f, 0xf1, 0x14, 0x2d, 0x62, 0xb1, 0x63, 0x05, 0xc5, 0x1a, 0x05, 0xdd, 0x0c, 0xd9, 0xbf, 0x55, - 0xdf, 0xc3, 0xbe, 0x1f, 0xc9, 0xed, 0x93, 0xa5, 0x18, 0xd3, 0xe0, 0xd8, 0xa0, 0x42, 0x8b, 0x80, - 0xc2, 0x56, 0xb3, 0xd9, 0x60, 0xde, 0x69, 0x4e, 0x83, 0xb1, 0xe2, 0x6e, 0x3b, 0x45, 0x1e, 0x62, - 0xba, 0x9a, 0xc2, 0xe2, 0x8c, 0x12, 0xf4, 0x5c, 0xdc, 0x10, 0x4d, 0xed, 0x65, 0x4d, 0xe5, 0x66, - 0xaf, 0x2a, 0x6f, 0xa7, 0xc4, 0xa1, 0x05, 0xe8, 0x0f, 0xf7, 0xc2, 0x5a, 0xd4, 0x08, 0xdb, 0x25, - 0x74, 0xab, 0x32, 0x12, 0x2d, 0x9f, 0x28, 0x2f, 0x82, 0x65, 0x59, 0x54, 0x83, 0x09, 0xc1, 0x71, - 0x6e, 0xcb, 0xf1, 0x54, 0x9a, 0x29, 0xee, 0xaa, 0x7f, 0xf9, 0xde, 0x7e, 0x79, 0x42, 0xd4, 0xac, - 0xa3, 0x0f, 0xf6, 0xcb, 0x74, 0x71, 0x64, 0x60, 0x70, 0x16, 0x37, 0x3e, 0xf9, 0x6a, 0x35, 0x7f, - 0xa7, 0x59, 0x09, 0xfc, 0x0d, 0xb7, 0x41, 0xda, 0x99, 0x0e, 0xab, 0x06, 0xa5, 0x98, 0x7c, 0x06, - 0x0c, 0x27, 0xb8, 0xd9, 0x9f, 0x65, 0xb2, 0x63, 0xd5, 0xdd, 0xf4, 0x9c, 0xa8, 0x15, 0x10, 0xb4, - 0x03, 0xc3, 0x4d, 0xb6, 0xbb, 0x88, 0xc4, 0x29, 0x62, 0xae, 0xbf, 0xd0, 0xa5, 0xfa, 0xe9, 0x36, - 0x4b, 0xfd, 0x66, 0xb8, 0xba, 0x55, 0x74, 0x76, 0xd8, 0xe4, 0x6e, 0xff, 0xf3, 0xd3, 0x4c, 0xfa, - 0xa8, 0x72, 0x9d, 0x52, 0xbf, 0x78, 0x19, 0x24, 0xae, 0xb1, 0x53, 0xf9, 0x0a, 0xd6, 0x78, 0x58, - 0xc4, 0xeb, 0x22, 0x2c, 0xcb, 0xa2, 0x4f, 0xc0, 0x08, 0xbd, 0x15, 0x2a, 0x09, 0x20, 0x9c, 0x3c, - 0x91, 0x1f, 0xc1, 0x45, 0x51, 0xe9, 0x49, 0x95, 0xf4, 0xc2, 0x38, 0xc1, 0x0c, 0xbd, 0xc1, 0x5c, - 0xcb, 0x24, 0xeb, 0x42, 0x37, 0xac, 0x75, 0x2f, 0x32, 0xc9, 0x56, 0x63, 0x82, 0x5a, 0x30, 0x91, - 0x4e, 0x1d, 0x19, 0x4e, 0xda, 0xf9, 0xe2, 0x75, 0x3a, 0xfb, 0x63, 0x9c, 0xfd, 0x26, 0x8d, 0x0b, - 0x71, 0x16, 0x7f, 0xb4, 0x9c, 0x4c, 0xec, 0x57, 0x34, 0xf4, 0xbe, 0xa9, 0xe4, 0x7e, 0xc3, 0x6d, - 0x73, 0xfa, 0x6d, 0xc2, 0x59, 0x2d, 0x37, 0xda, 0x95, 0xc0, 0x61, 0xce, 0x1b, 0x2e, 0xdb, 0x4e, - 0x35, 0xb9, 0xe8, 0x91, 0x7b, 0xfb, 0xe5, 0xb3, 0x6b, 0xed, 0x08, 0x71, 0x7b, 0x3e, 0xe8, 0x3a, - 0x9c, 0xe4, 0xf1, 0x07, 0xe6, 0x89, 0x53, 0x6f, 0xb8, 0x9e, 0x12, 0xbc, 0xf8, 0x92, 0x3f, 0x7d, - 0x6f, 0xbf, 0x7c, 0x72, 0x26, 0x8b, 0x00, 0x67, 0x97, 0x43, 0xaf, 0x42, 0xa9, 0xee, 0x85, 0xa2, - 0x0f, 0xfa, 0x8c, 0xf4, 0x73, 0xa5, 0xf9, 0xd5, 0xaa, 0xfa, 0xfe, 0xf8, 0x0f, 0x8e, 0x0b, 0xa0, - 0x4d, 0x6e, 0x1b, 0x50, 0xda, 0xa2, 0xfe, 0x54, 0xe4, 0xb6, 0xa4, 0x42, 0xd5, 0x78, 0x81, 0xcc, - 0x8d, 0x62, 0xea, 0x61, 0x8e, 0xf1, 0x38, 0xd9, 0x60, 0x8c, 0x5e, 0x07, 0x24, 0xd2, 0x1c, 0xcc, - 0xd4, 0x58, 0x56, 0x1e, 0x76, 0x34, 0x0e, 0x98, 0x6f, 0x62, 0xab, 0x29, 0x0a, 0x9c, 0x51, 0x0a, - 0x5d, 0xa5, 0xbb, 0x8a, 0x0e, 0x15, 0xbb, 0x96, 0x4a, 0x72, 0x3a, 0x4f, 0x9a, 0x01, 0x61, 0x3e, - 0x66, 0x26, 0x47, 0x9c, 0x28, 0x87, 0xea, 0x70, 0xc6, 0x69, 0x45, 0x3e, 0x33, 0xbb, 0x98, 0xa4, - 0x6b, 0xfe, 0x36, 0xf1, 0x98, 0xc5, 0x73, 0x80, 0x45, 0x84, 0x3b, 0x33, 0xd3, 0x86, 0x0e, 0xb7, - 0xe5, 0x42, 0x25, 0x72, 0x95, 0xd5, 0x1c, 0xcc, 0x78, 0x74, 0x19, 0x99, 0xcd, 0x5f, 0x84, 0xc1, - 0x2d, 0x3f, 0x8c, 0x56, 0x49, 0x74, 0xdb, 0x0f, 0xb6, 0x45, 0x5c, 0xe5, 0x38, 0x96, 0x7d, 0x8c, - 0xc2, 0x3a, 0x1d, 0xbd, 0x72, 0x33, 0x7f, 0x9c, 0xa5, 0x79, 0xe6, 0x0a, 0x31, 0x10, 0xef, 0x31, - 0x57, 0x39, 0x18, 0x4b, 0xbc, 
0x24, 0x5d, 0xaa, 0xcc, 0x31, 0xb7, 0x86, 0x04, 0xe9, 0x52, 0x65, - 0x0e, 0x4b, 0x3c, 0x9d, 0xae, 0xe1, 0x96, 0x13, 0x90, 0x4a, 0xe0, 0xd7, 0x48, 0xa8, 0x65, 0x50, - 0x78, 0x98, 0x47, 0x8d, 0xa6, 0xd3, 0xb5, 0x9a, 0x45, 0x80, 0xb3, 0xcb, 0x21, 0x92, 0xce, 0x0b, - 0x38, 0x92, 0x6f, 0x8f, 0x4a, 0xcb, 0x33, 0x5d, 0xa6, 0x06, 0xf4, 0x60, 0x4c, 0x65, 0x24, 0xe4, - 0x71, 0xa2, 0xc3, 0xc9, 0x51, 0x36, 0xb7, 0xbb, 0x0f, 0x32, 0xad, 0x2c, 0x7c, 0x4b, 0x09, 0x4e, - 0x38, 0xc5, 0xdb, 0x08, 0x39, 0x38, 0xd6, 0x31, 0xe4, 0xe0, 0x25, 0x28, 0x85, 0xad, 0xf5, 0xba, - 0xbf, 0xe3, 0xb8, 0x1e, 0x73, 0x6b, 0xd0, 0xee, 0x7e, 0x55, 0x89, 0xc0, 0x31, 0x0d, 0x5a, 0x84, - 0x01, 0x47, 0x9a, 0xef, 0x50, 0x7e, 0xa8, 0x28, 0x65, 0xb4, 0xe3, 0xd1, 0x53, 0xa4, 0xc1, 0x4e, - 0x95, 0x45, 0xaf, 0xc0, 0xb0, 0x78, 0x3f, 0x2f, 0x92, 0xf8, 0x4e, 0x98, 0x8f, 0x1c, 0xab, 0x3a, - 0x12, 0x9b, 0xb4, 0xe8, 0x06, 0x0c, 0x46, 0x7e, 0x83, 0xbd, 0xd4, 0xa3, 0x62, 0xde, 0xa9, 0xfc, - 0xa0, 0x87, 0x6b, 0x8a, 0x4c, 0xd7, 0x5a, 0xab, 0xa2, 0x58, 0xe7, 0x83, 0xd6, 0xf8, 0x7c, 0x67, - 0xf9, 0x12, 0x48, 0x28, 0xb2, 0xc0, 0x9e, 0xcd, 0xf3, 0x49, 0x63, 0x64, 0xe6, 0x72, 0x10, 0x25, - 0xb1, 0xce, 0x06, 0x5d, 0x81, 0xf1, 0x66, 0xe0, 0xfa, 0x6c, 0x4e, 0x28, 0xcb, 0xed, 0xa4, 0x99, - 0x1d, 0xad, 0x92, 0x24, 0xc0, 0xe9, 0x32, 0x2c, 0xfc, 0x81, 0x00, 0x4e, 0x9e, 0xe6, 0x19, 0x5e, - 0xf8, 0x55, 0x9a, 0xc3, 0xb0, 0xc2, 0xa2, 0x15, 0xb6, 0x13, 0x73, 0x2d, 0xd0, 0xe4, 0x54, 0x7e, - 0x74, 0x2a, 0x5d, 0x5b, 0xc4, 0x85, 0x57, 0xf5, 0x17, 0xc7, 0x1c, 0x50, 0x5d, 0x4b, 0xac, 0x4a, - 0xaf, 0x00, 0xe1, 0xe4, 0x99, 0x36, 0x4e, 0x91, 0x89, 0x5b, 0x59, 0x2c, 0x10, 0x18, 0xe0, 0x10, - 0x27, 0x78, 0xa2, 0x8f, 0xc0, 0x98, 0x88, 0xc6, 0x19, 0x77, 0xd3, 0xd9, 0xf8, 0xe5, 0x03, 0x4e, - 0xe0, 0x70, 0x8a, 0x9a, 0x67, 0x58, 0x71, 0xd6, 0x1b, 0x44, 0x6c, 0x7d, 0xcb, 0xae, 0xb7, 0x1d, - 0x4e, 0x9e, 0x63, 0xfb, 0x83, 0xc8, 0xb0, 0x92, 0xc4, 0xe2, 0x8c, 0x12, 0x68, 0x0d, 0xc6, 0x9a, - 0x01, 0x21, 0x3b, 0x4c, 0xd0, 0x17, 0xe7, 0x59, 0x99, 0x47, 0xff, 0xa0, 0x2d, 0xa9, 0x24, 0x70, - 0x07, 0x19, 0x30, 0x9c, 0xe2, 0x80, 0x6e, 0xc3, 0x80, 0xbf, 0x4b, 0x82, 0x2d, 0xe2, 0xd4, 0x27, - 0xcf, 0xb7, 0x79, 0x8f, 0x23, 0x0e, 0xb7, 0xeb, 0x82, 0x36, 0xe1, 0xed, 0x21, 0xc1, 0x9d, 0xbd, - 0x3d, 0x64, 0x65, 0xe8, 0x7f, 0xb7, 0xe0, 0xb4, 0x34, 0xce, 0x54, 0x9b, 0xb4, 0xd7, 0xe7, 0x7c, - 0x2f, 0x8c, 0x02, 0x1e, 0xaf, 0xe2, 0x91, 0xfc, 0x18, 0x0e, 0x6b, 0x39, 0x85, 0x94, 0x22, 0xfa, - 0x74, 0x1e, 0x45, 0x88, 0xf3, 0x6b, 0xa4, 0x57, 0xd3, 0x90, 0x44, 0x72, 0x33, 0x9a, 0x09, 0x17, - 0xdf, 0x98, 0x5f, 0x9d, 0x7c, 0x94, 0x07, 0xdb, 0xa0, 0x8b, 0xa1, 0x9a, 0x44, 0xe2, 0x34, 0x3d, - 0xba, 0x0c, 0x05, 0x3f, 0x9c, 0x7c, 0xac, 0x4d, 0x2e, 0x5e, 0xbf, 0x7e, 0xbd, 0xca, 0xbd, 0xfe, - 0xae, 0x57, 0x71, 0xc1, 0x0f, 0x65, 0x96, 0x13, 0x7a, 0x1f, 0x0b, 0x27, 0x1f, 0xe7, 0x6a, 0x4b, - 0x99, 0xe5, 0x84, 0x01, 0x71, 0x8c, 0x47, 0x5b, 0x30, 0x1a, 0x1a, 0xf7, 0xde, 0x70, 0xf2, 0x02, - 0xeb, 0xa9, 0xc7, 0xf3, 0x06, 0xcd, 0xa0, 0xd6, 0xd2, 0x0f, 0x98, 0x5c, 0x70, 0x92, 0x2d, 0x5f, - 0x5d, 0xda, 0xcd, 0x3b, 0x9c, 0x7c, 0xa2, 0xc3, 0xea, 0xd2, 0x88, 0xf5, 0xd5, 0xa5, 0xf3, 0xc0, - 0x09, 0x9e, 0x53, 0xdf, 0x05, 0xe3, 0x29, 0x71, 0xe9, 0x30, 0x1e, 0xee, 0x53, 0xdb, 0x30, 0x6c, - 0x4c, 0xc9, 0x07, 0xea, 0x5d, 0xf1, 0xdb, 0x25, 0x28, 0x29, 0xab, 0x37, 0xba, 0x64, 0x3a, 0x54, - 0x9c, 0x4e, 0x3a, 0x54, 0x0c, 0x54, 0xfc, 0xba, 0xe1, 0x43, 0xb1, 0x96, 0x11, 0x92, 0x31, 0x6f, - 0x03, 0xec, 0xfe, 0x91, 0x8a, 0x66, 0x4a, 0x28, 0x76, 0xed, 0x99, 0xd1, 0xd3, 0xd6, 0x3a, 0x71, - 0x05, 0xc6, 0x3d, 0x9f, 0xc9, 0xe8, 0xa4, 0x2e, 0x05, 
0x30, 0x26, 0x67, 0x95, 0xf4, 0x18, 0x47, - 0x09, 0x02, 0x9c, 0x2e, 0x43, 0x2b, 0xe4, 0x82, 0x52, 0xd2, 0x1c, 0xc2, 0xe5, 0x28, 0x2c, 0xb0, - 0xf4, 0x6e, 0xc8, 0x7f, 0x85, 0x93, 0x63, 0xf9, 0x77, 0x43, 0x5e, 0x28, 0x29, 0x8c, 0x85, 0x52, - 0x18, 0x63, 0xda, 0xff, 0xa6, 0x5f, 0x5f, 0xaa, 0x08, 0x31, 0x5f, 0x8b, 0x27, 0x5c, 0x5f, 0xaa, - 0x60, 0x8e, 0x43, 0x33, 0xd0, 0xc7, 0x7e, 0x84, 0x93, 0x43, 0xf9, 0x31, 0x71, 0x58, 0x09, 0x2d, - 0xcb, 0x1a, 0x2b, 0x80, 0x45, 0x41, 0xa6, 0xdd, 0xa5, 0x77, 0x23, 0xa6, 0xdd, 0xed, 0xbf, 0x4f, - 0xed, 0xae, 0x64, 0x80, 0x63, 0x5e, 0xe8, 0x0e, 0x9c, 0x34, 0xee, 0xa3, 0xea, 0xd5, 0x0e, 0xe4, - 0x1b, 0x7e, 0x13, 0xc4, 0xb3, 0x67, 0x45, 0xa3, 0x4f, 0x2e, 0x65, 0x71, 0xc2, 0xd9, 0x15, 0xa0, - 0x06, 0x8c, 0xd7, 0x52, 0xb5, 0x0e, 0x74, 0x5f, 0xab, 0x9a, 0x17, 0xe9, 0x1a, 0xd3, 0x8c, 0xd1, - 0x2b, 0x30, 0xf0, 0xb6, 0x1f, 0xb2, 0x23, 0x52, 0x5c, 0x4d, 0x64, 0x50, 0x87, 0x81, 0x37, 0xae, - 0x57, 0x19, 0xfc, 0x60, 0xbf, 0x3c, 0x58, 0xf1, 0xeb, 0xf2, 0x2f, 0x56, 0x05, 0xd0, 0x0f, 0x58, - 0x30, 0x95, 0xbe, 0xf0, 0xaa, 0x46, 0x0f, 0x77, 0xdf, 0x68, 0x5b, 0x54, 0x3a, 0xb5, 0x90, 0xcb, - 0x0e, 0xb7, 0xa9, 0x0a, 0x7d, 0x88, 0xae, 0xa7, 0xd0, 0xbd, 0x4b, 0x44, 0x8a, 0xda, 0x47, 0xe2, - 0xf5, 0x44, 0xa1, 0x07, 0xfb, 0xe5, 0x51, 0xbe, 0x33, 0xba, 0x77, 0xe5, 0xf3, 0x26, 0x51, 0x00, - 0x7d, 0x37, 0x9c, 0x0c, 0xd2, 0x1a, 0x54, 0x22, 0x85, 0xf0, 0xa7, 0xba, 0xd9, 0x65, 0x93, 0x03, - 0x8e, 0xb3, 0x18, 0xe2, 0xec, 0x7a, 0xec, 0x5f, 0xb1, 0x98, 0x7e, 0x5b, 0x34, 0x8b, 0x84, 0xad, - 0xc6, 0x71, 0x24, 0xc6, 0x5e, 0x30, 0x6c, 0xc7, 0xf7, 0xed, 0x58, 0xf4, 0x0f, 0x2d, 0xe6, 0x58, - 0x74, 0x8c, 0xaf, 0x98, 0xde, 0x80, 0x81, 0x48, 0x26, 0x2c, 0x6f, 0x93, 0xcb, 0x5b, 0x6b, 0x14, - 0x73, 0xae, 0x52, 0x97, 0x1c, 0x95, 0x9b, 0x5c, 0xb1, 0xb1, 0xff, 0x1e, 0x1f, 0x01, 0x89, 0x39, - 0x06, 0x13, 0xdd, 0xbc, 0x69, 0xa2, 0x2b, 0x77, 0xf8, 0x82, 0x1c, 0x53, 0xdd, 0xdf, 0x35, 0xdb, - 0xcd, 0x94, 0x7b, 0xef, 0x76, 0x8f, 0x36, 0xfb, 0x0b, 0x16, 0x40, 0x1c, 0x6a, 0xbe, 0x8b, 0x94, - 0x94, 0x2f, 0xd1, 0x6b, 0x8d, 0x1f, 0xf9, 0x35, 0xbf, 0x21, 0x0c, 0x14, 0x67, 0x62, 0x2b, 0x21, - 0x87, 0x1f, 0x68, 0xbf, 0xb1, 0xa2, 0x46, 0x65, 0x19, 0xd8, 0xb2, 0x18, 0xdb, 0xad, 0x8d, 0xa0, - 0x96, 0x5f, 0xb6, 0xe0, 0x44, 0x96, 0x4b, 0x3c, 0xbd, 0x24, 0x73, 0x35, 0xa7, 0xf2, 0x36, 0x54, - 0xa3, 0x79, 0x53, 0xc0, 0xb1, 0xa2, 0xe8, 0x3a, 0xd7, 0xe7, 0xe1, 0x62, 0xbc, 0x5f, 0x87, 0xe1, - 0x4a, 0x40, 0x34, 0xf9, 0xe2, 0x35, 0x1e, 0x2c, 0x85, 0xb7, 0xe7, 0x99, 0x43, 0x07, 0x4a, 0xb1, - 0xbf, 0x52, 0x80, 0x13, 0xdc, 0x69, 0x67, 0x66, 0xd7, 0x77, 0xeb, 0x15, 0xbf, 0x2e, 0x1e, 0x32, - 0xbe, 0x09, 0x43, 0x4d, 0x4d, 0x37, 0xdd, 0x2e, 0x5e, 0xb1, 0xae, 0xc3, 0x8e, 0xb5, 0x69, 0x3a, - 0x14, 0x1b, 0xbc, 0x50, 0x1d, 0x86, 0xc8, 0xae, 0x5b, 0x53, 0x9e, 0x1f, 0x85, 0x43, 0x1f, 0xd2, - 0xaa, 0x96, 0x05, 0x8d, 0x0f, 0x36, 0xb8, 0x3e, 0x80, 0x0c, 0xfc, 0xf6, 0x8f, 0x5a, 0xf0, 0x50, - 0x4e, 0x74, 0x63, 0x5a, 0xdd, 0x6d, 0xe6, 0x1e, 0x25, 0xa6, 0xad, 0xaa, 0x8e, 0x3b, 0x4d, 0x61, - 0x81, 0x45, 0x1f, 0x05, 0xe0, 0x4e, 0x4f, 0xc4, 0xab, 0x75, 0x0c, 0x03, 0x6b, 0x44, 0xb0, 0xd4, - 0x82, 0x11, 0xca, 0xf2, 0x58, 0xe3, 0x65, 0x7f, 0xb9, 0x07, 0x7a, 0x99, 0x93, 0x0d, 0xaa, 0x40, - 0xff, 0x16, 0xcf, 0x74, 0xd5, 0x76, 0xdc, 0x28, 0xad, 0x4c, 0x9e, 0x15, 0x8f, 0x9b, 0x06, 0xc5, - 0x92, 0x0d, 0x5a, 0x81, 0x09, 0x9e, 0x70, 0xac, 0x31, 0x4f, 0x1a, 0xce, 0x9e, 0x54, 0xfb, 0xf2, - 0x1c, 0xda, 0x4a, 0xfd, 0xbd, 0x94, 0x26, 0xc1, 0x59, 0xe5, 0xd0, 0x6b, 0x30, 0x42, 0xaf, 0xe1, - 0x7e, 0x2b, 0x92, 0x9c, 0x78, 0xaa, 0x31, 0x75, 0x33, 0x59, 0x33, 0xb0, 0x38, 
0x41, 0x8d, 0x5e, - 0x81, 0xe1, 0x66, 0x4a, 0xc1, 0xdd, 0x1b, 0x6b, 0x82, 0x4c, 0xa5, 0xb6, 0x49, 0xcb, 0xbc, 0xe2, - 0x5b, 0xec, 0x0d, 0xc0, 0xda, 0x56, 0x40, 0xc2, 0x2d, 0xbf, 0x51, 0x67, 0x12, 0x70, 0xaf, 0xe6, - 0x15, 0x9f, 0xc0, 0xe3, 0x54, 0x09, 0xca, 0x65, 0xc3, 0x71, 0x1b, 0xad, 0x80, 0xc4, 0x5c, 0xfa, - 0x4c, 0x2e, 0x8b, 0x09, 0x3c, 0x4e, 0x95, 0xe8, 0xac, 0xb9, 0xef, 0x3f, 0x1a, 0xcd, 0xbd, 0xfd, - 0x37, 0x0a, 0x60, 0x0c, 0xed, 0x77, 0x6e, 0x0a, 0x34, 0xfa, 0x65, 0x9b, 0x41, 0xb3, 0x26, 0x1c, - 0xca, 0x32, 0xbf, 0x2c, 0xce, 0x7f, 0xcc, 0xbf, 0x8c, 0xfe, 0xc7, 0xac, 0x14, 0x5d, 0xe3, 0x27, - 0x2b, 0x81, 0x4f, 0x0f, 0x39, 0x19, 0x4e, 0x4f, 0x3d, 0x3e, 0xe9, 0x97, 0x41, 0x06, 0xda, 0x04, - 0x9e, 0x15, 0xee, 0xf9, 0x9c, 0x83, 0xe1, 0x7b, 0x55, 0x15, 0xd1, 0x3e, 0x24, 0x17, 0x74, 0x19, - 0x06, 0x45, 0x5e, 0x2b, 0xf6, 0x46, 0x82, 0x2f, 0x26, 0xe6, 0x2b, 0x36, 0x1f, 0x83, 0xb1, 0x4e, - 0x63, 0xff, 0x60, 0x01, 0x26, 0x32, 0x1e, 0xb9, 0xf1, 0x63, 0x64, 0xd3, 0x0d, 0x23, 0x95, 0x62, - 0x59, 0x3b, 0x46, 0x38, 0x1c, 0x2b, 0x0a, 0xba, 0x57, 0xf1, 0x83, 0x2a, 0x79, 0x38, 0x89, 0x47, - 0x24, 0x02, 0x7b, 0xc8, 0x64, 0xc5, 0xe7, 0xa1, 0xa7, 0x15, 0x12, 0x19, 0x32, 0x5a, 0x1d, 0xdb, - 0xcc, 0xac, 0xcd, 0x30, 0xf4, 0x0a, 0xb8, 0xa9, 0x2c, 0xc4, 0xda, 0x15, 0x90, 0xdb, 0x88, 0x39, - 0x8e, 0x36, 0x2e, 0x22, 0x9e, 0xe3, 0x45, 0xe2, 0xa2, 0x18, 0xc7, 0x3e, 0x65, 0x50, 0x2c, 0xb0, - 0xf6, 0x97, 0x8a, 0x70, 0x3a, 0xf7, 0xd9, 0x2b, 0x6d, 0xfa, 0x8e, 0xef, 0xb9, 0x91, 0xaf, 0x9c, - 0xf0, 0x78, 0xbc, 0x53, 0xd2, 0xdc, 0x5a, 0x11, 0x70, 0xac, 0x28, 0xd0, 0x05, 0xe8, 0x65, 0x4a, - 0xf1, 0x54, 0xb2, 0xe9, 0xd9, 0x79, 0x1e, 0x00, 0x8f, 0xa3, 0xb5, 0x53, 0xbd, 0xd8, 0xf6, 0x54, - 0x7f, 0x94, 0x4a, 0x30, 0x7e, 0x23, 0x79, 0xa0, 0xd0, 0xe6, 0xfa, 0x7e, 0x03, 0x33, 0x24, 0x7a, - 0x5c, 0xf4, 0x57, 0xc2, 0xeb, 0x0c, 0x3b, 0x75, 0x3f, 0xd4, 0x3a, 0xed, 0x49, 0xe8, 0xdf, 0x26, - 0x7b, 0x81, 0xeb, 0x6d, 0x26, 0xbd, 0x11, 0xaf, 0x71, 0x30, 0x96, 0x78, 0x33, 0xef, 0x69, 0xff, - 0x51, 0x27, 0xf6, 0x1f, 0xe8, 0x28, 0x9e, 0xfc, 0x50, 0x11, 0x46, 0xf1, 0xec, 0xfc, 0x7b, 0x03, - 0x71, 0x23, 0x3d, 0x10, 0x47, 0x9d, 0xd8, 0xbf, 0xf3, 0x68, 0xfc, 0xa2, 0x05, 0xa3, 0x2c, 0xbb, - 0x96, 0x88, 0x59, 0xe1, 0xfa, 0xde, 0x31, 0x5c, 0x05, 0x1e, 0x85, 0xde, 0x80, 0x56, 0x9a, 0xcc, - 0x32, 0xcd, 0x5a, 0x82, 0x39, 0x0e, 0x9d, 0x81, 0x1e, 0xd6, 0x04, 0x3a, 0x78, 0x43, 0x7c, 0x0b, - 0x9e, 0x77, 0x22, 0x07, 0x33, 0x28, 0x0b, 0xff, 0x86, 0x49, 0xb3, 0xe1, 0xf2, 0x46, 0xc7, 0x2e, - 0x0b, 0xef, 0x8e, 0x80, 0x18, 0x99, 0x4d, 0x7b, 0x67, 0xe1, 0xdf, 0xb2, 0x59, 0xb6, 0xbf, 0x66, - 0xff, 0x79, 0x01, 0xce, 0x65, 0x96, 0xeb, 0x3a, 0xfc, 0x5b, 0xfb, 0xd2, 0x0f, 0x32, 0x0b, 0x52, - 0xf1, 0x18, 0x7d, 0xbd, 0x7b, 0xba, 0x95, 0xfe, 0x7b, 0xbb, 0x88, 0xca, 0x96, 0xd9, 0x65, 0xef, - 0x92, 0xa8, 0x6c, 0x99, 0x6d, 0xcb, 0x51, 0x13, 0xfc, 0x75, 0x21, 0xe7, 0x5b, 0x98, 0xc2, 0xe0, - 0x22, 0xdd, 0x67, 0x18, 0x32, 0x94, 0x97, 0x70, 0xbe, 0xc7, 0x70, 0x18, 0x56, 0x58, 0x34, 0x03, - 0xa3, 0x3b, 0xae, 0x47, 0x37, 0x9f, 0x3d, 0x53, 0x14, 0x57, 0xb6, 0x8c, 0x15, 0x13, 0x8d, 0x93, - 0xf4, 0xc8, 0xd5, 0x22, 0xb6, 0xf1, 0xaf, 0x7b, 0xe5, 0x50, 0xab, 0x6e, 0xda, 0x74, 0xe7, 0x50, - 0xbd, 0x98, 0x11, 0xbd, 0x6d, 0x45, 0xd3, 0x13, 0x15, 0xbb, 0xd7, 0x13, 0x0d, 0x65, 0xeb, 0x88, - 0xa6, 0x5e, 0x81, 0xe1, 0xfb, 0xb6, 0x8d, 0xd8, 0xdf, 0x28, 0xc2, 0xc3, 0x6d, 0x96, 0x3d, 0xdf, - 0xeb, 0x8d, 0x31, 0xd0, 0xf6, 0xfa, 0xd4, 0x38, 0x54, 0xe0, 0xc4, 0x46, 0xab, 0xd1, 0xd8, 0x63, - 0x4f, 0xa0, 0x48, 0x5d, 0x52, 0x08, 0x99, 0x52, 0x2a, 0x47, 0x4e, 0x2c, 0x66, 0xd0, 0xe0, 0xcc, - 0x92, 
0xf4, 0x8a, 0x45, 0x4f, 0x92, 0x3d, 0xc5, 0x2a, 0x71, 0xc5, 0xc2, 0x3a, 0x12, 0x9b, 0xb4, - 0xe8, 0x0a, 0x8c, 0x3b, 0xbb, 0x8e, 0xcb, 0xc3, 0xde, 0x4b, 0x06, 0xfc, 0x8e, 0xa5, 0x74, 0xd1, - 0x33, 0x49, 0x02, 0x9c, 0x2e, 0x83, 0x5e, 0x07, 0xe4, 0xaf, 0xb3, 0x87, 0x12, 0xf5, 0x2b, 0xc4, - 0x13, 0x56, 0x77, 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x9e, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0x81, - 0xc9, 0xfa, 0xf2, 0x03, 0x93, 0xb5, 0xdf, 0x17, 0x3b, 0x26, 0xe0, 0xba, 0x0c, 0xc3, 0x87, 0x74, - 0xff, 0xb5, 0xff, 0xb5, 0x05, 0x4a, 0x41, 0x6c, 0xc6, 0xfe, 0x7d, 0x85, 0xf9, 0x27, 0x73, 0xd5, - 0xb6, 0x16, 0x2d, 0xe9, 0xa4, 0xe6, 0x9f, 0x1c, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0x57, - 0x6c, 0xdc, 0x0a, 0x44, 0x68, 0x42, 0x45, 0x81, 0x3e, 0x06, 0xfd, 0x75, 0x77, 0xd7, 0x0d, 0x85, - 0x72, 0xec, 0xd0, 0xc6, 0xb8, 0x78, 0xeb, 0x9c, 0xe7, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x50, 0x21, - 0xee, 0x93, 0x37, 0x5a, 0x7e, 0xe4, 0x1c, 0xc3, 0x49, 0x7e, 0xc5, 0x38, 0xc9, 0x1f, 0xcf, 0x1e, - 0x68, 0xad, 0x49, 0xb9, 0x27, 0xf8, 0xf5, 0xc4, 0x09, 0xfe, 0x44, 0x67, 0x56, 0xed, 0x4f, 0xee, - 0xbf, 0x6f, 0xc1, 0xb8, 0x41, 0x7f, 0x0c, 0x07, 0xc8, 0xa2, 0x79, 0x80, 0x3c, 0xd2, 0xf1, 0x1b, - 0x72, 0x0e, 0x8e, 0xef, 0x2f, 0x26, 0xda, 0xce, 0x0e, 0x8c, 0xb7, 0xa1, 0x67, 0xcb, 0x09, 0xea, - 0xed, 0xb2, 0xd2, 0xa4, 0x0a, 0x4d, 0x5f, 0x75, 0x02, 0xe1, 0xa9, 0xf0, 0x8c, 0xec, 0x75, 0x0a, - 0xea, 0xe8, 0xa5, 0xc0, 0xaa, 0x42, 0x2f, 0x41, 0x5f, 0x58, 0xf3, 0x9b, 0xea, 0xcd, 0x14, 0x4b, - 0x7c, 0x5a, 0x65, 0x90, 0x83, 0xfd, 0x32, 0x32, 0xab, 0xa3, 0x60, 0x2c, 0xe8, 0xd1, 0x9b, 0x30, - 0xcc, 0x7e, 0x29, 0xb7, 0xc1, 0x62, 0xbe, 0x06, 0xa3, 0xaa, 0x13, 0x72, 0x9f, 0x5a, 0x03, 0x84, - 0x4d, 0x56, 0x53, 0x9b, 0x50, 0x52, 0x9f, 0xf5, 0x40, 0xad, 0xdd, 0xff, 0xb2, 0x08, 0x13, 0x19, - 0x73, 0x0e, 0x85, 0xc6, 0x48, 0x5c, 0xee, 0x72, 0xaa, 0xbe, 0xc3, 0xb1, 0x08, 0xd9, 0x05, 0xaa, - 0x2e, 0xe6, 0x56, 0xd7, 0x95, 0xde, 0x08, 0x49, 0xb2, 0x52, 0x0a, 0xea, 0x5c, 0x29, 0xad, 0xec, - 0xd8, 0xba, 0x9a, 0x56, 0xa4, 0x5a, 0xfa, 0x40, 0xc7, 0xf4, 0xd7, 0x7b, 0xe0, 0x44, 0x56, 0xc8, - 0x58, 0xf4, 0x99, 0x44, 0x36, 0xe5, 0x17, 0xda, 0xf5, 0xb0, 0x5e, 0x92, 0xa7, 0x58, 0x16, 0x61, - 0x20, 0xa7, 0xcd, 0xfc, 0xca, 0x1d, 0xbb, 0x59, 0xd4, 0xc9, 0x02, 0xd0, 0x04, 0x3c, 0x0b, 0xb6, - 0xdc, 0x3e, 0x3e, 0xd0, 0x75, 0x03, 0x44, 0xfa, 0xec, 0x30, 0xe1, 0x92, 0x24, 0xc1, 0x9d, 0x5d, - 0x92, 0x64, 0xcd, 0x68, 0x09, 0xfa, 0x6a, 0xdc, 0xd7, 0xa5, 0xd8, 0x79, 0x0b, 0xe3, 0x8e, 0x2e, - 0x6a, 0x03, 0x16, 0x0e, 0x2e, 0x82, 0xc1, 0x94, 0x0b, 0x83, 0x5a, 0xc7, 0x3c, 0xd0, 0xc9, 0xb3, - 0x4d, 0x0f, 0x3e, 0xad, 0x0b, 0x1e, 0xe8, 0x04, 0xfa, 0x51, 0x0b, 0x12, 0x0f, 0x5e, 0x94, 0x52, - 0xce, 0xca, 0x55, 0xca, 0x9d, 0x87, 0x9e, 0xc0, 0x6f, 0x90, 0x64, 0x1e, 0x62, 0xec, 0x37, 0x08, - 0x66, 0x18, 0x4a, 0x11, 0xc5, 0xaa, 0x96, 0x21, 0xfd, 0x1a, 0x29, 0x2e, 0x88, 0x8f, 0x42, 0x6f, - 0x83, 0xec, 0x92, 0x46, 0x32, 0x5d, 0xdc, 0x32, 0x05, 0x62, 0x8e, 0xb3, 0x7f, 0xb1, 0x07, 0xce, - 0xb6, 0x8d, 0x06, 0x45, 0x2f, 0x63, 0x9b, 0x4e, 0x44, 0x6e, 0x3b, 0x7b, 0xc9, 0xbc, 0x4e, 0x57, - 0x38, 0x18, 0x4b, 0x3c, 0x7b, 0xfe, 0xc9, 0xd3, 0x33, 0x24, 0x54, 0x98, 0x22, 0x2b, 0x83, 0xc0, - 0x9a, 0x2a, 0xb1, 0xe2, 0x51, 0xa8, 0xc4, 0x9e, 0x03, 0x08, 0xc3, 0x06, 0x77, 0x0b, 0xac, 0x8b, - 0x77, 0xa5, 0x71, 0x1a, 0x8f, 0xea, 0xb2, 0xc0, 0x60, 0x8d, 0x0a, 0xcd, 0xc3, 0x58, 0x33, 0xf0, - 0x23, 0xae, 0x11, 0x9e, 0xe7, 0x9e, 0xb3, 0xbd, 0x66, 0x20, 0x9e, 0x4a, 0x02, 0x8f, 0x53, 0x25, - 0xd0, 0x8b, 0x30, 0x28, 0x82, 0xf3, 0x54, 0x7c, 0xbf, 0x21, 0x94, 0x50, 0xca, 0x99, 0xb4, 0x1a, - 0xa3, 0xb0, 0x4e, 0xa7, 0x15, 
0x63, 0x6a, 0xe6, 0xfe, 0xcc, 0x62, 0x5c, 0xd5, 0xac, 0xd1, 0x25, - 0x22, 0x51, 0x0f, 0x74, 0x15, 0x89, 0x3a, 0x56, 0xcb, 0x95, 0xba, 0xb6, 0x7a, 0x42, 0x47, 0x45, - 0xd6, 0x57, 0x7b, 0x60, 0x42, 0x4c, 0x9c, 0x07, 0x3d, 0x5d, 0x6e, 0xa4, 0xa7, 0xcb, 0x51, 0x28, - 0xee, 0xde, 0x9b, 0x33, 0xc7, 0x3d, 0x67, 0x7e, 0xd8, 0x02, 0x53, 0x52, 0x43, 0xff, 0x6b, 0x6e, - 0x62, 0xbc, 0x17, 0x73, 0x25, 0xbf, 0x38, 0xca, 0xef, 0x3b, 0x4b, 0x91, 0x67, 0xff, 0x2b, 0x0b, - 0x1e, 0xe9, 0xc8, 0x11, 0x2d, 0x40, 0x89, 0x89, 0x93, 0xda, 0x45, 0xef, 0x09, 0xe5, 0x59, 0x2f, - 0x11, 0x39, 0xd2, 0x6d, 0x5c, 0x12, 0x2d, 0xa4, 0x32, 0x10, 0x3e, 0x99, 0x91, 0x81, 0xf0, 0xa4, - 0xd1, 0x3d, 0xf7, 0x99, 0x82, 0xf0, 0x8b, 0xf4, 0xc4, 0x31, 0x5e, 0xb5, 0xa1, 0x0f, 0x18, 0x4a, - 0x47, 0x3b, 0xa1, 0x74, 0x44, 0x26, 0xb5, 0x76, 0x86, 0x7c, 0x04, 0xc6, 0x58, 0xd4, 0x3e, 0xf6, - 0xce, 0x43, 0xbc, 0xb7, 0x2b, 0xc4, 0xbe, 0xdc, 0xcb, 0x09, 0x1c, 0x4e, 0x51, 0xdb, 0x7f, 0x5a, - 0x84, 0x3e, 0xbe, 0xfc, 0x8e, 0xe1, 0x7a, 0xf9, 0x34, 0x94, 0xdc, 0x9d, 0x9d, 0x16, 0x4f, 0x2a, - 0xd7, 0x1b, 0x7b, 0x06, 0x2f, 0x49, 0x20, 0x8e, 0xf1, 0x68, 0x51, 0xe8, 0xbb, 0xdb, 0x04, 0x06, - 0xe6, 0x0d, 0x9f, 0x9e, 0x77, 0x22, 0x87, 0xcb, 0x4a, 0xea, 0x9c, 0x8d, 0x35, 0xe3, 0xe8, 0x93, - 0x00, 0x61, 0x14, 0xb8, 0xde, 0x26, 0x85, 0x89, 0xd8, 0xea, 0x4f, 0xb5, 0xe1, 0x56, 0x55, 0xc4, - 0x9c, 0x67, 0xbc, 0xe7, 0x28, 0x04, 0xd6, 0x38, 0xa2, 0x69, 0xe3, 0xa4, 0x9f, 0x4a, 0x8c, 0x1d, - 0x70, 0xae, 0xf1, 0x98, 0x4d, 0x7d, 0x10, 0x4a, 0x8a, 0x79, 0x27, 0xed, 0xd7, 0x90, 0x2e, 0x16, - 0x7d, 0x18, 0x46, 0x13, 0x6d, 0x3b, 0x94, 0xf2, 0xec, 0x97, 0x2c, 0x18, 0xe5, 0x8d, 0x59, 0xf0, - 0x76, 0xc5, 0x69, 0x70, 0x17, 0x4e, 0x34, 0x32, 0x76, 0x65, 0x31, 0xfc, 0xdd, 0xef, 0xe2, 0x4a, - 0x59, 0x96, 0x85, 0xc5, 0x99, 0x75, 0xa0, 0x8b, 0x74, 0xc5, 0xd1, 0x5d, 0xd7, 0x69, 0x88, 0xf8, - 0x06, 0x43, 0x7c, 0xb5, 0x71, 0x18, 0x56, 0x58, 0xfb, 0x0f, 0x2c, 0x18, 0xe7, 0x2d, 0xbf, 0x46, - 0xf6, 0xd4, 0xde, 0xf4, 0xad, 0x6c, 0xbb, 0x48, 0x67, 0x5a, 0xc8, 0x49, 0x67, 0xaa, 0x7f, 0x5a, - 0xb1, 0xed, 0xa7, 0x7d, 0xc5, 0x02, 0x31, 0x43, 0x8e, 0x41, 0x9f, 0xf1, 0x5d, 0xa6, 0x3e, 0x63, - 0x2a, 0x7f, 0x11, 0xe4, 0x28, 0x32, 0xfe, 0xca, 0x82, 0x31, 0x4e, 0x10, 0xdb, 0xea, 0xbf, 0xa5, - 0xe3, 0x30, 0x6b, 0x7e, 0x51, 0xa6, 0xf3, 0xe5, 0x35, 0xb2, 0xb7, 0xe6, 0x57, 0x9c, 0x68, 0x2b, - 0xfb, 0xa3, 0x8c, 0xc1, 0xea, 0x69, 0x3b, 0x58, 0x75, 0xb9, 0x80, 0x8c, 0x6c, 0x5f, 0x1d, 0x02, - 0x04, 0x1c, 0x36, 0xdb, 0x97, 0xfd, 0x67, 0x16, 0x20, 0x5e, 0x8d, 0x21, 0xb8, 0x51, 0x71, 0x88, - 0x41, 0xb5, 0x83, 0x2e, 0xde, 0x9a, 0x14, 0x06, 0x6b, 0x54, 0x47, 0xd2, 0x3d, 0x09, 0x87, 0x8b, - 0x62, 0x67, 0x87, 0x8b, 0x43, 0xf4, 0xe8, 0x3f, 0xed, 0x83, 0xe4, 0xcb, 0x3e, 0x74, 0x13, 0x86, - 0x6a, 0x4e, 0xd3, 0x59, 0x77, 0x1b, 0x6e, 0xe4, 0x92, 0xb0, 0x9d, 0x37, 0xd6, 0x9c, 0x46, 0x27, - 0x4c, 0xe4, 0x1a, 0x04, 0x1b, 0x7c, 0xd0, 0x34, 0x40, 0x33, 0x70, 0x77, 0xdd, 0x06, 0xd9, 0x64, - 0x6a, 0x17, 0x16, 0x51, 0x85, 0xbb, 0x86, 0x49, 0x28, 0xd6, 0x28, 0x32, 0xc2, 0x28, 0x14, 0x1f, - 0x70, 0x18, 0x05, 0x38, 0xb6, 0x30, 0x0a, 0x3d, 0x87, 0x0a, 0xa3, 0x30, 0x70, 0xe8, 0x30, 0x0a, - 0xbd, 0x5d, 0x85, 0x51, 0xc0, 0x70, 0x4a, 0xca, 0x9e, 0xf4, 0xff, 0xa2, 0xdb, 0x20, 0xe2, 0xc2, - 0xc1, 0xc3, 0xc0, 0x4c, 0xdd, 0xdb, 0x2f, 0x9f, 0xc2, 0x99, 0x14, 0x38, 0xa7, 0x24, 0xfa, 0x28, - 0x4c, 0x3a, 0x8d, 0x86, 0x7f, 0x5b, 0x0d, 0xea, 0x42, 0x58, 0x73, 0x1a, 0xdc, 0x04, 0xd2, 0xcf, - 0xb8, 0x9e, 0xb9, 0xb7, 0x5f, 0x9e, 0x9c, 0xc9, 0xa1, 0xc1, 0xb9, 0xa5, 0xd1, 0xab, 0x50, 0x6a, - 0x06, 0x7e, 0x6d, 0x45, 0x7b, 0x7e, 0x7c, 0x8e, 0x76, 
0x60, 0x45, 0x02, 0x0f, 0xf6, 0xcb, 0xc3, - 0xea, 0x0f, 0x3b, 0xf0, 0xe3, 0x02, 0x19, 0x71, 0x11, 0x06, 0x8f, 0x34, 0x2e, 0xc2, 0x36, 0x4c, - 0x54, 0x49, 0xe0, 0x3a, 0x0d, 0xf7, 0x2e, 0x95, 0x97, 0xe5, 0xfe, 0xb4, 0x06, 0xa5, 0x20, 0xb1, - 0x23, 0x77, 0x15, 0xac, 0x57, 0x4b, 0xb8, 0x24, 0x77, 0xe0, 0x98, 0x91, 0xfd, 0x5f, 0x2d, 0xe8, - 0x17, 0x2f, 0xf9, 0x8e, 0x41, 0x6a, 0x9c, 0x31, 0x8c, 0x12, 0xe5, 0xec, 0x0e, 0x63, 0x8d, 0xc9, - 0x35, 0x47, 0x2c, 0x25, 0xcc, 0x11, 0x8f, 0xb4, 0x63, 0xd2, 0xde, 0x10, 0xf1, 0xff, 0x16, 0xa9, - 0xf4, 0x6e, 0xbc, 0x29, 0x7f, 0xf0, 0x5d, 0xb0, 0x0a, 0xfd, 0xa1, 0x78, 0xd3, 0x5c, 0xc8, 0x7f, - 0x0d, 0x92, 0x1c, 0xc4, 0xd8, 0x8b, 0x4e, 0xbc, 0x62, 0x96, 0x4c, 0x32, 0x1f, 0x4b, 0x17, 0x1f, - 0xe0, 0x63, 0xe9, 0x4e, 0xaf, 0xee, 0x7b, 0x8e, 0xe2, 0xd5, 0xbd, 0xfd, 0x75, 0x76, 0x72, 0xea, - 0xf0, 0x63, 0x10, 0xaa, 0xae, 0x98, 0x67, 0xac, 0xdd, 0x66, 0x66, 0x89, 0x46, 0xe5, 0x08, 0x57, - 0xbf, 0x60, 0xc1, 0xd9, 0x8c, 0xaf, 0xd2, 0x24, 0xad, 0x67, 0x60, 0xc0, 0x69, 0xd5, 0x5d, 0xb5, - 0x96, 0x35, 0xd3, 0xe4, 0x8c, 0x80, 0x63, 0x45, 0x81, 0xe6, 0x60, 0x9c, 0xdc, 0x69, 0xba, 0xdc, - 0x90, 0xab, 0x3b, 0x1f, 0x17, 0xf9, 0xf3, 0xcf, 0x85, 0x24, 0x12, 0xa7, 0xe9, 0x55, 0x80, 0xa8, - 0x62, 0x6e, 0x80, 0xa8, 0x9f, 0xb7, 0x60, 0x50, 0xbd, 0xea, 0x7d, 0xe0, 0xbd, 0xfd, 0x11, 0xb3, - 0xb7, 0x1f, 0x6e, 0xd3, 0xdb, 0x39, 0xdd, 0xfc, 0x7b, 0x05, 0xd5, 0xde, 0x8a, 0x1f, 0x44, 0x5d, - 0x48, 0x70, 0xf7, 0xff, 0x70, 0xe2, 0x32, 0x0c, 0x3a, 0xcd, 0xa6, 0x44, 0x48, 0x0f, 0x38, 0x16, - 0x7a, 0x3d, 0x06, 0x63, 0x9d, 0x46, 0xbd, 0xe3, 0x28, 0xe6, 0xbe, 0xe3, 0xa8, 0x03, 0x44, 0x4e, - 0xb0, 0x49, 0x22, 0x0a, 0x13, 0x0e, 0xbb, 0xf9, 0xfb, 0x4d, 0x2b, 0x72, 0x1b, 0xd3, 0xae, 0x17, - 0x85, 0x51, 0x30, 0xbd, 0xe4, 0x45, 0xd7, 0x03, 0x7e, 0x85, 0xd4, 0x42, 0xac, 0x29, 0x5e, 0x58, - 0xe3, 0x2b, 0x23, 0x58, 0xb0, 0x3a, 0x7a, 0x4d, 0x57, 0x8a, 0x55, 0x01, 0xc7, 0x8a, 0xc2, 0xfe, - 0x20, 0x3b, 0x7d, 0x58, 0x9f, 0x1e, 0x2e, 0xbc, 0xd8, 0x4f, 0x0e, 0xa9, 0xd1, 0x60, 0x46, 0xd1, - 0x79, 0x3d, 0x88, 0x59, 0xfb, 0xcd, 0x9e, 0x56, 0xac, 0xbf, 0x88, 0x8c, 0x23, 0x9d, 0xa1, 0x8f, - 0xa7, 0xdc, 0x63, 0x9e, 0xed, 0x70, 0x6a, 0x1c, 0xc2, 0x21, 0x86, 0xe5, 0x61, 0x62, 0x59, 0x6a, - 0x96, 0x2a, 0x62, 0x5d, 0x68, 0x79, 0x98, 0x04, 0x02, 0xc7, 0x34, 0x54, 0x98, 0x52, 0x7f, 0xc2, - 0x49, 0x14, 0xc7, 0x02, 0x56, 0xd4, 0x21, 0xd6, 0x28, 0xd0, 0x25, 0xa1, 0x50, 0xe0, 0x76, 0x81, - 0x87, 0x13, 0x0a, 0x05, 0xd9, 0x5d, 0x9a, 0x16, 0xe8, 0x32, 0x0c, 0x92, 0x3b, 0x11, 0x09, 0x3c, - 0xa7, 0x41, 0x6b, 0xe8, 0x8d, 0xe3, 0x67, 0x2e, 0xc4, 0x60, 0xac, 0xd3, 0xa0, 0x35, 0x18, 0x0d, - 0xb9, 0x9e, 0x4d, 0x05, 0x89, 0xe7, 0xfa, 0xca, 0xa7, 0xd4, 0x7b, 0x6a, 0x13, 0x7d, 0xc0, 0x40, - 0x7c, 0x77, 0x92, 0x51, 0x26, 0x92, 0x2c, 0xd0, 0x6b, 0x30, 0xd2, 0xf0, 0x9d, 0xfa, 0xac, 0xd3, - 0x70, 0xbc, 0x1a, 0xeb, 0x9f, 0x01, 0x33, 0x1d, 0xf5, 0xb2, 0x81, 0xc5, 0x09, 0x6a, 0x2a, 0xbc, - 0xe9, 0x10, 0x11, 0xa6, 0xcd, 0xf1, 0x36, 0x49, 0x28, 0xb2, 0xc2, 0x33, 0xe1, 0x6d, 0x39, 0x87, - 0x06, 0xe7, 0x96, 0x46, 0x2f, 0xc1, 0x90, 0xfc, 0x7c, 0x2d, 0x28, 0x4b, 0xfc, 0x24, 0x46, 0xc3, - 0x61, 0x83, 0x12, 0x85, 0x70, 0x52, 0xfe, 0x5f, 0x0b, 0x9c, 0x8d, 0x0d, 0xb7, 0x26, 0x22, 0x15, - 0xf0, 0xe7, 0xc3, 0x1f, 0x96, 0x6f, 0x15, 0x17, 0xb2, 0x88, 0x0e, 0xf6, 0xcb, 0x67, 0x44, 0xaf, - 0x65, 0xe2, 0x71, 0x36, 0x6f, 0xb4, 0x02, 0x13, 0x5b, 0xc4, 0x69, 0x44, 0x5b, 0x73, 0x5b, 0xa4, - 0xb6, 0x2d, 0x17, 0x1c, 0x0b, 0xf3, 0xa2, 0x3d, 0x1d, 0xb9, 0x9a, 0x26, 0xc1, 0x59, 0xe5, 0xd0, - 0x5b, 0x30, 0xd9, 0x6c, 0xad, 0x37, 0xdc, 0x70, 0x6b, 0xd5, 0x8f, 0x98, 0x13, 
0xd2, 0x4c, 0xbd, - 0x1e, 0x90, 0x90, 0xbf, 0x2e, 0x65, 0x47, 0xaf, 0x0c, 0xa4, 0x53, 0xc9, 0xa1, 0xc3, 0xb9, 0x1c, - 0xd0, 0x5d, 0x38, 0x99, 0x98, 0x08, 0x22, 0x22, 0xc6, 0x48, 0x7e, 0x8a, 0x98, 0x6a, 0x56, 0x01, - 0x11, 0x5c, 0x26, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xcd, 0x45, 0x67, 0xc7, 0x6d, - 0xd0, 0xab, 0xe2, 0x04, 0x9b, 0x23, 0xf4, 0xda, 0x00, 0x4b, 0x15, 0x09, 0xa5, 0x7b, 0xb3, 0xf8, - 0xb7, 0x87, 0x35, 0x6a, 0xb4, 0x0c, 0x23, 0xe2, 0xdf, 0x9e, 0x18, 0x52, 0x1e, 0x98, 0xe5, 0x31, - 0x16, 0x55, 0xab, 0xa2, 0x63, 0x0e, 0x52, 0x10, 0x9c, 0x28, 0x8b, 0x36, 0xe1, 0xac, 0x4c, 0xf4, - 0xa7, 0xcf, 0x4f, 0x39, 0x06, 0x21, 0xcb, 0xcb, 0x32, 0xc0, 0x5f, 0xa5, 0xcc, 0xb4, 0x23, 0xc4, - 0xed, 0xf9, 0xd0, 0x73, 0x5d, 0x9f, 0xe6, 0xfc, 0xcd, 0xf1, 0xc9, 0x38, 0xe2, 0xe0, 0x72, 0x12, - 0x89, 0xd3, 0xf4, 0xc8, 0x87, 0x93, 0xae, 0x97, 0x35, 0xab, 0x4f, 0x31, 0x46, 0x1f, 0xe2, 0xcf, - 0xad, 0xdb, 0xcf, 0xe8, 0x4c, 0x3c, 0xce, 0xe6, 0xfb, 0xce, 0xfc, 0xfe, 0x7e, 0xdf, 0xa2, 0xa5, - 0x35, 0xe9, 0x1c, 0x7d, 0x0a, 0x86, 0xf4, 0x8f, 0x12, 0x92, 0xc6, 0x85, 0x6c, 0xe1, 0x55, 0xdb, - 0x13, 0xb8, 0x6c, 0xaf, 0xd6, 0xbd, 0x8e, 0xc3, 0x06, 0x47, 0x54, 0xcb, 0x88, 0x6d, 0x70, 0xa9, - 0x3b, 0x49, 0xa6, 0x7b, 0xb7, 0x37, 0x02, 0xd9, 0xd3, 0x1d, 0x2d, 0xc3, 0x40, 0xad, 0xe1, 0x12, - 0x2f, 0x5a, 0xaa, 0xb4, 0x8b, 0xde, 0x38, 0x27, 0x68, 0xc4, 0xfa, 0x11, 0x29, 0x56, 0x38, 0x0c, - 0x2b, 0x0e, 0xf6, 0x6f, 0x16, 0xa0, 0xdc, 0x21, 0x5f, 0x4f, 0xc2, 0x0c, 0x65, 0x75, 0x65, 0x86, - 0x9a, 0x81, 0xd1, 0xf8, 0x9f, 0xae, 0xe1, 0x52, 0x9e, 0xac, 0x37, 0x4d, 0x34, 0x4e, 0xd2, 0x77, - 0xfd, 0x28, 0x41, 0xb7, 0x64, 0xf5, 0x74, 0x7c, 0x56, 0x63, 0x58, 0xb0, 0x7b, 0xbb, 0xbf, 0xf6, - 0xe6, 0x5a, 0x23, 0xed, 0xaf, 0x17, 0xe0, 0xa4, 0xea, 0xc2, 0xef, 0xdc, 0x8e, 0xbb, 0x91, 0xee, - 0xb8, 0x23, 0xb0, 0xe5, 0xda, 0xd7, 0xa1, 0x8f, 0x87, 0xa3, 0xec, 0x42, 0xdc, 0x7e, 0xd4, 0x8c, - 0x92, 0xad, 0x24, 0x3c, 0x23, 0x52, 0xf6, 0x0f, 0x58, 0x30, 0x9a, 0x78, 0xdd, 0x86, 0xb0, 0xf6, - 0x04, 0xfa, 0x7e, 0x44, 0xe2, 0x2c, 0x61, 0xfb, 0x3c, 0xf4, 0x6c, 0xf9, 0x61, 0x94, 0x74, 0xf4, - 0xb8, 0xea, 0x87, 0x11, 0x66, 0x18, 0xfb, 0x0f, 0x2d, 0xe8, 0x5d, 0x73, 0x5c, 0x2f, 0x92, 0x46, - 0x01, 0x2b, 0xc7, 0x28, 0xd0, 0xcd, 0x77, 0xa1, 0x17, 0xa1, 0x8f, 0x6c, 0x6c, 0x90, 0x5a, 0x24, - 0x46, 0x55, 0x86, 0x42, 0xe8, 0x5b, 0x60, 0x50, 0x2a, 0xff, 0xb1, 0xca, 0xf8, 0x5f, 0x2c, 0x88, - 0xd1, 0x2d, 0x28, 0x45, 0xee, 0x0e, 0x99, 0xa9, 0xd7, 0x85, 0xa9, 0xfc, 0x3e, 0xe2, 0x77, 0xac, - 0x49, 0x06, 0x38, 0xe6, 0x65, 0x7f, 0xa9, 0x00, 0x10, 0xc7, 0xf1, 0xea, 0xf4, 0x89, 0xb3, 0x29, - 0x23, 0xea, 0x85, 0x0c, 0x23, 0x2a, 0x8a, 0x19, 0x66, 0x58, 0x50, 0x55, 0x37, 0x15, 0xbb, 0xea, - 0xa6, 0x9e, 0xc3, 0x74, 0xd3, 0x1c, 0x8c, 0xc7, 0x71, 0xc8, 0xcc, 0x30, 0x8c, 0xec, 0xe8, 0x5c, - 0x4b, 0x22, 0x71, 0x9a, 0xde, 0x26, 0x70, 0x5e, 0x85, 0x63, 0x12, 0x27, 0x1a, 0xf3, 0x03, 0xd7, - 0x8d, 0xd2, 0x1d, 0xfa, 0x29, 0xb6, 0x12, 0x17, 0x72, 0xad, 0xc4, 0x3f, 0x61, 0xc1, 0x89, 0x64, - 0x3d, 0xec, 0xd1, 0xf4, 0x17, 0x2c, 0x38, 0xc9, 0x6c, 0xe5, 0xac, 0xd6, 0xb4, 0x65, 0xfe, 0x85, - 0xb6, 0x21, 0xa6, 0x72, 0x5a, 0x1c, 0xc7, 0xdc, 0x58, 0xc9, 0x62, 0x8d, 0xb3, 0x6b, 0xb4, 0xff, - 0x4b, 0x0f, 0x4c, 0xe6, 0xc5, 0xa6, 0x62, 0xcf, 0x44, 0x9c, 0x3b, 0xd5, 0x6d, 0x72, 0x5b, 0x38, - 0xe3, 0xc7, 0xcf, 0x44, 0x38, 0x18, 0x4b, 0x7c, 0x32, 0xfd, 0x49, 0xa1, 0xcb, 0xf4, 0x27, 0x5b, - 0x30, 0x7e, 0x7b, 0x8b, 0x78, 0x37, 0xbc, 0xd0, 0x89, 0xdc, 0x70, 0xc3, 0x65, 0x76, 0x65, 0x3e, - 0x6f, 0x64, 0x0e, 0xea, 0xf1, 0x5b, 0x49, 0x82, 0x83, 0xfd, 0xf2, 0x59, 0x03, 0x10, 0x37, 0x99, - 0x6f, 
0x24, 0x38, 0xcd, 0x34, 0x9d, 0x3d, 0xa6, 0xe7, 0x01, 0x67, 0x8f, 0xd9, 0x71, 0x85, 0x37, - 0x8a, 0x7c, 0x03, 0xc0, 0x6e, 0x8c, 0x2b, 0x0a, 0x8a, 0x35, 0x0a, 0xf4, 0x09, 0x40, 0x7a, 0x86, - 0x2e, 0x23, 0x34, 0xe8, 0xb3, 0xf7, 0xf6, 0xcb, 0x68, 0x35, 0x85, 0x3d, 0xd8, 0x2f, 0x4f, 0x50, - 0xe8, 0x92, 0x47, 0x6f, 0x9e, 0x71, 0x3c, 0xb5, 0x0c, 0x46, 0xe8, 0x16, 0x8c, 0x51, 0x28, 0x5b, - 0x51, 0x32, 0xee, 0x28, 0xbf, 0x2d, 0x3e, 0x7d, 0x6f, 0xbf, 0x3c, 0xb6, 0x9a, 0xc0, 0xe5, 0xb1, - 0x4e, 0x31, 0x41, 0x2f, 0xc3, 0x48, 0x3c, 0xaf, 0xae, 0x91, 0x3d, 0x1e, 0xa0, 0xa7, 0xc4, 0x15, - 0xde, 0x2b, 0x06, 0x06, 0x27, 0x28, 0xed, 0x2f, 0x58, 0x70, 0x3a, 0x37, 0x23, 0x3e, 0xba, 0x08, - 0x03, 0x4e, 0xd3, 0xe5, 0xe6, 0x0b, 0x71, 0xd4, 0x30, 0x35, 0x59, 0x65, 0x89, 0x1b, 0x2f, 0x14, - 0x96, 0xee, 0xf0, 0xdb, 0xae, 0x57, 0x4f, 0xee, 0xf0, 0xd7, 0x5c, 0xaf, 0x8e, 0x19, 0x46, 0x1d, - 0x59, 0xc5, 0xdc, 0xa7, 0x08, 0x5f, 0xa5, 0x6b, 0x35, 0x23, 0x77, 0xfe, 0xf1, 0x36, 0x03, 0x3d, - 0xad, 0x9b, 0x1a, 0x85, 0x57, 0x61, 0xae, 0x99, 0xf1, 0xf3, 0x16, 0x88, 0xa7, 0xcb, 0x5d, 0x9c, - 0xc9, 0x6f, 0xc2, 0xd0, 0x6e, 0x3a, 0x7b, 0xe1, 0xf9, 0xfc, 0xb7, 0xdc, 0x22, 0xe2, 0xba, 0x12, - 0xb4, 0x8d, 0x4c, 0x85, 0x06, 0x2f, 0xbb, 0x0e, 0x02, 0x3b, 0x4f, 0x98, 0x41, 0xa1, 0x73, 0x6b, - 0x9e, 0x03, 0xa8, 0x33, 0x5a, 0x96, 0xd2, 0xb8, 0x60, 0x4a, 0x5c, 0xf3, 0x0a, 0x83, 0x35, 0x2a, - 0xfb, 0x9f, 0x15, 0x60, 0x50, 0x66, 0xcb, 0x6b, 0x79, 0xdd, 0xa8, 0xfd, 0x0e, 0x95, 0x3e, 0x1b, - 0x5d, 0x82, 0x12, 0xd3, 0x4b, 0x57, 0x62, 0x6d, 0xa9, 0xd2, 0x0a, 0xad, 0x48, 0x04, 0x8e, 0x69, - 0xe8, 0xee, 0x18, 0xb6, 0xd6, 0x19, 0x79, 0xe2, 0xa1, 0x6d, 0x95, 0x83, 0xb1, 0xc4, 0xa3, 0x8f, - 0xc2, 0x18, 0x2f, 0x17, 0xf8, 0x4d, 0x67, 0x93, 0xdb, 0xb2, 0x7a, 0x55, 0xf4, 0x92, 0xb1, 0x95, - 0x04, 0xee, 0x60, 0xbf, 0x7c, 0x22, 0x09, 0x63, 0x46, 0xda, 0x14, 0x17, 0xe6, 0xb2, 0xc6, 0x2b, - 0xa1, 0xbb, 0x7a, 0xca, 0xd3, 0x2d, 0x46, 0x61, 0x9d, 0xce, 0xfe, 0x14, 0xa0, 0x74, 0xde, 0x40, - 0xf4, 0x3a, 0x77, 0x79, 0x76, 0x03, 0x52, 0x6f, 0x67, 0xb4, 0xd5, 0x63, 0x74, 0xc8, 0x37, 0x72, - 0xbc, 0x14, 0x56, 0xe5, 0xed, 0xff, 0xa3, 0x08, 0x63, 0xc9, 0xa8, 0x00, 0xe8, 0x2a, 0xf4, 0x71, - 0x91, 0x52, 0xb0, 0x6f, 0xe3, 0x13, 0xa4, 0xc5, 0x12, 0x60, 0x87, 0xab, 0x90, 0x4a, 0x45, 0x79, - 0xf4, 0x16, 0x0c, 0xd6, 0xfd, 0xdb, 0xde, 0x6d, 0x27, 0xa8, 0xcf, 0x54, 0x96, 0xc4, 0x74, 0xce, - 0x54, 0x54, 0xcc, 0xc7, 0x64, 0x7a, 0x7c, 0x02, 0x66, 0xff, 0x8e, 0x51, 0x58, 0x67, 0x87, 0xd6, - 0x58, 0xa2, 0x8f, 0x0d, 0x77, 0x73, 0xc5, 0x69, 0xb6, 0x7b, 0xff, 0x32, 0x27, 0x89, 0x34, 0xce, - 0xc3, 0x22, 0x1b, 0x08, 0x47, 0xe0, 0x98, 0x11, 0xfa, 0x0c, 0x4c, 0x84, 0x39, 0xa6, 0x93, 0xbc, - 0x34, 0xb2, 0xed, 0xac, 0x09, 0xb3, 0x0f, 0xdd, 0xdb, 0x2f, 0x4f, 0x64, 0x19, 0x59, 0xb2, 0xaa, - 0xb1, 0x3f, 0xdf, 0x03, 0x53, 0x32, 0x5d, 0x66, 0x86, 0xb7, 0xfd, 0xe7, 0xac, 0x84, 0xbb, 0xfd, - 0xcb, 0xf9, 0x7b, 0xc3, 0x03, 0x73, 0xba, 0xff, 0x62, 0xda, 0xe9, 0xfe, 0xd5, 0x43, 0x36, 0xe3, - 0xc8, 0x5c, 0xef, 0xbf, 0x63, 0xfd, 0xe5, 0xbf, 0x7c, 0x02, 0x8c, 0xdd, 0xdc, 0x48, 0x2f, 0x6f, - 0x1d, 0x51, 0x7a, 0x79, 0x0c, 0x03, 0x64, 0xa7, 0x19, 0xed, 0xcd, 0xbb, 0x81, 0x68, 0x71, 0x26, - 0xcf, 0x05, 0x41, 0x93, 0xe6, 0x29, 0x31, 0x58, 0xf1, 0x41, 0xbb, 0x90, 0x4e, 0xd0, 0x2f, 0x56, - 0x67, 0xe6, 0xea, 0x49, 0x25, 0xf8, 0x37, 0x6a, 0x61, 0xf7, 0x85, 0x14, 0x09, 0x4e, 0x57, 0xc1, - 0xb2, 0x5b, 0x67, 0xe5, 0xe5, 0x17, 0x2b, 0x37, 0x53, 0x5c, 0xcf, 0xca, 0xee, 0x9f, 0xce, 0x6e, - 0x9d, 0x45, 0x85, 0x33, 0xeb, 0x42, 0xab, 0xd0, 0xbf, 0xe9, 0x46, 0x98, 0x34, 0x7d, 0x71, 0xab, - 0xcf, 0xdc, 0x90, 0xae, 0x70, 
0x92, 0x74, 0xb6, 0x69, 0x81, 0xc0, 0x92, 0x09, 0x7a, 0x5d, 0x6d, - 0xc5, 0x7d, 0xf9, 0x9a, 0xb7, 0xb4, 0x17, 0x53, 0xe6, 0x66, 0x2c, 0x32, 0xfd, 0xf7, 0xdf, 0x6f, - 0xa6, 0xff, 0x45, 0x99, 0x9f, 0x7f, 0x20, 0xff, 0xd5, 0x22, 0x4b, 0xbf, 0xdf, 0x21, 0x2b, 0xff, - 0x4d, 0x28, 0x6d, 0x72, 0x2b, 0x8c, 0x4a, 0xa1, 0x9f, 0x79, 0x24, 0x5c, 0x91, 0x44, 0xe9, 0x7c, - 0xd7, 0x0a, 0x85, 0x63, 0x56, 0xe8, 0xf3, 0x16, 0x9c, 0x4c, 0xe6, 0x1c, 0x66, 0x8f, 0x6b, 0x84, - 0xc3, 0xcf, 0x8b, 0xdd, 0x24, 0x81, 0x66, 0x05, 0x8c, 0x0a, 0x99, 0xb2, 0x3c, 0x93, 0x0c, 0x67, - 0x57, 0x47, 0x3b, 0x3a, 0x58, 0xaf, 0x0b, 0xc7, 0x93, 0xcc, 0x8e, 0x4e, 0x04, 0x22, 0xe1, 0x1d, - 0x8d, 0x67, 0xe7, 0x31, 0x2d, 0x88, 0xd6, 0x32, 0xd2, 0xef, 0x3f, 0x96, 0x97, 0x7e, 0xbf, 0xeb, - 0xa4, 0xfb, 0xaf, 0x43, 0x5f, 0xcd, 0xf5, 0xea, 0x24, 0x10, 0x19, 0xf7, 0x33, 0xa7, 0xd2, 0x1c, - 0xa3, 0x48, 0x4f, 0x25, 0x0e, 0xc7, 0x82, 0x03, 0xe3, 0x45, 0x9a, 0x5b, 0x1b, 0x61, 0xbb, 0x10, - 0xf3, 0x73, 0xa4, 0xb9, 0x95, 0x98, 0x50, 0x9c, 0x17, 0x83, 0x63, 0xc1, 0x81, 0x2e, 0x99, 0x0d, - 0xba, 0x80, 0x48, 0xd0, 0x2e, 0x7b, 0xfe, 0x22, 0x27, 0x49, 0x2f, 0x19, 0x81, 0xc0, 0x92, 0x09, - 0xfa, 0xa4, 0x29, 0x73, 0xf0, 0xfc, 0xf9, 0x4f, 0x77, 0x90, 0x39, 0x0c, 0xbe, 0xed, 0xa5, 0x8e, - 0x97, 0xa1, 0xb0, 0x51, 0x13, 0x29, 0xf3, 0x33, 0x95, 0xc5, 0x8b, 0x73, 0x06, 0x37, 0x16, 0xb2, - 0x79, 0x71, 0x0e, 0x17, 0x36, 0x6a, 0x2a, 0xb3, 0xff, 0xa2, 0xdb, 0x90, 0x79, 0xef, 0xf3, 0x33, - 0xfb, 0x53, 0xa2, 0x9c, 0xcc, 0xfe, 0x14, 0x85, 0x63, 0x56, 0x94, 0x6f, 0x2c, 0x09, 0x4d, 0xe4, - 0xf3, 0x55, 0x02, 0x4f, 0x9a, 0x6f, 0xa6, 0x2c, 0xb4, 0x0d, 0xc3, 0xbb, 0x61, 0x73, 0x8b, 0xc8, - 0x5d, 0x51, 0x24, 0xbd, 0xcf, 0x7c, 0x99, 0x7e, 0x53, 0x10, 0xba, 0x41, 0xd4, 0x72, 0x1a, 0xa9, - 0x8d, 0x9c, 0xdd, 0xc6, 0x6f, 0xea, 0xcc, 0xb0, 0xc9, 0x9b, 0x4e, 0x84, 0xb7, 0x79, 0xf8, 0x28, - 0x91, 0x0f, 0x3f, 0x73, 0x22, 0x64, 0x44, 0x98, 0xe2, 0x13, 0x41, 0x20, 0xb0, 0x64, 0xa2, 0x3a, - 0x9b, 0x1d, 0x40, 0xa7, 0x3a, 0x74, 0x76, 0xaa, 0xbd, 0x71, 0x67, 0xb3, 0x03, 0x27, 0x66, 0xc5, - 0x0e, 0x9a, 0x66, 0x46, 0xee, 0xe7, 0xc9, 0x87, 0xf2, 0x0f, 0x9a, 0x4e, 0xb9, 0xa2, 0xf9, 0x41, - 0x93, 0x45, 0x85, 0x33, 0xeb, 0xa2, 0x1f, 0xd7, 0x94, 0x91, 0xc0, 0x44, 0x48, 0xfc, 0x27, 0x73, - 0x02, 0xe9, 0xa5, 0xc3, 0x85, 0xf1, 0x8f, 0x53, 0x28, 0x1c, 0xb3, 0x42, 0x75, 0x18, 0x69, 0x1a, - 0x11, 0x26, 0x59, 0x68, 0xff, 0x1c, 0xb9, 0x20, 0x2b, 0x16, 0x25, 0x57, 0x2a, 0x98, 0x18, 0x9c, - 0xe0, 0xc9, 0xfc, 0xac, 0xf8, 0xa3, 0x29, 0x16, 0xf9, 0x3f, 0x67, 0xa8, 0x33, 0xde, 0x55, 0xf1, - 0xa1, 0x16, 0x08, 0x2c, 0x99, 0xd0, 0xde, 0x10, 0x4f, 0x7d, 0xfc, 0x90, 0x25, 0xd0, 0xc8, 0x33, - 0x87, 0x66, 0x59, 0x16, 0x64, 0x58, 0x65, 0x81, 0xc2, 0x31, 0x2b, 0xba, 0x93, 0xd3, 0x03, 0xef, - 0x4c, 0xfe, 0x4e, 0x9e, 0x3c, 0xee, 0xd8, 0x4e, 0x4e, 0x0f, 0xbb, 0xa2, 0x38, 0xea, 0x54, 0x14, - 0x60, 0x16, 0xfc, 0x3f, 0xa7, 0x5d, 0x2a, 0x8c, 0x70, 0xba, 0x5d, 0x0a, 0x85, 0x63, 0x56, 0xf6, - 0x0f, 0x16, 0xe0, 0x5c, 0xfb, 0xf5, 0x16, 0x9b, 0x4b, 0x2a, 0xb1, 0x67, 0x48, 0xc2, 0x5c, 0xc2, - 0x2f, 0xef, 0x31, 0x55, 0xd7, 0x81, 0x41, 0xaf, 0xc0, 0xb8, 0x7a, 0x90, 0xd5, 0x70, 0x6b, 0x7b, - 0xab, 0xb1, 0xbe, 0x44, 0x85, 0xd0, 0xa8, 0x26, 0x09, 0x70, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x03, - 0xb8, 0x34, 0x2f, 0x2e, 0xe9, 0x71, 0xb8, 0x79, 0x13, 0x8d, 0x93, 0xf4, 0xf6, 0xcf, 0x5a, 0xf0, - 0x50, 0x4e, 0xee, 0xdf, 0xae, 0xe3, 0x5e, 0x6e, 0xc0, 0x68, 0xd3, 0x2c, 0xda, 0x21, 0x54, 0xaf, - 0x91, 0x61, 0x58, 0xb5, 0x35, 0x81, 0xc0, 0x49, 0xa6, 0xf6, 0x4f, 0x17, 0xe0, 0x6c, 0x5b, 0x0f, - 0x63, 0x84, 0xe1, 0xd4, 0xe6, 0x4e, 0xe8, 0xcc, 0x05, 
0xa4, 0x4e, 0xbc, 0xc8, 0x75, 0x1a, 0xd5, - 0x26, 0xa9, 0x69, 0x06, 0x2f, 0xe6, 0xaa, 0x7b, 0x65, 0xa5, 0x3a, 0x93, 0xa6, 0xc0, 0x39, 0x25, - 0xd1, 0x22, 0xa0, 0x34, 0x46, 0x8c, 0x30, 0x4b, 0x23, 0x91, 0xe6, 0x87, 0x33, 0x4a, 0xa0, 0x0f, - 0xc2, 0xb0, 0xf2, 0x5c, 0xd6, 0x46, 0x9c, 0x6d, 0xec, 0x58, 0x47, 0x60, 0x93, 0x0e, 0x5d, 0xe6, - 0x79, 0x48, 0x44, 0xc6, 0x1a, 0x61, 0x1d, 0x1b, 0x95, 0x49, 0x46, 0x04, 0x18, 0xeb, 0x34, 0xb3, - 0x2f, 0xfd, 0xd6, 0x37, 0xcf, 0xbd, 0xef, 0x77, 0xbf, 0x79, 0xee, 0x7d, 0x7f, 0xf0, 0xcd, 0x73, - 0xef, 0xfb, 0x9e, 0x7b, 0xe7, 0xac, 0xdf, 0xba, 0x77, 0xce, 0xfa, 0xdd, 0x7b, 0xe7, 0xac, 0x3f, - 0xb8, 0x77, 0xce, 0xfa, 0xe3, 0x7b, 0xe7, 0xac, 0x2f, 0xfd, 0xc9, 0xb9, 0xf7, 0xbd, 0x89, 0xe2, - 0x48, 0xb2, 0x97, 0xe8, 0xe8, 0x5c, 0xda, 0xbd, 0xfc, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf4, - 0x14, 0xad, 0xc8, 0xda, 0x15, 0x01, 0x00, + // 15230 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9, + 0x75, 0x18, 0xcc, 0xea, 0x9e, 0xab, 0xdf, 0xdc, 0x39, 0x00, 0x76, 0x30, 0x0b, 0xa0, 0xb1, 0xb5, + 0xbb, 0x58, 0xec, 0x35, 0x20, 0xf6, 0x20, 0x97, 0xbb, 0xcb, 0x15, 0xe7, 0x04, 0x7a, 0x81, 0x19, + 0xf4, 0x66, 0x0f, 0x00, 0x72, 0xb9, 0xe4, 0xc7, 0x42, 0x77, 0xce, 0x4c, 0x71, 0x7a, 0xaa, 0x7a, + 0xab, 0xaa, 0x07, 0x18, 0x7c, 0x64, 0x48, 0xa2, 0x3e, 0x51, 0x22, 0xa5, 0xef, 0x0b, 0xc6, 0x17, + 0xfa, 0x8e, 0xa0, 0x14, 0x8a, 0x2f, 0x24, 0x7d, 0x96, 0x64, 0x4a, 0xb2, 0x69, 0xca, 0x92, 0x2c, + 0xea, 0xf2, 0x15, 0x96, 0x1c, 0x0e, 0x59, 0x56, 0x84, 0x45, 0x45, 0x28, 0x3c, 0x12, 0x21, 0x47, + 0xc8, 0xfa, 0x61, 0x49, 0x3e, 0x7e, 0xd8, 0x63, 0xd9, 0x72, 0xe4, 0x59, 0x99, 0x75, 0x74, 0xf7, + 0x60, 0x07, 0xc3, 0x25, 0x63, 0xff, 0x75, 0xbf, 0xf7, 0xf2, 0x65, 0x56, 0x9e, 0x2f, 0xdf, 0x7b, + 0xf9, 0x1e, 0xbc, 0xb2, 0xf5, 0x52, 0x38, 0xeb, 0xfa, 0x17, 0xb6, 0xda, 0xb7, 0x48, 0xe0, 0x91, + 0x88, 0x84, 0x17, 0x76, 0x88, 0xd7, 0xf0, 0x83, 0x0b, 0x02, 0xe1, 0xb4, 0xdc, 0x0b, 0x75, 0x3f, + 0x20, 0x17, 0x76, 0x2e, 0x5e, 0xd8, 0x20, 0x1e, 0x09, 0x9c, 0x88, 0x34, 0x66, 0x5b, 0x81, 0x1f, + 0xf9, 0x08, 0x71, 0x9a, 0x59, 0xa7, 0xe5, 0xce, 0x52, 0x9a, 0xd9, 0x9d, 0x8b, 0x33, 0xcf, 0x6e, + 0xb8, 0xd1, 0x66, 0xfb, 0xd6, 0x6c, 0xdd, 0xdf, 0xbe, 0xb0, 0xe1, 0x6f, 0xf8, 0x17, 0x18, 0xe9, + 0xad, 0xf6, 0x3a, 0xfb, 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc5, 0xcc, 0x0b, 0x71, 0x35, 0xdb, 0x4e, + 0x7d, 0xd3, 0xf5, 0x48, 0xb0, 0x7b, 0xa1, 0xb5, 0xb5, 0xc1, 0xea, 0x0d, 0x48, 0xe8, 0xb7, 0x83, + 0x3a, 0x49, 0x56, 0xdc, 0xb1, 0x54, 0x78, 0x61, 0x9b, 0x44, 0x4e, 0x46, 0x73, 0x67, 0x2e, 0xe4, + 0x95, 0x0a, 0xda, 0x5e, 0xe4, 0x6e, 0xa7, 0xab, 0xf9, 0x40, 0xb7, 0x02, 0x61, 0x7d, 0x93, 0x6c, + 0x3b, 0xa9, 0x72, 0xcf, 0xe7, 0x95, 0x6b, 0x47, 0x6e, 0xf3, 0x82, 0xeb, 0x45, 0x61, 0x14, 0x24, + 0x0b, 0xd9, 0xdf, 0xb0, 0xe0, 0xec, 0xdc, 0xcd, 0xda, 0x52, 0xd3, 0x09, 0x23, 0xb7, 0x3e, 0xdf, + 0xf4, 0xeb, 0x5b, 0xb5, 0xc8, 0x0f, 0xc8, 0x0d, 0xbf, 0xd9, 0xde, 0x26, 0x35, 0xd6, 0x11, 0xe8, + 0x19, 0x18, 0xda, 0x61, 0xff, 0x2b, 0x8b, 0xd3, 0xd6, 0x59, 0xeb, 0x7c, 0x69, 0x7e, 0xe2, 0xb7, + 0xf7, 0xca, 0xef, 0xbb, 0xb7, 0x57, 0x1e, 0xba, 0x21, 0xe0, 0x58, 0x51, 0xa0, 0x73, 0x30, 0xb0, + 0x1e, 0xae, 0xed, 0xb6, 0xc8, 0x74, 0x81, 0xd1, 0x8e, 0x09, 0xda, 0x81, 0xe5, 0x1a, 0x85, 0x62, + 0x81, 0x45, 0x17, 0xa0, 0xd4, 0x72, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0x2e, 0x9e, 0xb5, 0xce, + 0xf7, 0xcf, 0x4f, 0x0a, 0xd2, 0x52, 0x55, 0x22, 0x70, 0x4c, 0x43, 0x9b, 0x11, 0x10, 0xa7, 0x71, + 0xcd, 0x6b, 0xee, 0x4e, 0xf7, 0x9d, 0xb5, 0xce, 0x0f, 0xc5, 0xcd, 0xc0, 0x02, 0x8e, 
0x15, 0x85, + 0xfd, 0xe5, 0x02, 0x0c, 0xcd, 0xad, 0xaf, 0xbb, 0x9e, 0x1b, 0xed, 0xa2, 0x1b, 0x30, 0xe2, 0xf9, + 0x0d, 0x22, 0xff, 0xb3, 0xaf, 0x18, 0x7e, 0xee, 0xec, 0x6c, 0x7a, 0x2a, 0xcd, 0xae, 0x6a, 0x74, + 0xf3, 0x13, 0xf7, 0xf6, 0xca, 0x23, 0x3a, 0x04, 0x1b, 0x7c, 0x10, 0x86, 0xe1, 0x96, 0xdf, 0x50, + 0x6c, 0x0b, 0x8c, 0x6d, 0x39, 0x8b, 0x6d, 0x35, 0x26, 0x9b, 0x1f, 0xbf, 0xb7, 0x57, 0x1e, 0xd6, + 0x00, 0x58, 0x67, 0x82, 0x6e, 0xc1, 0x38, 0xfd, 0xeb, 0x45, 0xae, 0xe2, 0x5b, 0x64, 0x7c, 0x1f, + 0xcd, 0xe3, 0xab, 0x91, 0xce, 0x4f, 0xdd, 0xdb, 0x2b, 0x8f, 0x27, 0x80, 0x38, 0xc9, 0xd0, 0xbe, + 0x0b, 0x63, 0x73, 0x51, 0xe4, 0xd4, 0x37, 0x49, 0x83, 0x8f, 0x20, 0x7a, 0x01, 0xfa, 0x3c, 0x67, + 0x9b, 0x88, 0xf1, 0x3d, 0x2b, 0x3a, 0xb6, 0x6f, 0xd5, 0xd9, 0x26, 0xfb, 0x7b, 0xe5, 0x89, 0xeb, + 0x9e, 0xfb, 0x76, 0x5b, 0xcc, 0x0a, 0x0a, 0xc3, 0x8c, 0x1a, 0x3d, 0x07, 0xd0, 0x20, 0x3b, 0x6e, + 0x9d, 0x54, 0x9d, 0x68, 0x53, 0x8c, 0x37, 0x12, 0x65, 0x61, 0x51, 0x61, 0xb0, 0x46, 0x65, 0xdf, + 0x81, 0xd2, 0xdc, 0x8e, 0xef, 0x36, 0xaa, 0x7e, 0x23, 0x44, 0x5b, 0x30, 0xde, 0x0a, 0xc8, 0x3a, + 0x09, 0x14, 0x68, 0xda, 0x3a, 0x5b, 0x3c, 0x3f, 0xfc, 0xdc, 0xf9, 0xcc, 0x8f, 0x35, 0x49, 0x97, + 0xbc, 0x28, 0xd8, 0x9d, 0x7f, 0x48, 0xd4, 0x37, 0x9e, 0xc0, 0xe2, 0x24, 0x67, 0xfb, 0x9f, 0x14, + 0xe0, 0xf8, 0xdc, 0xdd, 0x76, 0x40, 0x16, 0xdd, 0x70, 0x2b, 0x39, 0xc3, 0x1b, 0x6e, 0xb8, 0xb5, + 0x1a, 0xf7, 0x80, 0x9a, 0x5a, 0x8b, 0x02, 0x8e, 0x15, 0x05, 0x7a, 0x16, 0x06, 0xe9, 0xef, 0xeb, + 0xb8, 0x22, 0x3e, 0x79, 0x4a, 0x10, 0x0f, 0x2f, 0x3a, 0x91, 0xb3, 0xc8, 0x51, 0x58, 0xd2, 0xa0, + 0x15, 0x18, 0xae, 0xb3, 0x05, 0xb9, 0xb1, 0xe2, 0x37, 0x08, 0x1b, 0xcc, 0xd2, 0xfc, 0xd3, 0x94, + 0x7c, 0x21, 0x06, 0xef, 0xef, 0x95, 0xa7, 0x79, 0xdb, 0x04, 0x0b, 0x0d, 0x87, 0xf5, 0xf2, 0xc8, + 0x56, 0xeb, 0xab, 0x8f, 0x71, 0x82, 0x8c, 0xb5, 0x75, 0x5e, 0x5b, 0x2a, 0xfd, 0x6c, 0xa9, 0x8c, + 0x64, 0x2f, 0x13, 0x74, 0x11, 0xfa, 0xb6, 0x5c, 0xaf, 0x31, 0x3d, 0xc0, 0x78, 0x9d, 0xa6, 0x63, + 0x7e, 0xc5, 0xf5, 0x1a, 0xfb, 0x7b, 0xe5, 0x49, 0xa3, 0x39, 0x14, 0x88, 0x19, 0xa9, 0xfd, 0x9f, + 0x2c, 0x28, 0x33, 0xdc, 0xb2, 0xdb, 0x24, 0x55, 0x12, 0x84, 0x6e, 0x18, 0x11, 0x2f, 0x32, 0x3a, + 0xf4, 0x39, 0x80, 0x90, 0xd4, 0x03, 0x12, 0x69, 0x5d, 0xaa, 0x26, 0x46, 0x4d, 0x61, 0xb0, 0x46, + 0x45, 0x37, 0x84, 0x70, 0xd3, 0x09, 0xd8, 0xfc, 0x12, 0x1d, 0xab, 0x36, 0x84, 0x9a, 0x44, 0xe0, + 0x98, 0xc6, 0xd8, 0x10, 0x8a, 0xdd, 0x36, 0x04, 0xf4, 0x61, 0x18, 0x8f, 0x2b, 0x0b, 0x5b, 0x4e, + 0x5d, 0x76, 0x20, 0x5b, 0x32, 0x35, 0x13, 0x85, 0x93, 0xb4, 0xf6, 0xdf, 0xb6, 0xc4, 0xe4, 0xa1, + 0x5f, 0xfd, 0x2e, 0xff, 0x56, 0xfb, 0x57, 0x2c, 0x18, 0x9c, 0x77, 0xbd, 0x86, 0xeb, 0x6d, 0xa0, + 0x4f, 0xc1, 0x10, 0x3d, 0x9b, 0x1a, 0x4e, 0xe4, 0x88, 0x7d, 0xef, 0xfd, 0xda, 0xda, 0x52, 0x47, + 0xc5, 0x6c, 0x6b, 0x6b, 0x83, 0x02, 0xc2, 0x59, 0x4a, 0x4d, 0x57, 0xdb, 0xb5, 0x5b, 0x9f, 0x26, + 0xf5, 0x68, 0x85, 0x44, 0x4e, 0xfc, 0x39, 0x31, 0x0c, 0x2b, 0xae, 0xe8, 0x0a, 0x0c, 0x44, 0x4e, + 0xb0, 0x41, 0x22, 0xb1, 0x01, 0x66, 0x6e, 0x54, 0xbc, 0x24, 0xa6, 0x2b, 0x92, 0x78, 0x75, 0x12, + 0x1f, 0x0b, 0x6b, 0xac, 0x28, 0x16, 0x2c, 0xec, 0xff, 0x3e, 0x08, 0x27, 0x17, 0x6a, 0x95, 0x9c, + 0x79, 0x75, 0x0e, 0x06, 0x1a, 0x81, 0xbb, 0x43, 0x02, 0xd1, 0xcf, 0x8a, 0xcb, 0x22, 0x83, 0x62, + 0x81, 0x45, 0x2f, 0xc1, 0x08, 0x3f, 0x90, 0x2e, 0x3b, 0x5e, 0xa3, 0x29, 0xbb, 0xf8, 0x98, 0xa0, + 0x1e, 0xb9, 0xa1, 0xe1, 0xb0, 0x41, 0x79, 0xc0, 0x49, 0x75, 0x2e, 0xb1, 0x18, 0xf3, 0x0e, 0xbb, + 0x2f, 0x58, 0x30, 0xc1, 0xab, 0x99, 0x8b, 0xa2, 0xc0, 0xbd, 0xd5, 0x8e, 0x48, 0x38, 0xdd, 0xcf, + 0x76, 0xba, 
0x85, 0xac, 0xde, 0xca, 0xed, 0x81, 0xd9, 0x1b, 0x09, 0x2e, 0x7c, 0x13, 0x9c, 0x16, + 0xf5, 0x4e, 0x24, 0xd1, 0x38, 0x55, 0x2d, 0xfa, 0x3e, 0x0b, 0x66, 0xea, 0xbe, 0x17, 0x05, 0x7e, + 0xb3, 0x49, 0x82, 0x6a, 0xfb, 0x56, 0xd3, 0x0d, 0x37, 0xf9, 0x3c, 0xc5, 0x64, 0x9d, 0xed, 0x04, + 0x39, 0x63, 0xa8, 0x88, 0xc4, 0x18, 0x9e, 0xb9, 0xb7, 0x57, 0x9e, 0x59, 0xc8, 0x65, 0x85, 0x3b, + 0x54, 0x83, 0xb6, 0x00, 0xd1, 0xa3, 0xb4, 0x16, 0x39, 0x1b, 0x24, 0xae, 0x7c, 0xb0, 0xf7, 0xca, + 0x4f, 0xdc, 0xdb, 0x2b, 0xa3, 0xd5, 0x14, 0x0b, 0x9c, 0xc1, 0x16, 0xbd, 0x0d, 0xc7, 0x28, 0x34, + 0xf5, 0xad, 0x43, 0xbd, 0x57, 0x37, 0x7d, 0x6f, 0xaf, 0x7c, 0x6c, 0x35, 0x83, 0x09, 0xce, 0x64, + 0x8d, 0xbe, 0xc7, 0x82, 0x93, 0xf1, 0xe7, 0x2f, 0xdd, 0x69, 0x39, 0x5e, 0x23, 0xae, 0xb8, 0xd4, + 0x7b, 0xc5, 0x74, 0x4f, 0x3e, 0xb9, 0x90, 0xc7, 0x09, 0xe7, 0x57, 0x82, 0x3c, 0x98, 0xa2, 0x4d, + 0x4b, 0xd6, 0x0d, 0xbd, 0xd7, 0xfd, 0xd0, 0xbd, 0xbd, 0xf2, 0xd4, 0x6a, 0x9a, 0x07, 0xce, 0x62, + 0x3c, 0xb3, 0x00, 0xc7, 0x33, 0x67, 0x27, 0x9a, 0x80, 0xe2, 0x16, 0xe1, 0x52, 0x57, 0x09, 0xd3, + 0x9f, 0xe8, 0x18, 0xf4, 0xef, 0x38, 0xcd, 0xb6, 0x58, 0x98, 0x98, 0xff, 0x79, 0xb9, 0xf0, 0x92, + 0x65, 0xff, 0xd3, 0x22, 0x8c, 0x2f, 0xd4, 0x2a, 0xf7, 0xb5, 0xea, 0xf5, 0x63, 0xaf, 0xd0, 0xf1, + 0xd8, 0x8b, 0x0f, 0xd1, 0x62, 0xee, 0x21, 0xfa, 0xdd, 0x19, 0x4b, 0xb6, 0x8f, 0x2d, 0xd9, 0x0f, + 0xe5, 0x2c, 0xd9, 0x43, 0x5e, 0xa8, 0x3b, 0x39, 0xb3, 0xb6, 0x9f, 0x0d, 0x60, 0xa6, 0x84, 0x74, + 0xd5, 0xaf, 0x3b, 0xcd, 0xe4, 0x56, 0x7b, 0xc0, 0xa9, 0x7b, 0x38, 0xe3, 0x58, 0x87, 0x91, 0x05, + 0xa7, 0xe5, 0xdc, 0x72, 0x9b, 0x6e, 0xe4, 0x92, 0x10, 0x3d, 0x01, 0x45, 0xa7, 0xd1, 0x60, 0xd2, + 0x5d, 0x69, 0xfe, 0xf8, 0xbd, 0xbd, 0x72, 0x71, 0xae, 0x41, 0xc5, 0x0c, 0x50, 0x54, 0xbb, 0x98, + 0x52, 0xa0, 0xa7, 0xa0, 0xaf, 0x11, 0xf8, 0xad, 0xe9, 0x02, 0xa3, 0xa4, 0xab, 0xbc, 0x6f, 0x31, + 0xf0, 0x5b, 0x09, 0x52, 0x46, 0x63, 0xff, 0x56, 0x01, 0x4e, 0x2d, 0x90, 0xd6, 0xe6, 0x72, 0x2d, + 0xe7, 0xbc, 0x38, 0x0f, 0x43, 0xdb, 0xbe, 0xe7, 0x46, 0x7e, 0x10, 0x8a, 0xaa, 0xd9, 0x8c, 0x58, + 0x11, 0x30, 0xac, 0xb0, 0xe8, 0x2c, 0xf4, 0xb5, 0x62, 0x21, 0x76, 0x44, 0x0a, 0xc0, 0x4c, 0x7c, + 0x65, 0x18, 0x4a, 0xd1, 0x0e, 0x49, 0x20, 0x66, 0x8c, 0xa2, 0xb8, 0x1e, 0x92, 0x00, 0x33, 0x4c, + 0x2c, 0x09, 0x50, 0x19, 0x41, 0x9c, 0x08, 0x09, 0x49, 0x80, 0x62, 0xb0, 0x46, 0x85, 0xaa, 0x50, + 0x0a, 0x13, 0x23, 0xdb, 0xd3, 0xd2, 0x1c, 0x65, 0xa2, 0x82, 0x1a, 0xc9, 0x98, 0x89, 0x71, 0x82, + 0x0d, 0x74, 0x15, 0x15, 0xbe, 0x5e, 0x00, 0xc4, 0xbb, 0xf0, 0xdb, 0xac, 0xe3, 0xae, 0xa7, 0x3b, + 0xae, 0xf7, 0x25, 0x71, 0x58, 0xbd, 0xf7, 0x9f, 0x2d, 0x38, 0xb5, 0xe0, 0x7a, 0x0d, 0x12, 0xe4, + 0x4c, 0xc0, 0x07, 0x73, 0x77, 0x3e, 0x98, 0x90, 0x62, 0x4c, 0xb1, 0xbe, 0x43, 0x98, 0x62, 0xf6, + 0x5f, 0x5a, 0x80, 0xf8, 0x67, 0xbf, 0xeb, 0x3e, 0xf6, 0x7a, 0xfa, 0x63, 0x0f, 0x61, 0x5a, 0xd8, + 0x7f, 0xc7, 0x82, 0xe1, 0x85, 0xa6, 0xe3, 0x6e, 0x8b, 0x4f, 0x5d, 0x80, 0x49, 0xa9, 0x28, 0x62, + 0x60, 0x4d, 0xf6, 0xa7, 0x9b, 0xdb, 0x24, 0x4e, 0x22, 0x71, 0x9a, 0x1e, 0x7d, 0x1c, 0x4e, 0x1a, + 0xc0, 0x35, 0xb2, 0xdd, 0x6a, 0x3a, 0x91, 0x7e, 0x2b, 0x60, 0xa7, 0x3f, 0xce, 0x23, 0xc2, 0xf9, + 0xe5, 0xed, 0xab, 0x30, 0xb6, 0xd0, 0x74, 0x89, 0x17, 0x55, 0xaa, 0x0b, 0xbe, 0xb7, 0xee, 0x6e, + 0xa0, 0x97, 0x61, 0x2c, 0x72, 0xb7, 0x89, 0xdf, 0x8e, 0x6a, 0xa4, 0xee, 0x7b, 0xec, 0xae, 0x6d, + 0x9d, 0xef, 0x9f, 0x47, 0xf7, 0xf6, 0xca, 0x63, 0x6b, 0x06, 0x06, 0x27, 0x28, 0xed, 0x3f, 0xa2, + 0x23, 0xee, 0x6f, 0xb7, 0x7c, 0x8f, 0x78, 0xd1, 0x82, 0xef, 0x35, 0xb8, 0x4e, 0xe6, 0x65, 0xe8, + 0x8b, 0xe8, 0x08, 0xf2, 0x2f, 0x3f, 
0x27, 0x97, 0x36, 0x1d, 0xb7, 0xfd, 0xbd, 0xf2, 0x89, 0x74, + 0x09, 0x36, 0xb2, 0xac, 0x0c, 0xfa, 0x10, 0x0c, 0x84, 0x91, 0x13, 0xb5, 0x43, 0xf1, 0xa9, 0x8f, + 0xc8, 0xf1, 0xaf, 0x31, 0xe8, 0xfe, 0x5e, 0x79, 0x5c, 0x15, 0xe3, 0x20, 0x2c, 0x0a, 0xa0, 0x27, + 0x61, 0x70, 0x9b, 0x84, 0xa1, 0xb3, 0x21, 0xcf, 0xef, 0x71, 0x51, 0x76, 0x70, 0x85, 0x83, 0xb1, + 0xc4, 0xa3, 0x47, 0xa1, 0x9f, 0x04, 0x81, 0x1f, 0x88, 0x5d, 0x65, 0x54, 0x10, 0xf6, 0x2f, 0x51, + 0x20, 0xe6, 0x38, 0xfb, 0x5f, 0x5a, 0x30, 0xae, 0xda, 0xca, 0xeb, 0x3a, 0x82, 0x7b, 0xd3, 0x9b, + 0x00, 0x75, 0xf9, 0x81, 0x21, 0x3b, 0xef, 0x86, 0x9f, 0x3b, 0x97, 0x29, 0x5a, 0xa4, 0xba, 0x31, + 0xe6, 0xac, 0x40, 0x21, 0xd6, 0xb8, 0xd9, 0xbf, 0x6e, 0xc1, 0x54, 0xe2, 0x8b, 0xae, 0xba, 0x61, + 0x84, 0xde, 0x4a, 0x7d, 0xd5, 0x6c, 0x6f, 0x5f, 0x45, 0x4b, 0xb3, 0x6f, 0x52, 0x8b, 0x4f, 0x42, + 0xb4, 0x2f, 0xba, 0x0c, 0xfd, 0x6e, 0x44, 0xb6, 0xe5, 0xc7, 0x3c, 0xda, 0xf1, 0x63, 0x78, 0xab, + 0xe2, 0x11, 0xa9, 0xd0, 0x92, 0x98, 0x33, 0xb0, 0x7f, 0xab, 0x08, 0x25, 0x3e, 0x6d, 0x57, 0x9c, + 0xd6, 0x11, 0x8c, 0xc5, 0xd3, 0x50, 0x72, 0xb7, 0xb7, 0xdb, 0x91, 0x73, 0x4b, 0x1c, 0x40, 0x43, + 0x7c, 0x33, 0xa8, 0x48, 0x20, 0x8e, 0xf1, 0xa8, 0x02, 0x7d, 0xac, 0x29, 0xfc, 0x2b, 0x9f, 0xc8, + 0xfe, 0x4a, 0xd1, 0xf6, 0xd9, 0x45, 0x27, 0x72, 0xb8, 0xec, 0xa7, 0x4e, 0x3e, 0x0a, 0xc2, 0x8c, + 0x05, 0x72, 0x00, 0x6e, 0xb9, 0x9e, 0x13, 0xec, 0x52, 0xd8, 0x74, 0x91, 0x31, 0x7c, 0xb6, 0x33, + 0xc3, 0x79, 0x45, 0xcf, 0xd9, 0xaa, 0x0f, 0x8b, 0x11, 0x58, 0x63, 0x3a, 0xf3, 0x41, 0x28, 0x29, + 0xe2, 0x83, 0x88, 0x70, 0x33, 0x1f, 0x86, 0xf1, 0x44, 0x5d, 0xdd, 0x8a, 0x8f, 0xe8, 0x12, 0xe0, + 0xaf, 0xb2, 0x2d, 0x43, 0xb4, 0x7a, 0xc9, 0xdb, 0x11, 0x3b, 0xe7, 0x5d, 0x38, 0xd6, 0xcc, 0xd8, + 0x7b, 0xc5, 0xb8, 0xf6, 0xbe, 0x57, 0x9f, 0x12, 0x9f, 0x7d, 0x2c, 0x0b, 0x8b, 0x33, 0xeb, 0xa0, + 0x52, 0x8d, 0xdf, 0xa2, 0x0b, 0xc4, 0x69, 0xea, 0x17, 0x84, 0x6b, 0x02, 0x86, 0x15, 0x96, 0xee, + 0x77, 0xc7, 0x54, 0xe3, 0xaf, 0x90, 0xdd, 0x1a, 0x69, 0x92, 0x7a, 0xe4, 0x07, 0xdf, 0xd2, 0xe6, + 0x9f, 0xe6, 0xbd, 0xcf, 0xb7, 0xcb, 0x61, 0xc1, 0xa0, 0x78, 0x85, 0xec, 0xf2, 0xa1, 0xd0, 0xbf, + 0xae, 0xd8, 0xf1, 0xeb, 0xbe, 0x6a, 0xc1, 0xa8, 0xfa, 0xba, 0x23, 0xd8, 0x17, 0xe6, 0xcd, 0x7d, + 0xe1, 0x74, 0xc7, 0x09, 0x9e, 0xb3, 0x23, 0x7c, 0xbd, 0x00, 0x27, 0x15, 0x0d, 0xbd, 0xcd, 0xf0, + 0x3f, 0x62, 0x56, 0x5d, 0x80, 0x92, 0xa7, 0xf4, 0x7a, 0x96, 0xa9, 0x50, 0x8b, 0xb5, 0x7a, 0x31, + 0x0d, 0x15, 0x4a, 0xbd, 0xf8, 0x98, 0x1d, 0xd1, 0x15, 0xde, 0x42, 0xb9, 0x3d, 0x0f, 0xc5, 0xb6, + 0xdb, 0x10, 0x07, 0xcc, 0xfb, 0x65, 0x6f, 0x5f, 0xaf, 0x2c, 0xee, 0xef, 0x95, 0x1f, 0xc9, 0x33, + 0xb6, 0xd0, 0x93, 0x2d, 0x9c, 0xbd, 0x5e, 0x59, 0xc4, 0xb4, 0x30, 0x9a, 0x83, 0x71, 0x79, 0x42, + 0xdf, 0xa0, 0x02, 0xa2, 0xef, 0x89, 0x73, 0x48, 0x69, 0xad, 0xb1, 0x89, 0xc6, 0x49, 0x7a, 0xb4, + 0x08, 0x13, 0x5b, 0xed, 0x5b, 0xa4, 0x49, 0x22, 0xfe, 0xc1, 0x57, 0x08, 0xd7, 0xe9, 0x96, 0xe2, + 0xbb, 0xe4, 0x95, 0x04, 0x1e, 0xa7, 0x4a, 0xd8, 0x7f, 0xc3, 0xce, 0x03, 0xd1, 0x7b, 0xd5, 0xc0, + 0xa7, 0x13, 0x8b, 0x72, 0xff, 0x56, 0x4e, 0xe7, 0x5e, 0x66, 0xc5, 0x15, 0xb2, 0xbb, 0xe6, 0xd3, + 0xbb, 0x44, 0xf6, 0xac, 0x30, 0xe6, 0x7c, 0x5f, 0xc7, 0x39, 0xff, 0x8b, 0x05, 0x38, 0xae, 0x7a, + 0xc0, 0x10, 0x5b, 0xbf, 0xdd, 0xfb, 0xe0, 0x22, 0x0c, 0x37, 0xc8, 0xba, 0xd3, 0x6e, 0x46, 0xca, + 0xc0, 0xd0, 0xcf, 0x8d, 0x4c, 0x8b, 0x31, 0x18, 0xeb, 0x34, 0x07, 0xe8, 0xb6, 0x9f, 0x1f, 0x65, + 0x07, 0x71, 0xe4, 0xd0, 0x39, 0xae, 0x56, 0x8d, 0x95, 0xbb, 0x6a, 0x1e, 0x85, 0x7e, 0x77, 0x9b, + 0x0a, 0x66, 0x05, 0x53, 0xde, 0xaa, 0x50, 0x20, 0xe6, 0x38, 
0xf4, 0x38, 0x0c, 0xd6, 0xfd, 0xed, + 0x6d, 0xc7, 0x6b, 0xb0, 0x23, 0xaf, 0x34, 0x3f, 0x4c, 0x65, 0xb7, 0x05, 0x0e, 0xc2, 0x12, 0x87, + 0x4e, 0x41, 0x9f, 0x13, 0x6c, 0x70, 0xad, 0x4b, 0x69, 0x7e, 0x88, 0xd6, 0x34, 0x17, 0x6c, 0x84, + 0x98, 0x41, 0xe9, 0xa5, 0xf1, 0xb6, 0x1f, 0x6c, 0xb9, 0xde, 0xc6, 0xa2, 0x1b, 0x88, 0x25, 0xa1, + 0xce, 0xc2, 0x9b, 0x0a, 0x83, 0x35, 0x2a, 0xb4, 0x0c, 0xfd, 0x2d, 0x3f, 0x88, 0xc2, 0xe9, 0x01, + 0xd6, 0xdd, 0x8f, 0xe4, 0x6c, 0x44, 0xfc, 0x6b, 0xab, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff, 0x85, + 0x98, 0x17, 0x47, 0x57, 0x61, 0x90, 0x78, 0x3b, 0xcb, 0x81, 0xbf, 0x3d, 0x3d, 0x95, 0xcf, 0x69, + 0x89, 0x93, 0xf0, 0x69, 0x16, 0xcb, 0xa8, 0x02, 0x8c, 0x25, 0x0b, 0xf4, 0x21, 0x28, 0x12, 0x6f, + 0x67, 0x7a, 0x90, 0x71, 0x9a, 0xc9, 0xe1, 0x74, 0xc3, 0x09, 0xe2, 0x3d, 0x7f, 0xc9, 0xdb, 0xc1, + 0xb4, 0x0c, 0xfa, 0x18, 0x94, 0xe4, 0x86, 0x11, 0x0a, 0x75, 0x66, 0xe6, 0x84, 0x95, 0xdb, 0x0c, + 0x26, 0x6f, 0xb7, 0xdd, 0x80, 0x6c, 0x13, 0x2f, 0x0a, 0xe3, 0x1d, 0x52, 0x62, 0x43, 0x1c, 0x73, + 0x43, 0x75, 0x18, 0x09, 0x48, 0xe8, 0xde, 0x25, 0x55, 0xbf, 0xe9, 0xd6, 0x77, 0xa7, 0x1f, 0x62, + 0xcd, 0x7b, 0xb2, 0x63, 0x97, 0x61, 0xad, 0x40, 0xac, 0x6e, 0xd7, 0xa1, 0xd8, 0x60, 0x8a, 0xde, + 0x80, 0xd1, 0x80, 0x84, 0x91, 0x13, 0x44, 0xa2, 0x96, 0x69, 0x65, 0x1e, 0x1b, 0xc5, 0x3a, 0x82, + 0x5f, 0x27, 0xe2, 0x6a, 0x62, 0x0c, 0x36, 0x39, 0xa0, 0x8f, 0x49, 0xdd, 0xff, 0x8a, 0xdf, 0xf6, + 0xa2, 0x70, 0xba, 0xc4, 0xda, 0x9d, 0x69, 0x95, 0xbd, 0x11, 0xd3, 0x25, 0x8d, 0x03, 0xbc, 0x30, + 0x36, 0x58, 0xa1, 0x4f, 0xc0, 0x28, 0xff, 0xcf, 0x6d, 0x9b, 0xe1, 0xf4, 0x71, 0xc6, 0xfb, 0x6c, + 0x3e, 0x6f, 0x4e, 0x38, 0x7f, 0x5c, 0x30, 0x1f, 0xd5, 0xa1, 0x21, 0x36, 0xb9, 0x21, 0x0c, 0xa3, + 0x4d, 0x77, 0x87, 0x78, 0x24, 0x0c, 0xab, 0x81, 0x7f, 0x8b, 0x08, 0x55, 0xed, 0xc9, 0x6c, 0x5b, + 0xa8, 0x7f, 0x8b, 0xcc, 0x4f, 0x52, 0x9e, 0x57, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0xae, 0xc3, 0x18, + 0xbd, 0x1b, 0xbb, 0x31, 0xd3, 0xe1, 0x6e, 0x4c, 0xd9, 0x7d, 0x10, 0x1b, 0x85, 0x70, 0x82, 0x09, + 0xba, 0x06, 0x23, 0xac, 0xcf, 0xdb, 0x2d, 0xce, 0xf4, 0x44, 0x37, 0xa6, 0xcc, 0x94, 0x5e, 0xd3, + 0x8a, 0x60, 0x83, 0x01, 0x7a, 0x1d, 0x4a, 0x4d, 0x77, 0x9d, 0xd4, 0x77, 0xeb, 0x4d, 0x32, 0x3d, + 0xc2, 0xb8, 0x65, 0x6e, 0x86, 0x57, 0x25, 0x11, 0x97, 0xcf, 0xd5, 0x5f, 0x1c, 0x17, 0x47, 0x37, + 0xe0, 0x44, 0x44, 0x82, 0x6d, 0xd7, 0x73, 0xe8, 0x26, 0x26, 0xae, 0x84, 0xcc, 0x44, 0x3d, 0xca, + 0x66, 0xd7, 0x19, 0x31, 0x1a, 0x27, 0xd6, 0x32, 0xa9, 0x70, 0x4e, 0x69, 0x74, 0x07, 0xa6, 0x33, + 0x30, 0x7c, 0xde, 0x1e, 0x63, 0x9c, 0x5f, 0x15, 0x9c, 0xa7, 0xd7, 0x72, 0xe8, 0xf6, 0x3b, 0xe0, + 0x70, 0x2e, 0x77, 0x74, 0x0d, 0xc6, 0xd9, 0xce, 0x59, 0x6d, 0x37, 0x9b, 0xa2, 0xc2, 0x31, 0x56, + 0xe1, 0xe3, 0x52, 0x8e, 0xa8, 0x98, 0xe8, 0xfd, 0xbd, 0x32, 0xc4, 0xff, 0x70, 0xb2, 0x34, 0xba, + 0xc5, 0xac, 0xa1, 0xed, 0xc0, 0x8d, 0x76, 0xe9, 0xaa, 0x22, 0x77, 0xa2, 0xe9, 0xf1, 0x8e, 0x9a, + 0x21, 0x9d, 0x54, 0x99, 0x4c, 0x75, 0x20, 0x4e, 0x32, 0xa4, 0x47, 0x41, 0x18, 0x35, 0x5c, 0x6f, + 0x7a, 0x82, 0xdf, 0xa7, 0xe4, 0x4e, 0x5a, 0xa3, 0x40, 0xcc, 0x71, 0xcc, 0x12, 0x4a, 0x7f, 0x5c, + 0xa3, 0x27, 0xee, 0x24, 0x23, 0x8c, 0x2d, 0xa1, 0x12, 0x81, 0x63, 0x1a, 0x2a, 0x04, 0x47, 0xd1, + 0xee, 0x34, 0x62, 0xa4, 0x6a, 0x43, 0x5c, 0x5b, 0xfb, 0x18, 0xa6, 0x70, 0xfb, 0x16, 0x8c, 0xa9, + 0x6d, 0x82, 0xf5, 0x09, 0x2a, 0x43, 0x3f, 0x13, 0xfb, 0x84, 0x1e, 0xb3, 0x44, 0x9b, 0xc0, 0x44, + 0x42, 0xcc, 0xe1, 0xac, 0x09, 0xee, 0x5d, 0x32, 0xbf, 0x1b, 0x11, 0xae, 0x8b, 0x28, 0x6a, 0x4d, + 0x90, 0x08, 0x1c, 0xd3, 0xd8, 0xff, 0x83, 0x8b, 0xcf, 0xf1, 0x29, 0xd1, 0xc3, 0xb9, 
0xf8, 0x0c, + 0x0c, 0x6d, 0xfa, 0x61, 0x44, 0xa9, 0x59, 0x1d, 0xfd, 0xb1, 0xc0, 0x7c, 0x59, 0xc0, 0xb1, 0xa2, + 0x40, 0xaf, 0xc0, 0x68, 0x5d, 0xaf, 0x40, 0x1c, 0xea, 0x6a, 0x1b, 0x31, 0x6a, 0xc7, 0x26, 0x2d, + 0x7a, 0x09, 0x86, 0x98, 0x77, 0x4f, 0xdd, 0x6f, 0x0a, 0x69, 0x53, 0x4a, 0x26, 0x43, 0x55, 0x01, + 0xdf, 0xd7, 0x7e, 0x63, 0x45, 0x8d, 0xce, 0xc1, 0x00, 0x6d, 0x42, 0xa5, 0x2a, 0x8e, 0x53, 0xa5, + 0x92, 0xbb, 0xcc, 0xa0, 0x58, 0x60, 0xed, 0x5f, 0xb7, 0x98, 0x2c, 0x95, 0xde, 0xf3, 0xd1, 0x65, + 0x76, 0x68, 0xb0, 0x13, 0x44, 0x53, 0x89, 0x3d, 0xa6, 0x9d, 0x04, 0x0a, 0xb7, 0x9f, 0xf8, 0x8f, + 0x8d, 0x92, 0xe8, 0xcd, 0xe4, 0xc9, 0xc0, 0x05, 0x8a, 0x17, 0x64, 0x17, 0x24, 0x4f, 0x87, 0x87, + 0xe3, 0x23, 0x8e, 0xb6, 0xa7, 0xd3, 0x11, 0x61, 0xff, 0x9f, 0x05, 0x6d, 0x96, 0xd4, 0x22, 0x27, + 0x22, 0xa8, 0x0a, 0x83, 0xb7, 0x1d, 0x37, 0x72, 0xbd, 0x0d, 0x21, 0xf7, 0x75, 0x3e, 0xe8, 0x58, + 0xa1, 0x9b, 0xbc, 0x00, 0x97, 0x5e, 0xc4, 0x1f, 0x2c, 0xd9, 0x50, 0x8e, 0x41, 0xdb, 0xf3, 0x28, + 0xc7, 0x42, 0xaf, 0x1c, 0x31, 0x2f, 0xc0, 0x39, 0x8a, 0x3f, 0x58, 0xb2, 0x41, 0x6f, 0x01, 0xc8, + 0x1d, 0x82, 0x34, 0x84, 0x57, 0xd0, 0x33, 0xdd, 0x99, 0xae, 0xa9, 0x32, 0xf3, 0x63, 0x54, 0x36, + 0x8a, 0xff, 0x63, 0x8d, 0x9f, 0x1d, 0x69, 0x63, 0xaa, 0x37, 0x06, 0x7d, 0x9c, 0x2e, 0x51, 0x27, + 0x88, 0x48, 0x63, 0x2e, 0x12, 0x9d, 0xf3, 0x54, 0x6f, 0x97, 0xc3, 0x35, 0x77, 0x9b, 0xe8, 0xcb, + 0x59, 0x30, 0xc1, 0x31, 0x3f, 0xfb, 0x97, 0x8b, 0x30, 0x9d, 0xd7, 0x5c, 0xba, 0x68, 0xc8, 0x1d, + 0x37, 0x5a, 0xa0, 0x62, 0xad, 0x65, 0x2e, 0x9a, 0x25, 0x01, 0xc7, 0x8a, 0x82, 0xce, 0xde, 0xd0, + 0xdd, 0x90, 0x77, 0xfb, 0xfe, 0x78, 0xf6, 0xd6, 0x18, 0x14, 0x0b, 0x2c, 0xa5, 0x0b, 0x88, 0x13, + 0x0a, 0xb7, 0x33, 0x6d, 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0x6b, 0x19, 0xfb, 0xba, 0x68, 0x19, + 0x8d, 0x2e, 0xea, 0x3f, 0xdc, 0x2e, 0x42, 0x9f, 0x04, 0x58, 0x77, 0x3d, 0x37, 0xdc, 0x64, 0xdc, + 0x07, 0x0e, 0xcc, 0x5d, 0x09, 0xc5, 0xcb, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x17, 0x61, 0x58, 0x6d, + 0x20, 0x95, 0x45, 0x66, 0x83, 0xd7, 0x7c, 0x9a, 0xe2, 0xdd, 0x74, 0x11, 0xeb, 0x74, 0xf6, 0xa7, + 0x93, 0xf3, 0x45, 0xac, 0x00, 0xad, 0x7f, 0xad, 0x5e, 0xfb, 0xb7, 0xd0, 0xb9, 0x7f, 0xed, 0x6f, + 0x0e, 0xc0, 0xb8, 0x51, 0x59, 0x3b, 0xec, 0x61, 0xcf, 0xbd, 0x44, 0x0f, 0x20, 0x27, 0x22, 0x62, + 0xfd, 0xd9, 0xdd, 0x97, 0x8a, 0x7e, 0x48, 0xd1, 0x15, 0xc0, 0xcb, 0xa3, 0x4f, 0x42, 0xa9, 0xe9, + 0x84, 0x4c, 0x63, 0x49, 0xc4, 0xba, 0xeb, 0x85, 0x59, 0x7c, 0x21, 0x74, 0xc2, 0x48, 0x3b, 0xf5, + 0x39, 0xef, 0x98, 0x25, 0x3d, 0x29, 0xa9, 0x7c, 0x25, 0xfd, 0x1a, 0x55, 0x23, 0xa8, 0x10, 0xb6, + 0x8b, 0x39, 0x0e, 0xbd, 0xc4, 0xb6, 0x56, 0x3a, 0x2b, 0x16, 0xa8, 0x34, 0xca, 0xa6, 0x59, 0xbf, + 0x21, 0x64, 0x2b, 0x1c, 0x36, 0x28, 0xe3, 0x3b, 0xd9, 0x40, 0x87, 0x3b, 0xd9, 0x93, 0x30, 0xc8, + 0x7e, 0xa8, 0x19, 0xa0, 0x46, 0xa3, 0xc2, 0xc1, 0x58, 0xe2, 0x93, 0x13, 0x66, 0xa8, 0xb7, 0x09, + 0x43, 0x6f, 0x7d, 0x62, 0x52, 0x33, 0xff, 0x87, 0x21, 0xbe, 0xcb, 0x89, 0x29, 0x8f, 0x25, 0x0e, + 0xfd, 0xb4, 0x05, 0xc8, 0x69, 0xd2, 0xdb, 0x32, 0x05, 0xab, 0xcb, 0x0d, 0x30, 0x51, 0xfb, 0x95, + 0xae, 0xdd, 0xde, 0x0e, 0x67, 0xe7, 0x52, 0xa5, 0xb9, 0xa6, 0xf4, 0x65, 0xd1, 0x44, 0x94, 0x26, + 0xd0, 0x0f, 0xa3, 0xab, 0x6e, 0x18, 0x7d, 0xee, 0x8f, 0x13, 0x87, 0x53, 0x46, 0x93, 0xd0, 0x75, + 0xfd, 0xf2, 0x35, 0x7c, 0xc0, 0xcb, 0xd7, 0x68, 0xde, 0xc5, 0x6b, 0xa6, 0x0d, 0x0f, 0xe5, 0x7c, + 0x41, 0x86, 0xfe, 0x75, 0x51, 0xd7, 0xbf, 0x76, 0xd1, 0xda, 0xcd, 0xca, 0x3a, 0x66, 0xdf, 0x68, + 0x3b, 0x5e, 0xe4, 0x46, 0xbb, 0xba, 0xbe, 0xf6, 0x29, 0x18, 0x5b, 0x74, 0xc8, 0xb6, 0xef, 0x2d, + 0x79, 0x8d, 
0x96, 0xef, 0x7a, 0x11, 0x9a, 0x86, 0x3e, 0x26, 0x7c, 0xf0, 0xad, 0xb7, 0x8f, 0xf6, + 0x1e, 0x66, 0x10, 0x7b, 0x03, 0x8e, 0x2f, 0xfa, 0xb7, 0xbd, 0xdb, 0x4e, 0xd0, 0x98, 0xab, 0x56, + 0x34, 0x7d, 0xd2, 0xaa, 0xd4, 0x67, 0x58, 0xf9, 0xb7, 0x45, 0xad, 0x24, 0xbf, 0x0e, 0x2d, 0xbb, + 0x4d, 0x92, 0xa3, 0xf5, 0xfb, 0x7f, 0x0a, 0x46, 0x4d, 0x31, 0xbd, 0xb2, 0x3b, 0x5b, 0xb9, 0x76, + 0xe7, 0x37, 0x60, 0x68, 0xdd, 0x25, 0xcd, 0x06, 0x26, 0xeb, 0xa2, 0x77, 0x9e, 0xc8, 0xf7, 0x4c, + 0x5b, 0xa6, 0x94, 0x52, 0xcb, 0xcb, 0xb5, 0x21, 0xcb, 0xa2, 0x30, 0x56, 0x6c, 0xd0, 0x16, 0x4c, + 0xc8, 0x3e, 0x94, 0x58, 0xb1, 0x1f, 0x3c, 0xd9, 0x69, 0xe0, 0x4d, 0xe6, 0xc7, 0xee, 0xed, 0x95, + 0x27, 0x70, 0x82, 0x0d, 0x4e, 0x31, 0x46, 0xa7, 0xa0, 0x6f, 0x9b, 0x9e, 0x7c, 0x7d, 0xac, 0xfb, + 0x99, 0xfa, 0x83, 0x69, 0x72, 0x18, 0xd4, 0xfe, 0x31, 0x0b, 0x1e, 0x4a, 0xf5, 0x8c, 0xd0, 0x68, + 0x1d, 0xf2, 0x28, 0x24, 0x35, 0x4c, 0x85, 0xee, 0x1a, 0x26, 0xfb, 0xe7, 0x2c, 0x38, 0xb6, 0xb4, + 0xdd, 0x8a, 0x76, 0x17, 0x5d, 0xd3, 0x48, 0xfc, 0x41, 0x18, 0xd8, 0x26, 0x0d, 0xb7, 0xbd, 0x2d, + 0x46, 0xae, 0x2c, 0x4f, 0x87, 0x15, 0x06, 0xdd, 0xdf, 0x2b, 0x8f, 0xd6, 0x22, 0x3f, 0x70, 0x36, + 0x08, 0x07, 0x60, 0x41, 0xce, 0xce, 0x58, 0xf7, 0x2e, 0xb9, 0xea, 0x6e, 0xbb, 0xd1, 0xfd, 0xcd, + 0x76, 0x61, 0xdf, 0x95, 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0x86, 0x05, 0xe3, 0x72, 0xde, 0xcf, 0x35, + 0x1a, 0x01, 0x09, 0x43, 0x34, 0x03, 0x05, 0xb7, 0x25, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0x4a, 0x15, + 0x17, 0xdc, 0x96, 0x14, 0xe7, 0xd9, 0x01, 0x54, 0x34, 0x4d, 0xdd, 0x97, 0x05, 0x1c, 0x2b, 0x0a, + 0x74, 0x1e, 0x86, 0x3c, 0xbf, 0xc1, 0x25, 0x62, 0x2e, 0x4a, 0xb0, 0x09, 0xb6, 0x2a, 0x60, 0x58, + 0x61, 0x51, 0x15, 0x4a, 0xdc, 0x11, 0x32, 0x9e, 0xb4, 0x3d, 0xb9, 0x53, 0xb2, 0x2f, 0x5b, 0x93, + 0x25, 0x71, 0xcc, 0xc4, 0xfe, 0x4d, 0x0b, 0x46, 0xe4, 0x97, 0xf5, 0x78, 0x57, 0xa1, 0x4b, 0x2b, + 0xbe, 0xa7, 0xc4, 0x4b, 0x8b, 0xde, 0x35, 0x18, 0xc6, 0xb8, 0x62, 0x14, 0x0f, 0x74, 0xc5, 0xb8, + 0x08, 0xc3, 0x4e, 0xab, 0x55, 0x35, 0xef, 0x27, 0x6c, 0x2a, 0xcd, 0xc5, 0x60, 0xac, 0xd3, 0xd8, + 0x3f, 0x5a, 0x80, 0x31, 0xf9, 0x05, 0xb5, 0xf6, 0xad, 0x90, 0x44, 0x68, 0x0d, 0x4a, 0x0e, 0x1f, + 0x25, 0x22, 0x27, 0xf9, 0xa3, 0xd9, 0x7a, 0x33, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x4e, 0x96, 0xc6, + 0x31, 0x23, 0xd4, 0x84, 0x49, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0x9d, 0x4c, 0x99, 0x49, 0xee, + 0x27, 0x05, 0xf7, 0xc9, 0xd5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x96, 0xa4, 0x2e, 0xb2, 0x98, 0xaf, + 0x44, 0xd2, 0x07, 0x2e, 0x5b, 0x15, 0x69, 0xff, 0x9a, 0x05, 0x25, 0x49, 0x76, 0x14, 0x56, 0xeb, + 0x15, 0x18, 0x0c, 0xd9, 0x20, 0xc8, 0xae, 0xb1, 0x3b, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, + 0xff, 0x21, 0x96, 0x3c, 0x98, 0x29, 0x4a, 0x35, 0xff, 0x5d, 0x62, 0x8a, 0x52, 0xed, 0xc9, 0x39, + 0x94, 0xfe, 0x8c, 0xb5, 0x59, 0xd3, 0xed, 0x52, 0x91, 0xb7, 0x15, 0x90, 0x75, 0xf7, 0x4e, 0x52, + 0xe4, 0xad, 0x32, 0x28, 0x16, 0x58, 0xf4, 0x16, 0x8c, 0xd4, 0xa5, 0x0d, 0x22, 0x5e, 0xe1, 0xe7, + 0x3a, 0xda, 0xc3, 0x94, 0xe9, 0x94, 0xeb, 0xd0, 0x16, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x3a, 0xfa, + 0x14, 0xbb, 0x39, 0xfa, 0xc4, 0x7c, 0xf3, 0xdd, 0x5e, 0x7e, 0xdc, 0x82, 0x01, 0xae, 0x7b, 0xee, + 0x4d, 0xf5, 0xaf, 0x59, 0x92, 0xe3, 0xbe, 0xbb, 0x41, 0x81, 0x42, 0xd2, 0x40, 0x2b, 0x50, 0x62, + 0x3f, 0x98, 0xee, 0xbc, 0x98, 0xff, 0x0e, 0x87, 0xd7, 0xaa, 0x37, 0xf0, 0x86, 0x2c, 0x86, 0x63, + 0x0e, 0xf6, 0x8f, 0x14, 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xe0, 0x0e, 0xfd, 0xc2, + 0x83, 0x3a, 0xf4, 0x37, 0x60, 0xbc, 0xae, 0xd9, 0x9d, 0xe3, 0x91, 0x3c, 0xdf, 0x71, 0x92, 0x68, + 0x26, 0x6a, 0xae, 0x9d, 0x5b, 0x30, 
0x99, 0xe0, 0x24, 0x57, 0xf4, 0x71, 0x18, 0xe1, 0xe3, 0x2c, + 0x6a, 0xe1, 0xbe, 0x52, 0x8f, 0xe7, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, + 0x99, 0xfd, 0x57, 0x16, 0xa0, 0xa5, 0xd6, 0x26, 0xd9, 0x26, 0x81, 0xd3, 0x8c, 0xcd, 0x47, 0x5f, + 0xb4, 0x60, 0x9a, 0xa4, 0xc0, 0x0b, 0xfe, 0xf6, 0xb6, 0xb8, 0x2c, 0xe6, 0xe8, 0x33, 0x96, 0x72, + 0xca, 0xa8, 0x87, 0x4a, 0xd3, 0x79, 0x14, 0x38, 0xb7, 0x3e, 0xb4, 0x02, 0x53, 0xfc, 0x94, 0x54, + 0x08, 0xcd, 0xef, 0xea, 0x61, 0xc1, 0x78, 0x6a, 0x2d, 0x4d, 0x82, 0xb3, 0xca, 0xd9, 0xbf, 0x36, + 0x0a, 0xb9, 0xad, 0x78, 0xcf, 0x6e, 0xf6, 0x9e, 0xdd, 0xec, 0x3d, 0xbb, 0xd9, 0x7b, 0x76, 0xb3, + 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, 0xde, 0xa5, 0x76, 0xb3, 0xff, 0xcb, 0x82, 0xe3, 0xea, 0xf8, + 0x32, 0x2e, 0xec, 0x9f, 0x81, 0x29, 0xbe, 0xdc, 0x0c, 0x1f, 0x63, 0x71, 0x5c, 0x5f, 0xcc, 0x9c, + 0xb9, 0x09, 0x5f, 0x78, 0xa3, 0x20, 0x7f, 0x54, 0x94, 0x81, 0xc0, 0x59, 0xd5, 0xd8, 0xbf, 0x3c, + 0x04, 0xfd, 0x4b, 0x3b, 0xc4, 0x8b, 0x8e, 0xe0, 0x6a, 0x53, 0x87, 0x31, 0xd7, 0xdb, 0xf1, 0x9b, + 0x3b, 0xa4, 0xc1, 0xf1, 0x07, 0xb9, 0x81, 0x9f, 0x10, 0xac, 0xc7, 0x2a, 0x06, 0x0b, 0x9c, 0x60, + 0xf9, 0x20, 0xac, 0x0f, 0x97, 0x60, 0x80, 0x1f, 0x3e, 0xc2, 0xf4, 0x90, 0xb9, 0x67, 0xb3, 0x4e, + 0x14, 0x47, 0x6a, 0x6c, 0x19, 0xe1, 0x87, 0x9b, 0x28, 0x8e, 0x3e, 0x0d, 0x63, 0xeb, 0x6e, 0x10, + 0x46, 0x6b, 0xee, 0x36, 0x3d, 0x1a, 0xb6, 0x5b, 0xf7, 0x61, 0x6d, 0x50, 0xfd, 0xb0, 0x6c, 0x70, + 0xc2, 0x09, 0xce, 0x68, 0x03, 0x46, 0x9b, 0x8e, 0x5e, 0xd5, 0xe0, 0x81, 0xab, 0x52, 0xa7, 0xc3, + 0x55, 0x9d, 0x11, 0x36, 0xf9, 0xd2, 0xe5, 0x54, 0x67, 0x0a, 0xf3, 0x21, 0xa6, 0xce, 0x50, 0xcb, + 0x89, 0x6b, 0xca, 0x39, 0x8e, 0x0a, 0x68, 0xcc, 0x91, 0xbd, 0x64, 0x0a, 0x68, 0x9a, 0xbb, 0xfa, + 0xa7, 0xa0, 0x44, 0x68, 0x17, 0x52, 0xc6, 0xe2, 0x80, 0xb9, 0xd0, 0x5b, 0x5b, 0x57, 0xdc, 0x7a, + 0xe0, 0x9b, 0x76, 0x9e, 0x25, 0xc9, 0x09, 0xc7, 0x4c, 0xd1, 0x02, 0x0c, 0x84, 0x24, 0x70, 0x95, + 0x2e, 0xb9, 0xc3, 0x30, 0x32, 0x32, 0xfe, 0x6a, 0x8d, 0xff, 0xc6, 0xa2, 0x28, 0x9d, 0x5e, 0x0e, + 0x53, 0xc5, 0xb2, 0xc3, 0x40, 0x9b, 0x5e, 0x73, 0x0c, 0x8a, 0x05, 0x16, 0xbd, 0x0e, 0x83, 0x01, + 0x69, 0x32, 0x43, 0xe2, 0x68, 0xef, 0x93, 0x9c, 0xdb, 0x25, 0x79, 0x39, 0x2c, 0x19, 0xa0, 0x2b, + 0x80, 0x02, 0x42, 0x05, 0x3c, 0xd7, 0xdb, 0x50, 0xee, 0xdd, 0x62, 0xa3, 0x55, 0x82, 0x34, 0x8e, + 0x29, 0xe4, 0x83, 0x45, 0x9c, 0x51, 0x0c, 0x5d, 0x82, 0x49, 0x05, 0xad, 0x78, 0x61, 0xe4, 0xd0, + 0x0d, 0x6e, 0x9c, 0xf1, 0x52, 0xfa, 0x15, 0x9c, 0x24, 0xc0, 0xe9, 0x32, 0xf6, 0xcf, 0x5a, 0xc0, + 0xfb, 0xf9, 0x08, 0xb4, 0x0a, 0xaf, 0x99, 0x5a, 0x85, 0x93, 0xb9, 0x23, 0x97, 0xa3, 0x51, 0xf8, + 0x59, 0x0b, 0x86, 0xb5, 0x91, 0x8d, 0xe7, 0xac, 0xd5, 0x61, 0xce, 0xb6, 0x61, 0x82, 0xce, 0xf4, + 0x6b, 0xb7, 0x42, 0x12, 0xec, 0x90, 0x06, 0x9b, 0x98, 0x85, 0xfb, 0x9b, 0x98, 0xca, 0x95, 0xf4, + 0x6a, 0x82, 0x21, 0x4e, 0x55, 0x61, 0x7f, 0x4a, 0x36, 0x55, 0x79, 0xde, 0xd6, 0xd5, 0x98, 0x27, + 0x3c, 0x6f, 0xd5, 0xa8, 0xe2, 0x98, 0x86, 0x2e, 0xb5, 0x4d, 0x3f, 0x8c, 0x92, 0x9e, 0xb7, 0x97, + 0xfd, 0x30, 0xc2, 0x0c, 0x63, 0x3f, 0x0f, 0xb0, 0x74, 0x87, 0xd4, 0xf9, 0x8c, 0xd5, 0x2f, 0x3d, + 0x56, 0xfe, 0xa5, 0xc7, 0xfe, 0x7d, 0x0b, 0xc6, 0x96, 0x17, 0x8c, 0x93, 0x6b, 0x16, 0x80, 0xdf, + 0xd4, 0x6e, 0xde, 0x5c, 0x95, 0xee, 0x1f, 0xdc, 0x02, 0xae, 0xa0, 0x58, 0xa3, 0x40, 0x27, 0xa1, + 0xd8, 0x6c, 0x7b, 0x42, 0xed, 0x39, 0x48, 0x8f, 0xc7, 0xab, 0x6d, 0x0f, 0x53, 0x98, 0xf6, 0x58, + 0xa9, 0xd8, 0xf3, 0x63, 0xa5, 0xae, 0x41, 0x4a, 0x50, 0x19, 0xfa, 0x6f, 0xdf, 0x76, 0x1b, 0xfc, + 0x29, 0xb8, 0x70, 0x4d, 0xb9, 0x79, 0xb3, 0xb2, 0x18, 0x62, 
0x0e, 0xb7, 0xbf, 0x54, 0x84, 0x99, + 0xe5, 0x26, 0xb9, 0xf3, 0x0e, 0x9f, 0xc3, 0xf7, 0xfa, 0xd4, 0xea, 0x60, 0x0a, 0xa4, 0x83, 0x3e, + 0xa7, 0xeb, 0xde, 0x1f, 0xeb, 0x30, 0xc8, 0x1d, 0x4f, 0xe5, 0xe3, 0xf8, 0x4c, 0x73, 0x5f, 0x7e, + 0x87, 0xcc, 0x72, 0x07, 0x56, 0x61, 0xee, 0x53, 0x07, 0xa6, 0x80, 0x62, 0xc9, 0x7c, 0xe6, 0x65, + 0x18, 0xd1, 0x29, 0x0f, 0xf4, 0xb0, 0xf5, 0x7b, 0x8b, 0x30, 0x41, 0x5b, 0xf0, 0x40, 0x07, 0xe2, + 0x7a, 0x7a, 0x20, 0x0e, 0xfb, 0x71, 0x63, 0xf7, 0xd1, 0x78, 0x2b, 0x39, 0x1a, 0x17, 0xf3, 0x46, + 0xe3, 0xa8, 0xc7, 0xe0, 0xfb, 0x2c, 0x98, 0x5a, 0x6e, 0xfa, 0xf5, 0xad, 0xc4, 0x03, 0xc4, 0x17, + 0x61, 0x98, 0x6e, 0xc7, 0xa1, 0x11, 0x8b, 0xc3, 0x88, 0xce, 0x22, 0x50, 0x58, 0xa7, 0xd3, 0x8a, + 0x5d, 0xbf, 0x5e, 0x59, 0xcc, 0x0a, 0xea, 0x22, 0x50, 0x58, 0xa7, 0xb3, 0x7f, 0xd7, 0x82, 0xd3, + 0x97, 0x16, 0x96, 0xe2, 0xa9, 0x98, 0x8a, 0x2b, 0x73, 0x0e, 0x06, 0x5a, 0x0d, 0xad, 0x29, 0xb1, + 0x5a, 0x78, 0x91, 0xb5, 0x42, 0x60, 0xdf, 0x2d, 0x31, 0x93, 0xae, 0x03, 0x5c, 0xc2, 0xd5, 0x05, + 0xb1, 0xef, 0x4a, 0x2b, 0x90, 0x95, 0x6b, 0x05, 0x7a, 0x1c, 0x06, 0xe9, 0xb9, 0xe0, 0xd6, 0x65, + 0xbb, 0xb9, 0x41, 0x9f, 0x83, 0xb0, 0xc4, 0xd9, 0x3f, 0x63, 0xc1, 0xd4, 0x25, 0x37, 0xa2, 0x87, + 0x76, 0x32, 0x70, 0x0a, 0x3d, 0xb5, 0x43, 0x37, 0xf2, 0x83, 0xdd, 0x64, 0xe0, 0x14, 0xac, 0x30, + 0x58, 0xa3, 0xe2, 0x1f, 0xb4, 0xe3, 0xb2, 0x97, 0x14, 0x05, 0xd3, 0xee, 0x86, 0x05, 0x1c, 0x2b, + 0x0a, 0xda, 0x5f, 0x0d, 0x37, 0x60, 0x2a, 0xcb, 0x5d, 0xb1, 0x71, 0xab, 0xfe, 0x5a, 0x94, 0x08, + 0x1c, 0xd3, 0xd8, 0x7f, 0x61, 0x41, 0xf9, 0x52, 0xb3, 0x1d, 0x46, 0x24, 0x58, 0x0f, 0x73, 0x36, + 0xdd, 0xe7, 0xa1, 0x44, 0xa4, 0x81, 0x40, 0x3e, 0xf9, 0x94, 0x82, 0xa8, 0xb2, 0x1c, 0xf0, 0xf8, + 0x2d, 0x8a, 0xae, 0x87, 0x57, 0xd2, 0x07, 0x7b, 0xe6, 0xba, 0x0c, 0x88, 0xe8, 0x75, 0xe9, 0x01, + 0x6d, 0x58, 0x64, 0x8c, 0xa5, 0x14, 0x16, 0x67, 0x94, 0xb0, 0x7f, 0xcc, 0x82, 0xe3, 0xea, 0x83, + 0xdf, 0x75, 0x9f, 0x69, 0x7f, 0xad, 0x00, 0xa3, 0x97, 0xd7, 0xd6, 0xaa, 0x97, 0x48, 0xa4, 0xcd, + 0xca, 0xce, 0x66, 0x7f, 0xac, 0x59, 0x2f, 0x3b, 0xdd, 0x11, 0xdb, 0x91, 0xdb, 0x9c, 0xe5, 0x71, + 0xd1, 0x66, 0x2b, 0x5e, 0x74, 0x2d, 0xa8, 0x45, 0x81, 0xeb, 0x6d, 0x64, 0xce, 0x74, 0x29, 0xb3, + 0x14, 0xf3, 0x64, 0x16, 0xf4, 0x3c, 0x0c, 0xb0, 0xc0, 0x6c, 0x72, 0x10, 0x1e, 0x56, 0x57, 0x2c, + 0x06, 0xdd, 0xdf, 0x2b, 0x97, 0xae, 0xe3, 0x0a, 0xff, 0x83, 0x05, 0x29, 0xba, 0x0e, 0xc3, 0x9b, + 0x51, 0xd4, 0xba, 0x4c, 0x9c, 0x06, 0x09, 0xe4, 0x2e, 0x7b, 0x26, 0x6b, 0x97, 0xa5, 0x9d, 0xc0, + 0xc9, 0xe2, 0x8d, 0x29, 0x86, 0x85, 0x58, 0xe7, 0x63, 0xd7, 0x00, 0x62, 0xdc, 0x21, 0x19, 0x6e, + 0xec, 0x35, 0x28, 0xd1, 0xcf, 0x9d, 0x6b, 0xba, 0x4e, 0x67, 0xd3, 0xf8, 0xd3, 0x50, 0x92, 0x86, + 0xef, 0x50, 0x44, 0x71, 0x60, 0x27, 0x92, 0xb4, 0x8b, 0x87, 0x38, 0xc6, 0xdb, 0x8f, 0x81, 0xf0, + 0x2d, 0xed, 0xc4, 0xd2, 0x5e, 0x87, 0x63, 0xcc, 0x49, 0xd6, 0x89, 0x36, 0x8d, 0x39, 0xda, 0x7d, + 0x32, 0x3c, 0x23, 0xee, 0x75, 0xfc, 0xcb, 0xa6, 0xb5, 0xc7, 0xc9, 0x23, 0x92, 0x63, 0x7c, 0xc7, + 0xb3, 0xff, 0xbc, 0x0f, 0x1e, 0xae, 0xd4, 0xf2, 0xc3, 0x0f, 0xbd, 0x04, 0x23, 0x5c, 0x5c, 0xa4, + 0x53, 0xc3, 0x69, 0x8a, 0x7a, 0x95, 0x06, 0x74, 0x4d, 0xc3, 0x61, 0x83, 0x12, 0x9d, 0x86, 0xa2, + 0xfb, 0xb6, 0x97, 0x7c, 0xba, 0x57, 0x79, 0x63, 0x15, 0x53, 0x38, 0x45, 0x53, 0xc9, 0x93, 0x6f, + 0xe9, 0x0a, 0xad, 0xa4, 0xcf, 0xd7, 0x60, 0xcc, 0x0d, 0xeb, 0xa1, 0x5b, 0xf1, 0xe8, 0x3a, 0xd5, + 0x56, 0xba, 0xd2, 0x39, 0xd0, 0x46, 0x2b, 0x2c, 0x4e, 0x50, 0x6b, 0xe7, 0x4b, 0x7f, 0xcf, 0xd2, + 0x6b, 0xd7, 0xe0, 0x07, 0x74, 0xfb, 0x6f, 0xb1, 0xaf, 0x0b, 0x99, 0x0a, 0x5e, 0x6c, 
0xff, 0xfc, + 0x83, 0x43, 0x2c, 0x71, 0xf4, 0x42, 0x57, 0xdf, 0x74, 0x5a, 0x73, 0xed, 0x68, 0x73, 0xd1, 0x0d, + 0xeb, 0xfe, 0x0e, 0x09, 0x76, 0xd9, 0x5d, 0x7c, 0x28, 0xbe, 0xd0, 0x29, 0xc4, 0xc2, 0xe5, 0xb9, + 0x2a, 0xa5, 0xc4, 0xe9, 0x32, 0x68, 0x0e, 0xc6, 0x25, 0xb0, 0x46, 0x42, 0x76, 0x04, 0x0c, 0x33, + 0x36, 0xea, 0x31, 0x9d, 0x00, 0x2b, 0x26, 0x49, 0x7a, 0x53, 0xc0, 0x85, 0xc3, 0x10, 0x70, 0x3f, + 0x08, 0xa3, 0xae, 0xe7, 0x46, 0xae, 0x13, 0xf9, 0xdc, 0x7e, 0xc4, 0xaf, 0xdd, 0x4c, 0xc1, 0x5c, + 0xd1, 0x11, 0xd8, 0xa4, 0xb3, 0xff, 0x6d, 0x1f, 0x4c, 0xb2, 0x61, 0x7b, 0x6f, 0x86, 0x7d, 0x27, + 0xcd, 0xb0, 0xeb, 0xe9, 0x19, 0x76, 0x18, 0x92, 0xfb, 0x7d, 0x4f, 0xb3, 0x4f, 0x43, 0x49, 0xbd, + 0x1f, 0x94, 0x0f, 0x88, 0xad, 0x9c, 0x07, 0xc4, 0xdd, 0x4f, 0x6f, 0xe9, 0x92, 0x56, 0xcc, 0x74, + 0x49, 0xfb, 0x8a, 0x05, 0xb1, 0x61, 0x01, 0xbd, 0x01, 0xa5, 0x96, 0xcf, 0x3c, 0x5c, 0x03, 0xe9, + 0x36, 0xfe, 0x58, 0x47, 0xcb, 0x04, 0x8f, 0xc0, 0x16, 0xf0, 0x5e, 0xa8, 0xca, 0xa2, 0x38, 0xe6, + 0x82, 0xae, 0xc0, 0x60, 0x2b, 0x20, 0xb5, 0x88, 0x85, 0x07, 0xea, 0x9d, 0x21, 0x9f, 0x35, 0xbc, + 0x20, 0x96, 0x1c, 0xec, 0x5f, 0x28, 0xc0, 0x44, 0x92, 0x14, 0xbd, 0x0a, 0x7d, 0xe4, 0x0e, 0xa9, + 0x8b, 0xf6, 0x66, 0x1e, 0xc5, 0xb1, 0x6a, 0x82, 0x77, 0x00, 0xfd, 0x8f, 0x59, 0x29, 0x74, 0x19, + 0x06, 0xe9, 0x39, 0x7c, 0x49, 0x85, 0xc2, 0x7b, 0x24, 0xef, 0x2c, 0x57, 0x02, 0x0d, 0x6f, 0x9c, + 0x00, 0x61, 0x59, 0x9c, 0xf9, 0x81, 0xd5, 0x5b, 0x35, 0x7a, 0xc5, 0x89, 0x3a, 0xdd, 0xc4, 0xd7, + 0x16, 0xaa, 0x9c, 0x48, 0x70, 0xe3, 0x7e, 0x60, 0x12, 0x88, 0x63, 0x26, 0xe8, 0x23, 0xd0, 0x1f, + 0x36, 0x09, 0x69, 0x09, 0x43, 0x7f, 0xa6, 0x72, 0xb1, 0x46, 0x09, 0x04, 0x27, 0xa6, 0x8c, 0x60, + 0x00, 0xcc, 0x0b, 0xda, 0xbf, 0x68, 0x01, 0x70, 0xc7, 0x39, 0xc7, 0xdb, 0x20, 0x47, 0xa0, 0x8f, + 0x5f, 0x84, 0xbe, 0xb0, 0x45, 0xea, 0x9d, 0xdc, 0xb7, 0xe3, 0xf6, 0xd4, 0x5a, 0xa4, 0x1e, 0xcf, + 0x59, 0xfa, 0x0f, 0xb3, 0xd2, 0xf6, 0xf7, 0x03, 0x8c, 0xc5, 0x64, 0x95, 0x88, 0x6c, 0xa3, 0x67, + 0x8d, 0xb0, 0x25, 0x27, 0x13, 0x61, 0x4b, 0x4a, 0x8c, 0x5a, 0x53, 0xfd, 0x7e, 0x1a, 0x8a, 0xdb, + 0xce, 0x1d, 0xa1, 0xdb, 0x7b, 0xba, 0x73, 0x33, 0x28, 0xff, 0xd9, 0x15, 0xe7, 0x0e, 0xbf, 0xfe, + 0x3e, 0x2d, 0xd7, 0xd8, 0x8a, 0x73, 0xa7, 0xab, 0x8b, 0x31, 0xad, 0x84, 0xd5, 0xe5, 0x7a, 0xc2, + 0x27, 0xac, 0xa7, 0xba, 0x5c, 0x2f, 0x59, 0x97, 0xeb, 0xf5, 0x50, 0x97, 0xeb, 0xa1, 0xbb, 0x30, + 0x28, 0x5c, 0x36, 0x45, 0x60, 0xb3, 0x0b, 0x3d, 0xd4, 0x27, 0x3c, 0x3e, 0x79, 0x9d, 0x17, 0xe4, + 0xf5, 0x5e, 0x40, 0xbb, 0xd6, 0x2b, 0x2b, 0x44, 0xff, 0xb7, 0x05, 0x63, 0xe2, 0x37, 0x26, 0x6f, + 0xb7, 0x49, 0x18, 0x09, 0xf1, 0xf7, 0x03, 0xbd, 0xb7, 0x41, 0x14, 0xe4, 0x4d, 0xf9, 0x80, 0x3c, + 0xa9, 0x4c, 0x64, 0xd7, 0x16, 0x25, 0x5a, 0x81, 0x7e, 0xc1, 0x82, 0x63, 0xdb, 0xce, 0x1d, 0x5e, + 0x23, 0x87, 0x61, 0x27, 0x72, 0x7d, 0xe1, 0xfa, 0xf0, 0x6a, 0x6f, 0xc3, 0x9f, 0x2a, 0xce, 0x1b, + 0x29, 0xed, 0x9c, 0xc7, 0xb2, 0x48, 0xba, 0x36, 0x35, 0xb3, 0x5d, 0x33, 0xeb, 0x30, 0x24, 0xe7, + 0xdb, 0x83, 0xf4, 0x0f, 0x67, 0xf5, 0x88, 0xb9, 0xf6, 0x40, 0xeb, 0xf9, 0x34, 0x8c, 0xe8, 0x73, + 0xec, 0x81, 0xd6, 0xf5, 0x36, 0x4c, 0x65, 0xcc, 0xa5, 0x07, 0x5a, 0xe5, 0x6d, 0x38, 0x99, 0x3b, + 0x3f, 0x1e, 0xa8, 0x7f, 0xff, 0xd7, 0x2c, 0x7d, 0x1f, 0x3c, 0x02, 0xa3, 0xc8, 0x82, 0x69, 0x14, + 0x39, 0xd3, 0x79, 0xe5, 0xe4, 0x58, 0x46, 0xde, 0xd2, 0x1b, 0x4d, 0x77, 0x75, 0xf4, 0x3a, 0x0c, + 0x34, 0x29, 0x44, 0x3a, 0xfe, 0xda, 0xdd, 0x57, 0x64, 0x2c, 0x8e, 0x32, 0x78, 0x88, 0x05, 0x07, + 0xfb, 0x57, 0x2c, 0xe8, 0x3b, 0x82, 0x9e, 0xc0, 0x66, 0x4f, 0x3c, 0x9b, 0xcb, 0x5a, 0xc4, 0x78, + 0x9f, 0xc5, 
0xce, 0xed, 0xa5, 0x3b, 0x11, 0xf1, 0x42, 0x76, 0xa6, 0x67, 0x76, 0xcc, 0x9e, 0x05, + 0x53, 0x57, 0x7d, 0xa7, 0x31, 0xef, 0x34, 0x1d, 0xaf, 0x4e, 0x82, 0x8a, 0xb7, 0x71, 0x20, 0xaf, + 0xf5, 0x42, 0x57, 0xaf, 0xf5, 0x97, 0x60, 0xc0, 0x6d, 0x69, 0x31, 0xab, 0xcf, 0xd2, 0x0e, 0xac, + 0x54, 0x45, 0xb8, 0x6a, 0x64, 0x54, 0xce, 0xa0, 0x58, 0xd0, 0xd3, 0x91, 0xe7, 0xee, 0x62, 0x7d, + 0xf9, 0x23, 0x4f, 0xa5, 0xf8, 0x64, 0x08, 0x28, 0xc3, 0xb1, 0x79, 0x13, 0x8c, 0x2a, 0xc4, 0xab, + 0x2f, 0x0c, 0x83, 0x2e, 0xff, 0x52, 0x31, 0xfc, 0x4f, 0x64, 0x4b, 0xd7, 0xa9, 0x8e, 0xd1, 0xde, + 0x33, 0x71, 0x00, 0x96, 0x8c, 0xec, 0x97, 0x20, 0x33, 0x64, 0x47, 0x77, 0xcd, 0x89, 0xfd, 0x31, + 0x98, 0x64, 0x25, 0x0f, 0xa8, 0x95, 0xb0, 0x13, 0xfa, 0xde, 0x8c, 0xf0, 0xa3, 0xf6, 0x17, 0x2c, + 0x18, 0x5f, 0x4d, 0x44, 0x65, 0x3c, 0xc7, 0x2c, 0xc4, 0x19, 0x66, 0x86, 0x1a, 0x83, 0x62, 0x81, + 0x3d, 0x74, 0x35, 0xdc, 0xdf, 0x58, 0x10, 0x47, 0xd1, 0x39, 0x02, 0xc1, 0x6f, 0xc1, 0x10, 0xfc, + 0x32, 0x85, 0x68, 0xd5, 0x9c, 0x3c, 0xb9, 0x0f, 0x5d, 0x51, 0xf1, 0xe5, 0x3a, 0xc8, 0xcf, 0x31, + 0x1b, 0x3e, 0x15, 0xc7, 0xcc, 0x20, 0x74, 0x32, 0xe2, 0x9c, 0xfd, 0x07, 0x05, 0x40, 0x8a, 0xb6, + 0xe7, 0xf8, 0x77, 0xe9, 0x12, 0x87, 0x13, 0xff, 0x6e, 0x07, 0x10, 0xf3, 0x71, 0x08, 0x1c, 0x2f, + 0xe4, 0x6c, 0x5d, 0xa1, 0x78, 0x3c, 0x98, 0x03, 0xc5, 0x8c, 0x7c, 0x10, 0x77, 0x35, 0xc5, 0x0d, + 0x67, 0xd4, 0xa0, 0xf9, 0xae, 0xf4, 0xf7, 0xea, 0xbb, 0x32, 0xd0, 0xe5, 0x65, 0xe7, 0x57, 0x2d, + 0x18, 0x55, 0xdd, 0xf4, 0x2e, 0xf1, 0xff, 0x57, 0xed, 0xc9, 0xd9, 0x7a, 0xab, 0x5a, 0x93, 0xd9, + 0x91, 0xf4, 0x5d, 0xec, 0x85, 0xae, 0xd3, 0x74, 0xef, 0x12, 0x15, 0x2f, 0xb5, 0x2c, 0x5e, 0xdc, + 0x0a, 0xe8, 0xfe, 0x5e, 0x79, 0x54, 0xfd, 0xe3, 0xf1, 0xe0, 0xe3, 0x22, 0xf6, 0x4f, 0xd1, 0xc5, + 0x6e, 0x4e, 0x45, 0xf4, 0x22, 0xf4, 0xb7, 0x36, 0x9d, 0x90, 0x24, 0xde, 0x49, 0xf5, 0x57, 0x29, + 0x70, 0x7f, 0xaf, 0x3c, 0xa6, 0x0a, 0x30, 0x08, 0xe6, 0xd4, 0xbd, 0x47, 0x15, 0x4c, 0x4f, 0xce, + 0xae, 0x51, 0x05, 0xff, 0xca, 0x82, 0xbe, 0x55, 0xba, 0xc1, 0x3f, 0xf8, 0x2d, 0xe0, 0x35, 0x63, + 0x0b, 0x38, 0x95, 0x97, 0xaa, 0x23, 0x77, 0xf5, 0x2f, 0x27, 0x56, 0xff, 0x99, 0x5c, 0x0e, 0x9d, + 0x17, 0xfe, 0x36, 0x0c, 0xb3, 0x04, 0x20, 0xe2, 0x4d, 0xd8, 0xf3, 0xc6, 0x82, 0x2f, 0x27, 0x16, + 0xfc, 0xb8, 0x46, 0xaa, 0xad, 0xf4, 0x27, 0x61, 0x50, 0x3c, 0x32, 0x4a, 0x3e, 0x74, 0x16, 0xb4, + 0x58, 0xe2, 0xed, 0x1f, 0x2f, 0x82, 0x91, 0x70, 0x04, 0xfd, 0x9a, 0x05, 0xb3, 0x01, 0x77, 0x3e, + 0x6e, 0x2c, 0xb6, 0x03, 0xd7, 0xdb, 0xa8, 0xd5, 0x37, 0x49, 0xa3, 0xdd, 0x74, 0xbd, 0x8d, 0xca, + 0x86, 0xe7, 0x2b, 0xf0, 0xd2, 0x1d, 0x52, 0x6f, 0x33, 0xc3, 0x60, 0x97, 0xec, 0x26, 0xca, 0x89, + 0xff, 0xb9, 0x7b, 0x7b, 0xe5, 0x59, 0x7c, 0x20, 0xde, 0xf8, 0x80, 0x6d, 0x41, 0xbf, 0x6b, 0xc1, + 0x05, 0x9e, 0x87, 0xa3, 0xf7, 0xf6, 0x77, 0xb8, 0x67, 0x57, 0x25, 0xab, 0x98, 0xc9, 0x1a, 0x09, + 0xb6, 0xe7, 0x3f, 0x28, 0x3a, 0xf4, 0x42, 0xf5, 0x60, 0x75, 0xe1, 0x83, 0x36, 0xce, 0xfe, 0x87, + 0x45, 0x18, 0x15, 0xd1, 0xe7, 0xc4, 0x19, 0xf0, 0xa2, 0x31, 0x25, 0x1e, 0x49, 0x4c, 0x89, 0x49, + 0x83, 0xf8, 0x70, 0xb6, 0xff, 0x10, 0x26, 0xe9, 0xe6, 0x7c, 0x99, 0x38, 0x41, 0x74, 0x8b, 0x38, + 0xdc, 0x25, 0xad, 0x78, 0xe0, 0xdd, 0x5f, 0xe9, 0x46, 0xaf, 0x26, 0x99, 0xe1, 0x34, 0xff, 0xef, + 0xa4, 0x33, 0xc7, 0x83, 0x89, 0x54, 0x00, 0xc1, 0x37, 0xa1, 0xa4, 0x5e, 0xc8, 0x88, 0x4d, 0xa7, + 0x73, 0x1c, 0xce, 0x24, 0x07, 0xae, 0x7a, 0x8b, 0x5f, 0x67, 0xc5, 0xec, 0xec, 0xbf, 0x5b, 0x30, + 0x2a, 0xe4, 0x83, 0xb8, 0x0a, 0x43, 0x4e, 0x18, 0xba, 0x1b, 0x1e, 0x69, 0x74, 0xd2, 0x8e, 0xa6, + 0xaa, 0x61, 0xaf, 0x94, 0xe6, 0x44, 
0x49, 0xac, 0x78, 0xa0, 0xcb, 0xdc, 0xf1, 0x6f, 0x87, 0x74, + 0x52, 0x8d, 0xa6, 0xb8, 0x81, 0x74, 0x0d, 0xdc, 0x21, 0x58, 0x94, 0x47, 0x9f, 0xe0, 0x9e, 0x99, + 0x57, 0x3c, 0xff, 0xb6, 0x77, 0xc9, 0xf7, 0x65, 0xa4, 0x91, 0xde, 0x18, 0x4e, 0x4a, 0x7f, 0x4c, + 0x55, 0x1c, 0x9b, 0xdc, 0x7a, 0x8b, 0xc8, 0xfb, 0x19, 0x60, 0x79, 0x07, 0xcc, 0x07, 0xe9, 0x21, + 0x22, 0x30, 0x2e, 0x42, 0x1b, 0x4a, 0x98, 0xe8, 0xbb, 0xcc, 0x4b, 0xa0, 0x59, 0x3a, 0x56, 0xe2, + 0x5f, 0x31, 0x59, 0xe0, 0x24, 0x4f, 0xfb, 0xa7, 0x2d, 0x60, 0x8f, 0x73, 0x8f, 0x40, 0x1e, 0xf9, + 0xb0, 0x29, 0x8f, 0x4c, 0xe7, 0x75, 0x72, 0x8e, 0x28, 0xf2, 0x02, 0x9f, 0x59, 0xd5, 0xc0, 0xbf, + 0xb3, 0x2b, 0xdc, 0x69, 0xba, 0xdf, 0x3f, 0xec, 0xff, 0x66, 0xf1, 0x4d, 0x2c, 0x0e, 0x65, 0xf0, + 0x59, 0x18, 0xaa, 0x3b, 0x2d, 0xa7, 0xce, 0xb3, 0x63, 0xe5, 0xea, 0x02, 0x8d, 0x42, 0xb3, 0x0b, + 0xa2, 0x04, 0xd7, 0x6d, 0xc9, 0x10, 0x99, 0x43, 0x12, 0xdc, 0x55, 0x9f, 0xa5, 0xaa, 0x9c, 0xd9, + 0x82, 0x51, 0x83, 0xd9, 0x03, 0x55, 0x84, 0x7c, 0x96, 0x1f, 0xb1, 0x2a, 0xa4, 0xeb, 0x36, 0x4c, + 0x7a, 0xda, 0x7f, 0x7a, 0xa0, 0xc8, 0xcb, 0xe5, 0x63, 0xdd, 0x0e, 0x51, 0x76, 0xfa, 0x68, 0xef, + 0x7e, 0x13, 0x6c, 0x70, 0x9a, 0xb3, 0xfd, 0x13, 0x16, 0x3c, 0xa4, 0x13, 0x6a, 0x4f, 0x8b, 0xba, + 0x19, 0x68, 0x16, 0x61, 0xc8, 0x6f, 0x91, 0xc0, 0x89, 0xfc, 0x40, 0x9c, 0x1a, 0xe7, 0x65, 0xa7, + 0x5f, 0x13, 0xf0, 0x7d, 0x91, 0xeb, 0x41, 0x72, 0x97, 0x70, 0xac, 0x4a, 0xd2, 0xdb, 0x27, 0xeb, + 0x8c, 0x50, 0x3c, 0x22, 0x63, 0x7b, 0x00, 0xb3, 0xf5, 0x87, 0x58, 0x60, 0xec, 0x3f, 0xb7, 0xf8, + 0xc4, 0xd2, 0x9b, 0x8e, 0xde, 0x86, 0x89, 0x6d, 0x27, 0xaa, 0x6f, 0x2e, 0xdd, 0x69, 0x05, 0xdc, + 0xdc, 0x25, 0xfb, 0xe9, 0xe9, 0x6e, 0xfd, 0xa4, 0x7d, 0x64, 0xec, 0x6c, 0xba, 0x92, 0x60, 0x86, + 0x53, 0xec, 0xd1, 0x2d, 0x18, 0x66, 0x30, 0xf6, 0x3e, 0x32, 0xec, 0x24, 0x1a, 0xe4, 0xd5, 0xa6, + 0xdc, 0x25, 0x56, 0x62, 0x3e, 0x58, 0x67, 0x6a, 0x7f, 0xa5, 0xc8, 0x57, 0x3b, 0x13, 0xe5, 0x9f, + 0x84, 0xc1, 0x96, 0xdf, 0x58, 0xa8, 0x2c, 0x62, 0x31, 0x0a, 0xea, 0x18, 0xa9, 0x72, 0x30, 0x96, + 0x78, 0x74, 0x1e, 0x86, 0xc4, 0x4f, 0x69, 0x9e, 0x64, 0x7b, 0xb3, 0xa0, 0x0b, 0xb1, 0xc2, 0xa2, + 0xe7, 0x00, 0x5a, 0x81, 0xbf, 0xe3, 0x36, 0x58, 0xbc, 0x94, 0xa2, 0xe9, 0xe9, 0x54, 0x55, 0x18, + 0xac, 0x51, 0xa1, 0x57, 0x60, 0xb4, 0xed, 0x85, 0x5c, 0x1c, 0xd1, 0xa2, 0x52, 0x2b, 0x1f, 0x9c, + 0xeb, 0x3a, 0x12, 0x9b, 0xb4, 0x68, 0x0e, 0x06, 0x22, 0x87, 0x79, 0xee, 0xf4, 0xe7, 0x3b, 0x24, + 0xaf, 0x51, 0x0a, 0x3d, 0x11, 0x13, 0x2d, 0x80, 0x45, 0x41, 0xf4, 0xa6, 0x7c, 0xaa, 0xcc, 0x37, + 0x76, 0xf1, 0x12, 0xa0, 0xb7, 0x43, 0x40, 0x7b, 0xa8, 0x2c, 0x5e, 0x18, 0x18, 0xbc, 0xd0, 0xcb, + 0x00, 0xe4, 0x4e, 0x44, 0x02, 0xcf, 0x69, 0x2a, 0x7f, 0x3b, 0x25, 0x17, 0x2c, 0xfa, 0xab, 0x7e, + 0x74, 0x3d, 0x24, 0x4b, 0x8a, 0x02, 0x6b, 0xd4, 0xf6, 0xef, 0x96, 0x00, 0x62, 0xb9, 0x1d, 0xdd, + 0x4d, 0x6d, 0x5c, 0xcf, 0x74, 0x96, 0xf4, 0x0f, 0x6f, 0xd7, 0x42, 0x9f, 0xb7, 0x60, 0x58, 0x84, + 0x85, 0x61, 0x23, 0x54, 0xe8, 0xbc, 0x71, 0x9a, 0xd1, 0x69, 0x68, 0x09, 0xde, 0x84, 0xe7, 0xe5, + 0x0c, 0xd5, 0x30, 0x5d, 0x5b, 0xa1, 0x57, 0x8c, 0xde, 0x2f, 0xaf, 0x8a, 0x45, 0xa3, 0x2b, 0xd5, + 0x55, 0xb1, 0xc4, 0xce, 0x08, 0xfd, 0x96, 0x78, 0xdd, 0xb8, 0x25, 0xf6, 0xe5, 0xbf, 0xc5, 0x34, + 0xc4, 0xd7, 0x6e, 0x17, 0x44, 0x54, 0xd5, 0xe3, 0x32, 0xf4, 0xe7, 0x3f, 0x20, 0xd4, 0xee, 0x49, + 0x5d, 0x62, 0x32, 0x7c, 0x1a, 0xc6, 0x1b, 0xa6, 0x10, 0x20, 0x66, 0xe2, 0x13, 0x79, 0x7c, 0x13, + 0x32, 0x43, 0x7c, 0xec, 0x27, 0x10, 0x38, 0xc9, 0x18, 0x55, 0x79, 0x98, 0x8e, 0x8a, 0xb7, 0xee, + 0x8b, 0xd7, 0x28, 0x76, 0xee, 0x58, 0xee, 0x86, 0x11, 0xd9, 
0xa6, 0x94, 0xf1, 0xe9, 0xbe, 0x2a, + 0xca, 0x62, 0xc5, 0x05, 0xbd, 0x0e, 0x03, 0xec, 0x05, 0x59, 0x38, 0x3d, 0x94, 0xaf, 0xab, 0x36, + 0xe3, 0x15, 0xc6, 0x0b, 0x92, 0xfd, 0x0d, 0xb1, 0xe0, 0x80, 0x2e, 0xcb, 0xf7, 0x99, 0x61, 0xc5, + 0xbb, 0x1e, 0x12, 0xf6, 0x3e, 0xb3, 0x34, 0xff, 0x58, 0xfc, 0xf4, 0x92, 0xc3, 0x33, 0xd3, 0x35, + 0x1a, 0x25, 0xa9, 0x14, 0x25, 0xfe, 0xcb, 0x2c, 0x90, 0x22, 0xba, 0x52, 0x66, 0xf3, 0xcc, 0x4c, + 0x91, 0x71, 0x77, 0xde, 0x30, 0x59, 0xe0, 0x24, 0x4f, 0x2a, 0x91, 0xf2, 0x55, 0x2f, 0xde, 0xb3, + 0x74, 0xdb, 0x3b, 0xf8, 0x45, 0x9c, 0x9d, 0x46, 0x1c, 0x82, 0x45, 0xf9, 0x23, 0x15, 0x0f, 0x66, + 0x3c, 0x98, 0x48, 0x2e, 0xd1, 0x07, 0x2a, 0x8e, 0xfc, 0x69, 0x1f, 0x8c, 0x99, 0x53, 0x0a, 0x5d, + 0x80, 0x92, 0x60, 0xa2, 0x32, 0xa9, 0xa8, 0x55, 0xb2, 0x22, 0x11, 0x38, 0xa6, 0x61, 0x09, 0x74, + 0x58, 0x71, 0xcd, 0x81, 0x39, 0x4e, 0xa0, 0xa3, 0x30, 0x58, 0xa3, 0xa2, 0x17, 0xab, 0x5b, 0xbe, + 0x1f, 0xa9, 0x03, 0x49, 0xcd, 0xbb, 0x79, 0x06, 0xc5, 0x02, 0x4b, 0x0f, 0xa2, 0x2d, 0x12, 0x78, + 0xa4, 0x69, 0x46, 0x30, 0x57, 0x07, 0xd1, 0x15, 0x1d, 0x89, 0x4d, 0x5a, 0x7a, 0x9c, 0xfa, 0x21, + 0x9b, 0xc8, 0xe2, 0xfa, 0x16, 0x3b, 0x84, 0xd7, 0xf8, 0xd3, 0x76, 0x89, 0x47, 0x1f, 0x83, 0x87, + 0x54, 0xb4, 0x30, 0xcc, 0xed, 0x20, 0xb2, 0xc6, 0x01, 0x43, 0xdb, 0xf2, 0xd0, 0x42, 0x36, 0x19, + 0xce, 0x2b, 0x8f, 0x5e, 0x83, 0x31, 0x21, 0xe2, 0x4b, 0x8e, 0x83, 0xa6, 0x77, 0xd3, 0x15, 0x03, + 0x8b, 0x13, 0xd4, 0x32, 0x06, 0x3b, 0x93, 0xb2, 0x25, 0x87, 0xa1, 0x74, 0x0c, 0x76, 0x1d, 0x8f, + 0x53, 0x25, 0xd0, 0x1c, 0x8c, 0x73, 0x19, 0xcc, 0xf5, 0x36, 0xf8, 0x98, 0x88, 0xe7, 0x66, 0x6a, + 0x49, 0x5d, 0x33, 0xd1, 0x38, 0x49, 0x8f, 0x5e, 0x82, 0x11, 0x27, 0xa8, 0x6f, 0xba, 0x11, 0xa9, + 0x47, 0xed, 0x80, 0xbf, 0x43, 0xd3, 0xdc, 0xc3, 0xe6, 0x34, 0x1c, 0x36, 0x28, 0xed, 0xbb, 0x30, + 0x95, 0x11, 0xf3, 0x82, 0x4e, 0x1c, 0xa7, 0xe5, 0xca, 0x6f, 0x4a, 0xf8, 0x60, 0xcf, 0x55, 0x2b, + 0xf2, 0x6b, 0x34, 0x2a, 0x3a, 0x3b, 0x59, 0x6c, 0x0c, 0x2d, 0xe9, 0xab, 0x9a, 0x9d, 0xcb, 0x12, + 0x81, 0x63, 0x1a, 0xfb, 0x3f, 0x16, 0x60, 0x3c, 0xc3, 0xb6, 0xc2, 0x12, 0x8f, 0x26, 0x2e, 0x29, + 0x71, 0x9e, 0x51, 0x33, 0xa4, 0x7f, 0xe1, 0x00, 0x21, 0xfd, 0x8b, 0xdd, 0x42, 0xfa, 0xf7, 0xbd, + 0x93, 0x90, 0xfe, 0x66, 0x8f, 0xf5, 0xf7, 0xd4, 0x63, 0x19, 0x69, 0x00, 0x06, 0x0e, 0x98, 0x06, + 0xc0, 0xe8, 0xf4, 0xc1, 0x1e, 0x3a, 0xfd, 0x47, 0x0a, 0x30, 0x91, 0x74, 0x63, 0x3d, 0x02, 0xbd, + 0xed, 0xeb, 0x86, 0xde, 0xf6, 0x7c, 0x2f, 0xcf, 0x83, 0x73, 0x75, 0xb8, 0x38, 0xa1, 0xc3, 0x7d, + 0xaa, 0x27, 0x6e, 0x9d, 0xf5, 0xb9, 0x3f, 0x59, 0x80, 0xe3, 0x99, 0xef, 0x93, 0x8f, 0xa0, 0x6f, + 0xae, 0x19, 0x7d, 0xf3, 0x6c, 0xcf, 0x4f, 0xa7, 0x73, 0x3b, 0xe8, 0x66, 0xa2, 0x83, 0x2e, 0xf4, + 0xce, 0xb2, 0x73, 0x2f, 0x7d, 0xa3, 0x08, 0x67, 0x32, 0xcb, 0xc5, 0x6a, 0xcf, 0x65, 0x43, 0xed, + 0xf9, 0x5c, 0x42, 0xed, 0x69, 0x77, 0x2e, 0x7d, 0x38, 0x7a, 0x50, 0xf1, 0x84, 0x98, 0x05, 0x42, + 0xb8, 0x4f, 0x1d, 0xa8, 0xf1, 0x84, 0x58, 0x31, 0xc2, 0x26, 0xdf, 0xef, 0x24, 0xdd, 0xe7, 0xef, + 0x58, 0x70, 0x32, 0x73, 0x6c, 0x8e, 0x40, 0xd7, 0xb5, 0x6a, 0xea, 0xba, 0x9e, 0xec, 0x79, 0xb6, + 0xe6, 0x28, 0xbf, 0x7e, 0xae, 0x3f, 0xe7, 0x5b, 0xd8, 0x4d, 0xfe, 0x1a, 0x0c, 0x3b, 0xf5, 0x3a, + 0x09, 0xc3, 0x15, 0xbf, 0xa1, 0xa2, 0x7f, 0x3f, 0xcb, 0xee, 0x59, 0x31, 0x78, 0x7f, 0xaf, 0x3c, + 0x93, 0x64, 0x11, 0xa3, 0xb1, 0xce, 0x01, 0x7d, 0x02, 0x86, 0x42, 0x71, 0x6e, 0x8a, 0xb1, 0x7f, + 0xbe, 0xc7, 0xce, 0x71, 0x6e, 0x91, 0xa6, 0x19, 0x66, 0x4a, 0x69, 0x2a, 0x14, 0x4b, 0xf4, 0xbf, + 0xe8, 0x21, 0x69, 0xd2, 0x52, 0x65, 0x22, 0x40, 0xca, 0x7d, 0x04, 0xa6, 0x79, 0x0e, 
0x60, 0x47, + 0x5d, 0x09, 0x92, 0x5a, 0x08, 0xed, 0xb2, 0xa0, 0x51, 0xa1, 0x8f, 0xc0, 0x44, 0xc8, 0xa3, 0x31, + 0x2e, 0x34, 0x9d, 0x90, 0xbd, 0xf7, 0x11, 0x73, 0x91, 0x05, 0xb4, 0xaa, 0x25, 0x70, 0x38, 0x45, + 0x8d, 0x96, 0x65, 0xad, 0xcc, 0x93, 0x84, 0x4f, 0xcf, 0x73, 0x71, 0x8d, 0xc2, 0x9b, 0xe4, 0x58, + 0x72, 0x10, 0x58, 0xf7, 0x6b, 0x25, 0xd1, 0x27, 0x00, 0xe8, 0x24, 0x12, 0xda, 0x88, 0xc1, 0xfc, + 0x2d, 0x94, 0xee, 0x2d, 0x8d, 0x4c, 0xf7, 0x6a, 0xf6, 0xf6, 0x77, 0x51, 0x31, 0xc1, 0x1a, 0x43, + 0xe4, 0xc0, 0x68, 0xfc, 0x2f, 0xce, 0x0d, 0x7c, 0x3e, 0xb7, 0x86, 0x24, 0x73, 0xa6, 0xf8, 0x5e, + 0xd4, 0x59, 0x60, 0x93, 0xa3, 0xfd, 0xef, 0x86, 0xe0, 0xe1, 0x0e, 0x9b, 0x31, 0x9a, 0x33, 0x0d, + 0xbe, 0x4f, 0x27, 0x6f, 0xf1, 0x33, 0x99, 0x85, 0x8d, 0x6b, 0x7d, 0x62, 0xce, 0x17, 0xde, 0xf1, + 0x9c, 0xff, 0x21, 0x4b, 0xd3, 0xaf, 0x70, 0xa7, 0xd4, 0x0f, 0x1f, 0xf0, 0x90, 0x39, 0x44, 0x85, + 0xcb, 0x7a, 0x86, 0xd6, 0xe2, 0xb9, 0x9e, 0x9b, 0xd3, 0xbb, 0x1a, 0xe3, 0x6b, 0xd9, 0x21, 0x88, + 0xb9, 0x42, 0xe3, 0xd2, 0x41, 0xbf, 0xff, 0xa8, 0xc2, 0x11, 0xff, 0x81, 0x05, 0x27, 0x53, 0x60, + 0xde, 0x06, 0x12, 0x8a, 0x28, 0x59, 0xab, 0xef, 0xb8, 0xf1, 0x92, 0x21, 0xff, 0x86, 0xcb, 0xe2, + 0x1b, 0x4e, 0xe6, 0xd2, 0x25, 0x9b, 0xfe, 0xc5, 0x3f, 0x2e, 0x4f, 0xb1, 0x0a, 0x4c, 0x42, 0x9c, + 0xdf, 0xf4, 0xa3, 0xbd, 0xfe, 0x7f, 0x6b, 0xa2, 0x2f, 0xcf, 0x5c, 0x85, 0x33, 0x9d, 0xbb, 0xfa, + 0x40, 0x0f, 0xa4, 0x7f, 0xdf, 0x82, 0xd3, 0x1d, 0xa3, 0xf0, 0x7c, 0x1b, 0x4a, 0xbb, 0xf6, 0xe7, + 0x2c, 0x78, 0x24, 0xb3, 0x84, 0xe1, 0x23, 0x77, 0x01, 0x4a, 0xf5, 0x44, 0x46, 0xd6, 0x38, 0x1e, + 0x85, 0xca, 0xc6, 0x1a, 0xd3, 0x18, 0xae, 0x70, 0x85, 0xae, 0xae, 0x70, 0xbf, 0x69, 0x41, 0xea, + 0xac, 0x3a, 0x02, 0xd1, 0xa9, 0x62, 0x8a, 0x4e, 0x8f, 0xf5, 0xd2, 0x9b, 0x39, 0x52, 0xd3, 0x5f, + 0x8e, 0xc3, 0x89, 0x9c, 0xf7, 0x8d, 0x3b, 0x30, 0xb9, 0x51, 0x27, 0xe6, 0x83, 0xf6, 0x4e, 0x81, + 0x9e, 0x3a, 0xbe, 0x7e, 0xe7, 0x89, 0x70, 0x53, 0x24, 0x38, 0x5d, 0x05, 0xfa, 0x9c, 0x05, 0xc7, + 0x9c, 0xdb, 0xe1, 0x12, 0x15, 0x81, 0xdd, 0xfa, 0x7c, 0xd3, 0xaf, 0x6f, 0x51, 0xc9, 0x42, 0x2e, + 0xab, 0x17, 0x32, 0xd5, 0x92, 0x37, 0x6b, 0x29, 0x7a, 0xa3, 0x7a, 0x96, 0xf6, 0x3c, 0x8b, 0x0a, + 0x67, 0xd6, 0x85, 0xb0, 0xc8, 0xd0, 0x42, 0x2f, 0xd8, 0x1d, 0x42, 0x2e, 0x64, 0x3d, 0x44, 0xe5, + 0x32, 0x9d, 0xc4, 0x60, 0xc5, 0x07, 0x7d, 0x0a, 0x4a, 0x1b, 0xf2, 0x75, 0x75, 0x86, 0xcc, 0x18, + 0x77, 0x64, 0xe7, 0x37, 0xe7, 0xdc, 0xb7, 0x40, 0x11, 0xe1, 0x98, 0x29, 0x7a, 0x0d, 0x8a, 0xde, + 0x7a, 0xd8, 0x29, 0x73, 0x78, 0xc2, 0x89, 0x94, 0x07, 0x36, 0x59, 0x5d, 0xae, 0x61, 0x5a, 0x10, + 0x5d, 0x86, 0x62, 0x70, 0xab, 0x21, 0x74, 0xea, 0x99, 0x8b, 0x14, 0xcf, 0x2f, 0xe6, 0xb4, 0x8a, + 0x71, 0xc2, 0xf3, 0x8b, 0x98, 0xb2, 0x40, 0x55, 0xe8, 0x67, 0x8f, 0x02, 0x85, 0x6c, 0x96, 0x79, + 0x17, 0xed, 0xf0, 0xb8, 0x96, 0x3f, 0x38, 0x62, 0x04, 0x98, 0x33, 0x42, 0x6b, 0x30, 0x50, 0x67, + 0x59, 0xa6, 0x85, 0x30, 0xf6, 0xfe, 0x4c, 0xed, 0x79, 0x87, 0xf4, 0xdb, 0x42, 0x99, 0xcc, 0x28, + 0xb0, 0xe0, 0xc5, 0xb8, 0x92, 0xd6, 0xe6, 0x7a, 0xc8, 0xb4, 0x6f, 0x79, 0x5c, 0x3b, 0x64, 0x95, + 0x17, 0x5c, 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0x58, 0xaf, 0x8b, 0x07, 0x7f, 0x99, 0x6a, + 0x74, 0x33, 0x36, 0xcd, 0xfc, 0xc0, 0xbd, 0xbd, 0x72, 0x61, 0x79, 0x01, 0x17, 0xd6, 0xeb, 0x68, + 0x15, 0x06, 0xd7, 0x79, 0x34, 0x0b, 0xa1, 0x29, 0x7f, 0x22, 0x3b, 0xd0, 0x46, 0x2a, 0xe0, 0x05, + 0x7f, 0x3c, 0x26, 0x10, 0x58, 0x32, 0x61, 0x09, 0x43, 0x54, 0x54, 0x0e, 0x11, 0x14, 0x70, 0xf6, + 0x60, 0x91, 0x54, 0xb8, 0xac, 0x1c, 0xc7, 0xf6, 0xc0, 0x1a, 0x47, 0x3a, 0xab, 0x9d, 0xbb, 0xed, + 0x80, 0x45, 
0x8c, 0x17, 0xd1, 0xa3, 0x32, 0x67, 0xf5, 0x9c, 0x24, 0xea, 0x34, 0xab, 0x15, 0x11, + 0x8e, 0x99, 0xa2, 0x2d, 0x18, 0xdd, 0x09, 0x5b, 0x9b, 0x44, 0x2e, 0x69, 0x16, 0x4c, 0x2a, 0x47, + 0xd6, 0xbb, 0x21, 0x08, 0xdd, 0x20, 0x6a, 0x3b, 0xcd, 0xd4, 0x2e, 0xc4, 0xe4, 0xf2, 0x1b, 0x3a, + 0x33, 0x6c, 0xf2, 0xa6, 0xdd, 0xff, 0x76, 0xdb, 0xbf, 0xb5, 0x1b, 0x11, 0x11, 0xcb, 0x2f, 0xb3, + 0xfb, 0xdf, 0xe0, 0x24, 0xe9, 0xee, 0x17, 0x08, 0x2c, 0x99, 0xa0, 0x1b, 0xa2, 0x7b, 0xd8, 0xee, + 0x39, 0x91, 0x1f, 0x28, 0x78, 0x4e, 0x12, 0xe5, 0x74, 0x0a, 0xdb, 0x2d, 0x63, 0x56, 0x6c, 0x97, + 0x6c, 0x6d, 0xfa, 0x91, 0xef, 0x25, 0x76, 0xe8, 0xc9, 0xfc, 0x5d, 0xb2, 0x9a, 0x41, 0x9f, 0xde, + 0x25, 0xb3, 0xa8, 0x70, 0x66, 0x5d, 0xa8, 0x01, 0x63, 0x2d, 0x3f, 0x88, 0x6e, 0xfb, 0x81, 0x9c, + 0x5f, 0xa8, 0x83, 0xa6, 0xcf, 0xa0, 0x14, 0x35, 0xb2, 0x30, 0x99, 0x26, 0x06, 0x27, 0x78, 0xa2, + 0x8f, 0xc2, 0x60, 0x58, 0x77, 0x9a, 0xa4, 0x72, 0x6d, 0x7a, 0x2a, 0xff, 0xf8, 0xa9, 0x71, 0x92, + 0x9c, 0xd9, 0xc5, 0x83, 0x91, 0x70, 0x12, 0x2c, 0xd9, 0xa1, 0x65, 0xe8, 0x67, 0x89, 0x38, 0x59, + 0xe0, 0xc9, 0x9c, 0x78, 0xc7, 0x29, 0x97, 0x7e, 0xbe, 0x37, 0x31, 0x30, 0xe6, 0xc5, 0xe9, 0x1a, + 0x10, 0x57, 0x5d, 0x3f, 0x9c, 0x3e, 0x9e, 0xbf, 0x06, 0xc4, 0x0d, 0xf9, 0x5a, 0xad, 0xd3, 0x1a, + 0x50, 0x44, 0x38, 0x66, 0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xe8, 0xe0, 0x8b, 0x96, 0xbb, 0x97, + 0xb2, 0x9d, 0x99, 0xee, 0xa4, 0x94, 0x85, 0xfd, 0x27, 0x83, 0x69, 0x99, 0x85, 0xa9, 0x48, 0xfe, + 0x37, 0x2b, 0x65, 0x3d, 0xff, 0x40, 0xaf, 0x1a, 0xdb, 0x43, 0xbc, 0xd6, 0x7d, 0xce, 0x82, 0x13, + 0xad, 0xcc, 0x0f, 0x11, 0x02, 0x40, 0x6f, 0x8a, 0x5f, 0xfe, 0xe9, 0x2a, 0x48, 0x69, 0x36, 0x1e, + 0xe7, 0xd4, 0x94, 0xbc, 0x3a, 0x17, 0xdf, 0xf1, 0xd5, 0x79, 0x05, 0x86, 0xea, 0xfc, 0x9e, 0x23, + 0x83, 0x6b, 0xf7, 0x14, 0x62, 0x8f, 0x89, 0x12, 0xe2, 0x82, 0xb4, 0x8e, 0x15, 0x0b, 0xf4, 0xc3, + 0x16, 0x9c, 0x4e, 0x36, 0x1d, 0x13, 0x86, 0x16, 0x91, 0x4d, 0xb9, 0x5e, 0x66, 0x59, 0x7c, 0x7f, + 0x4a, 0xfe, 0x37, 0x88, 0xf7, 0xbb, 0x11, 0xe0, 0xce, 0x95, 0xa1, 0xc5, 0x0c, 0xc5, 0xd0, 0x80, + 0x69, 0x12, 0xeb, 0x41, 0x39, 0xf4, 0x02, 0x8c, 0x6c, 0xfb, 0x6d, 0x2f, 0x12, 0xae, 0x6b, 0xc2, + 0x8d, 0x86, 0xb9, 0x8f, 0xac, 0x68, 0x70, 0x6c, 0x50, 0x25, 0x54, 0x4a, 0x43, 0xf7, 0xad, 0x52, + 0x7a, 0x0b, 0x46, 0x3c, 0xcd, 0xd7, 0x5a, 0xc8, 0x03, 0xe7, 0xf2, 0x95, 0x6e, 0xba, 0x67, 0x36, + 0x6f, 0xa5, 0x0e, 0xc1, 0x06, 0xb7, 0xa3, 0xf5, 0x69, 0xfb, 0xf9, 0x42, 0x86, 0x50, 0xcf, 0xd5, + 0x4a, 0xaf, 0x9a, 0x6a, 0xa5, 0x73, 0x49, 0xb5, 0x52, 0xca, 0x1c, 0x62, 0x68, 0x94, 0x7a, 0x4f, + 0xd2, 0xd5, 0x73, 0x64, 0xd3, 0xef, 0xb5, 0xe0, 0x21, 0xa6, 0x5f, 0xa7, 0x15, 0xbc, 0x63, 0x9d, + 0xfa, 0xc3, 0xf7, 0xf6, 0xca, 0x0f, 0x5d, 0xcd, 0x66, 0x87, 0xf3, 0xea, 0xb1, 0x9b, 0x70, 0xb6, + 0xdb, 0xd1, 0xc8, 0xfc, 0x28, 0x1b, 0xca, 0x00, 0x1f, 0xfb, 0x51, 0x36, 0x2a, 0x8b, 0x98, 0x61, + 0x7a, 0x8d, 0xdb, 0x65, 0xff, 0x7b, 0x0b, 0x8a, 0x55, 0xbf, 0x71, 0x04, 0x97, 0xee, 0x0f, 0x1b, + 0x97, 0xee, 0x87, 0xb3, 0x0f, 0xe5, 0x46, 0xae, 0x41, 0x69, 0x29, 0x61, 0x50, 0x3a, 0x9d, 0xc7, + 0xa0, 0xb3, 0xf9, 0xe8, 0xa7, 0x8a, 0x30, 0x5c, 0xf5, 0x1b, 0xea, 0x11, 0xc3, 0x3f, 0xbe, 0x9f, + 0x47, 0x0c, 0xb9, 0x69, 0x57, 0x34, 0xce, 0xcc, 0xfd, 0x52, 0xbe, 0xfc, 0xfe, 0x36, 0x7b, 0xcb, + 0x70, 0x93, 0xb8, 0x1b, 0x9b, 0x11, 0x69, 0x24, 0x3f, 0xe7, 0xe8, 0xde, 0x32, 0x7c, 0xb3, 0x08, + 0xe3, 0x89, 0xda, 0x51, 0x13, 0x46, 0x9b, 0xba, 0xb9, 0x42, 0xcc, 0xd3, 0xfb, 0xb2, 0x74, 0x08, + 0x5f, 0x70, 0x0d, 0x84, 0x4d, 0xe6, 0x68, 0x16, 0x40, 0xd9, 0xef, 0xa5, 0xba, 0x9a, 0xdd, 0x3c, + 0x94, 0x81, 0x3f, 0xc4, 0x1a, 0x05, 
0x7a, 0x11, 0x86, 0x23, 0xbf, 0xe5, 0x37, 0xfd, 0x8d, 0xdd, + 0x2b, 0x44, 0x86, 0x74, 0x53, 0x1e, 0x9e, 0x6b, 0x31, 0x0a, 0xeb, 0x74, 0xe8, 0x0e, 0x4c, 0x2a, + 0x26, 0xb5, 0x43, 0x30, 0xe1, 0x30, 0xcd, 0xc6, 0x6a, 0x92, 0x23, 0x4e, 0x57, 0x82, 0x5e, 0x86, + 0x31, 0xe6, 0x6a, 0xca, 0xca, 0x5f, 0x21, 0xbb, 0x32, 0xd4, 0x27, 0x13, 0x82, 0x57, 0x0c, 0x0c, + 0x4e, 0x50, 0xa2, 0x05, 0x98, 0xdc, 0x76, 0xc3, 0x44, 0xf1, 0x01, 0x56, 0x9c, 0x35, 0x60, 0x25, + 0x89, 0xc4, 0x69, 0x7a, 0xfb, 0x67, 0xc4, 0x18, 0x7b, 0x91, 0xfb, 0xde, 0x72, 0x7c, 0x77, 0x2f, + 0xc7, 0x6f, 0x58, 0x30, 0x41, 0x6b, 0x67, 0xfe, 0x73, 0x52, 0xd6, 0x51, 0xc1, 0xe0, 0xad, 0x0e, + 0xc1, 0xe0, 0xcf, 0xd1, 0x6d, 0xbb, 0xe1, 0xb7, 0x23, 0xa1, 0xc0, 0xd4, 0xf6, 0x65, 0x0a, 0xc5, + 0x02, 0x2b, 0xe8, 0x48, 0x10, 0x88, 0x37, 0xbf, 0x3a, 0x1d, 0x09, 0x02, 0x2c, 0xb0, 0x32, 0x56, + 0x7c, 0x5f, 0x76, 0xac, 0x78, 0x1e, 0xf2, 0x57, 0x78, 0x5a, 0x09, 0xa9, 0x53, 0x0b, 0xf9, 0x2b, + 0x5d, 0xb0, 0x62, 0x1a, 0xfb, 0x6b, 0x45, 0x18, 0xa9, 0xfa, 0x8d, 0xd8, 0x79, 0xe0, 0x05, 0xc3, + 0x79, 0xe0, 0x6c, 0xc2, 0x79, 0x60, 0x42, 0xa7, 0x7d, 0xcf, 0x55, 0xe0, 0x5b, 0xe5, 0x2a, 0xf0, + 0x1b, 0x16, 0x1b, 0xb5, 0xc5, 0xd5, 0x1a, 0x77, 0xc7, 0x44, 0x17, 0x61, 0x98, 0xed, 0x70, 0xec, + 0x91, 0xb9, 0xb4, 0xa8, 0xb3, 0xdc, 0x6d, 0xab, 0x31, 0x18, 0xeb, 0x34, 0xe8, 0x3c, 0x0c, 0x85, + 0xc4, 0x09, 0xea, 0x9b, 0x6a, 0x7b, 0x17, 0xe6, 0x6f, 0x0e, 0xc3, 0x0a, 0x8b, 0xde, 0x88, 0xa3, + 0xcd, 0x16, 0xf3, 0x1f, 0xad, 0xea, 0xed, 0xe1, 0x4b, 0x24, 0x3f, 0xc4, 0xac, 0x7d, 0x13, 0x50, + 0x9a, 0xbe, 0x87, 0x78, 0x88, 0x65, 0x33, 0x1e, 0x62, 0x29, 0x15, 0x0b, 0xf1, 0xaf, 0x2d, 0x18, + 0xab, 0xfa, 0x0d, 0xba, 0x74, 0xbf, 0x93, 0xd6, 0xa9, 0x1e, 0x6a, 0x7b, 0xa0, 0x43, 0xa8, 0xed, + 0x47, 0xa1, 0xbf, 0xea, 0x37, 0xba, 0xc4, 0x6c, 0xfc, 0xff, 0x2d, 0x18, 0xac, 0xfa, 0x8d, 0x23, + 0xb0, 0x8d, 0xbc, 0x6a, 0xda, 0x46, 0x1e, 0xca, 0x99, 0x37, 0x39, 0xe6, 0x90, 0xff, 0xaf, 0x0f, + 0x46, 0x69, 0x3b, 0xfd, 0x0d, 0x39, 0x94, 0x46, 0xb7, 0x59, 0x3d, 0x74, 0x1b, 0xbd, 0x06, 0xf8, + 0xcd, 0xa6, 0x7f, 0x3b, 0x39, 0xac, 0xcb, 0x0c, 0x8a, 0x05, 0x16, 0x3d, 0x03, 0x43, 0xad, 0x80, + 0xec, 0xb8, 0xbe, 0x90, 0xaf, 0x35, 0x4b, 0x53, 0x55, 0xc0, 0xb1, 0xa2, 0xa0, 0x77, 0xe3, 0xd0, + 0xf5, 0xa8, 0x2c, 0x51, 0xf7, 0xbd, 0x06, 0x37, 0x1f, 0x14, 0x45, 0x3e, 0x18, 0x0d, 0x8e, 0x0d, + 0x2a, 0x74, 0x13, 0x4a, 0xec, 0x3f, 0xdb, 0x76, 0x0e, 0x9e, 0x89, 0x5a, 0x64, 0xc8, 0x14, 0x0c, + 0x70, 0xcc, 0x0b, 0x3d, 0x07, 0x10, 0xc9, 0x9c, 0x0a, 0xa1, 0x88, 0xdd, 0xa7, 0xee, 0x22, 0x2a, + 0xdb, 0x42, 0x88, 0x35, 0x2a, 0xf4, 0x34, 0x94, 0x22, 0xc7, 0x6d, 0x5e, 0x75, 0x3d, 0x66, 0x80, + 0xa6, 0xed, 0x17, 0x89, 0x2a, 0x05, 0x10, 0xc7, 0x78, 0x2a, 0x0b, 0xb2, 0xa8, 0x2c, 0x3c, 0x0f, + 0xff, 0x10, 0xa3, 0x66, 0xb2, 0xe0, 0x55, 0x05, 0xc5, 0x1a, 0x05, 0xda, 0x84, 0x53, 0xae, 0xc7, + 0x72, 0xa7, 0x90, 0xda, 0x96, 0xdb, 0x5a, 0xbb, 0x5a, 0xbb, 0x41, 0x02, 0x77, 0x7d, 0x77, 0xde, + 0xa9, 0x6f, 0x11, 0x4f, 0xe6, 0x18, 0x96, 0xa9, 0xe7, 0x4f, 0x55, 0x3a, 0xd0, 0xe2, 0x8e, 0x9c, + 0xec, 0xe7, 0xd9, 0x7c, 0xbf, 0x56, 0x43, 0x4f, 0x19, 0x5b, 0xc7, 0x09, 0x7d, 0xeb, 0xd8, 0xdf, + 0x2b, 0x0f, 0x5c, 0xab, 0x69, 0xa1, 0x41, 0x5e, 0x82, 0xe3, 0x55, 0xbf, 0x51, 0xf5, 0x83, 0x68, + 0xd9, 0x0f, 0x6e, 0x3b, 0x41, 0x43, 0x4e, 0xaf, 0xb2, 0x0c, 0x8e, 0x42, 0xf7, 0xcf, 0x7e, 0xbe, + 0xbb, 0x18, 0x81, 0x4f, 0x9e, 0x67, 0x12, 0xdb, 0x01, 0x9f, 0xf4, 0xd5, 0x99, 0xec, 0xa0, 0xb2, + 0x0f, 0x5d, 0x72, 0x22, 0x82, 0xae, 0xc1, 0x68, 0x5d, 0x3f, 0x46, 0x45, 0xf1, 0x27, 0xe5, 0x41, + 0x66, 0x9c, 0xb1, 0x99, 0xe7, 0xae, 0x59, 0xde, 0xfe, 0xac, 
0xa8, 0x84, 0x2b, 0x22, 0xb8, 0xdb, + 0x64, 0x2f, 0x69, 0xb8, 0x65, 0x7a, 0x92, 0x42, 0x7e, 0xe8, 0x39, 0x6e, 0xfa, 0xed, 0x98, 0x9e, + 0xc4, 0xfe, 0x6e, 0x38, 0x91, 0xac, 0xbe, 0xe7, 0x5c, 0xe0, 0x0b, 0x30, 0x19, 0xe8, 0x05, 0xb5, + 0x5c, 0x6f, 0xc7, 0x79, 0x4a, 0x89, 0x04, 0x12, 0xa7, 0xe9, 0xed, 0x17, 0x61, 0x92, 0x5e, 0x7e, + 0x95, 0x20, 0xc7, 0x7a, 0xb9, 0x7b, 0x94, 0x98, 0xff, 0xd0, 0xcf, 0x0e, 0xa2, 0x44, 0xe2, 0x1f, + 0xf4, 0x49, 0x18, 0x0b, 0xc9, 0x55, 0xd7, 0x6b, 0xdf, 0x91, 0xea, 0xaf, 0x0e, 0x6f, 0x59, 0x6b, + 0x4b, 0x3a, 0x25, 0xbf, 0x3f, 0x98, 0x30, 0x9c, 0xe0, 0x86, 0xb6, 0x61, 0xec, 0xb6, 0xeb, 0x35, + 0xfc, 0xdb, 0xa1, 0xe4, 0x3f, 0x94, 0xaf, 0x4b, 0xbf, 0xc9, 0x29, 0x13, 0x6d, 0x34, 0xaa, 0xbb, + 0x69, 0x30, 0xc3, 0x09, 0xe6, 0x74, 0xb1, 0x07, 0x6d, 0x6f, 0x2e, 0xbc, 0x1e, 0x12, 0xfe, 0x3a, + 0x51, 0x2c, 0x76, 0x2c, 0x81, 0x38, 0xc6, 0xd3, 0xc5, 0xce, 0xfe, 0x5c, 0x0a, 0xfc, 0x36, 0xcf, + 0x32, 0x23, 0x16, 0x3b, 0x56, 0x50, 0xac, 0x51, 0xd0, 0xcd, 0x90, 0xfd, 0x5b, 0xf5, 0x3d, 0xec, + 0xfb, 0x91, 0xdc, 0x3e, 0x59, 0x96, 0x34, 0x0d, 0x8e, 0x0d, 0x2a, 0xb4, 0x0c, 0x28, 0x6c, 0xb7, + 0x5a, 0x4d, 0xe6, 0x1e, 0xe7, 0x34, 0x19, 0x2b, 0xee, 0x37, 0x54, 0xe4, 0x51, 0xb2, 0x6b, 0x29, + 0x2c, 0xce, 0x28, 0x41, 0xcf, 0xc5, 0x75, 0xd1, 0xd4, 0x7e, 0xd6, 0x54, 0x6e, 0x77, 0xab, 0xf1, + 0x76, 0x4a, 0x1c, 0x5a, 0x82, 0xc1, 0x70, 0x37, 0xac, 0x47, 0xcd, 0xb0, 0x53, 0x4e, 0xba, 0x1a, + 0x23, 0xd1, 0x52, 0xa2, 0xf2, 0x22, 0x58, 0x96, 0x45, 0x75, 0x98, 0x12, 0x1c, 0x17, 0x36, 0x1d, + 0x4f, 0x65, 0xca, 0xe2, 0x6f, 0x05, 0x2e, 0xde, 0xdb, 0x2b, 0x4f, 0x89, 0x9a, 0x75, 0xf4, 0xfe, + 0x5e, 0x99, 0x2e, 0x8e, 0x0c, 0x0c, 0xce, 0xe2, 0xc6, 0x27, 0x5f, 0xbd, 0xee, 0x6f, 0xb7, 0xaa, + 0x81, 0xbf, 0xee, 0x36, 0x49, 0x27, 0xdb, 0x65, 0xcd, 0xa0, 0x14, 0x93, 0xcf, 0x80, 0xe1, 0x04, + 0x37, 0xfb, 0xb3, 0x4c, 0x76, 0xac, 0xb9, 0x1b, 0x9e, 0x13, 0xb5, 0x03, 0x82, 0xb6, 0x61, 0xb4, + 0xc5, 0x76, 0x17, 0x91, 0xfb, 0x45, 0xcc, 0xf5, 0x17, 0x7a, 0xd4, 0x7f, 0xdd, 0x66, 0xd9, 0xeb, + 0x0c, 0x5f, 0xbb, 0xaa, 0xce, 0x0e, 0x9b, 0xdc, 0xed, 0x7f, 0x71, 0x92, 0x49, 0x1f, 0x35, 0xae, + 0xd4, 0x1a, 0x14, 0x4f, 0x93, 0xc4, 0x35, 0x76, 0x26, 0x5f, 0xc3, 0x1b, 0x0f, 0x8b, 0x78, 0xde, + 0x84, 0x65, 0x59, 0xf4, 0x09, 0x18, 0xa3, 0xb7, 0x42, 0x25, 0x01, 0x84, 0xd3, 0xc7, 0xf2, 0x43, + 0xc8, 0x28, 0x2a, 0x3d, 0x2f, 0x94, 0x5e, 0x18, 0x27, 0x98, 0xa1, 0x37, 0x98, 0x6f, 0x9b, 0x64, + 0x5d, 0xe8, 0x85, 0xb5, 0xee, 0xc6, 0x26, 0xd9, 0x6a, 0x4c, 0x50, 0x1b, 0xa6, 0xd2, 0xd9, 0x2f, + 0xc3, 0x69, 0x3b, 0x5f, 0xbc, 0x4e, 0x27, 0xb0, 0x8c, 0x13, 0xf8, 0xa4, 0x71, 0x21, 0xce, 0xe2, + 0x8f, 0xae, 0x26, 0x73, 0x13, 0x16, 0x0d, 0xc5, 0x73, 0x2a, 0x3f, 0xe1, 0x68, 0xc7, 0xb4, 0x84, + 0x1b, 0x70, 0x5a, 0x4b, 0xef, 0x76, 0x29, 0x70, 0x98, 0xf7, 0x88, 0xcb, 0xb6, 0x53, 0x4d, 0x2e, + 0x7a, 0xe4, 0xde, 0x5e, 0xf9, 0xf4, 0x5a, 0x27, 0x42, 0xdc, 0x99, 0x0f, 0xba, 0x06, 0xc7, 0x79, + 0x00, 0x84, 0x45, 0xe2, 0x34, 0x9a, 0xae, 0xa7, 0x04, 0x2f, 0xbe, 0xe4, 0x4f, 0xde, 0xdb, 0x2b, + 0x1f, 0x9f, 0xcb, 0x22, 0xc0, 0xd9, 0xe5, 0xd0, 0xab, 0x50, 0x6a, 0x78, 0xa1, 0xe8, 0x83, 0x01, + 0x23, 0x83, 0x5e, 0x69, 0x71, 0xb5, 0xa6, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x02, 0x68, 0x83, 0x1b, + 0x27, 0x94, 0xba, 0x6a, 0x30, 0x15, 0x3a, 0x2e, 0xa9, 0xd1, 0x35, 0x9e, 0x40, 0x73, 0xab, 0x9c, + 0x7a, 0x19, 0x64, 0xbc, 0x8e, 0x36, 0x18, 0xa3, 0xd7, 0x01, 0x89, 0x4c, 0x0d, 0x73, 0x75, 0x96, + 0x58, 0x88, 0x1d, 0x8d, 0x43, 0xe6, 0xa3, 0xdc, 0x5a, 0x8a, 0x02, 0x67, 0x94, 0x42, 0x97, 0xe9, + 0xae, 0xa2, 0x43, 0xc5, 0xae, 0xa5, 0xf2, 0xb4, 0x2e, 0x92, 0x56, 0x40, 0x98, 0x93, 
0x9b, 0xc9, + 0x11, 0x27, 0xca, 0xa1, 0x06, 0x9c, 0x72, 0xda, 0x91, 0xcf, 0xec, 0x3e, 0x26, 0xe9, 0x9a, 0xbf, + 0x45, 0x3c, 0x66, 0x72, 0x1d, 0x62, 0x21, 0xe9, 0x4e, 0xcd, 0x75, 0xa0, 0xc3, 0x1d, 0xb9, 0x50, + 0x89, 0x5c, 0x25, 0x66, 0x07, 0x33, 0x20, 0x5e, 0x46, 0x72, 0xf6, 0x17, 0x61, 0x78, 0xd3, 0x0f, + 0xa3, 0x55, 0x12, 0xdd, 0xf6, 0x83, 0x2d, 0x11, 0x1a, 0x3a, 0x0e, 0xc7, 0x1f, 0xa3, 0xb0, 0x4e, + 0x47, 0xaf, 0xdc, 0xcc, 0x21, 0xa8, 0xb2, 0xc8, 0x7c, 0x31, 0x86, 0xe2, 0x3d, 0xe6, 0x32, 0x07, + 0x63, 0x89, 0x97, 0xa4, 0x95, 0xea, 0x02, 0xf3, 0xab, 0x48, 0x90, 0x56, 0xaa, 0x0b, 0x58, 0xe2, + 0xe9, 0x74, 0x0d, 0x37, 0x9d, 0x80, 0x54, 0x03, 0xbf, 0x4e, 0x42, 0x2d, 0x09, 0xc4, 0xc3, 0x3c, + 0xf0, 0x35, 0x9d, 0xae, 0xb5, 0x2c, 0x02, 0x9c, 0x5d, 0x0e, 0x91, 0x74, 0x6a, 0xc3, 0xb1, 0x7c, + 0x83, 0x58, 0x5a, 0x9e, 0xe9, 0x31, 0xbb, 0xa1, 0x07, 0x13, 0x2a, 0xa9, 0x22, 0x0f, 0x75, 0x1d, + 0x4e, 0x8f, 0xb3, 0xb9, 0xdd, 0x7b, 0x9c, 0x6c, 0x65, 0x62, 0xac, 0x24, 0x38, 0xe1, 0x14, 0x6f, + 0x23, 0xe6, 0xe1, 0x44, 0xd7, 0x98, 0x87, 0x17, 0xa0, 0x14, 0xb6, 0x6f, 0x35, 0xfc, 0x6d, 0xc7, + 0xf5, 0x98, 0x5f, 0x85, 0x76, 0xf7, 0xab, 0x49, 0x04, 0x8e, 0x69, 0xd0, 0x32, 0x0c, 0x39, 0xd2, + 0x7e, 0x88, 0xf2, 0x63, 0x55, 0x29, 0xab, 0x21, 0x0f, 0xdf, 0x22, 0x2d, 0x86, 0xaa, 0x2c, 0x7a, + 0x05, 0x46, 0xc5, 0x03, 0x7e, 0x91, 0x87, 0x78, 0xca, 0x7c, 0x65, 0x59, 0xd3, 0x91, 0xd8, 0xa4, + 0x45, 0xd7, 0x61, 0x38, 0xf2, 0x9b, 0xec, 0xa9, 0x20, 0x15, 0xf3, 0x4e, 0xe4, 0x47, 0x5d, 0x5c, + 0x53, 0x64, 0xba, 0xda, 0x5c, 0x15, 0xc5, 0x3a, 0x1f, 0xb4, 0xc6, 0xe7, 0x3b, 0x4b, 0xf9, 0x40, + 0x42, 0x91, 0xc8, 0xf6, 0x74, 0x9e, 0x53, 0x1c, 0x23, 0x33, 0x97, 0x83, 0x28, 0x89, 0x75, 0x36, + 0xe8, 0x12, 0x4c, 0xb6, 0x02, 0xd7, 0x67, 0x73, 0x42, 0x99, 0x8e, 0xa7, 0xcd, 0x04, 0x6f, 0xd5, + 0x24, 0x01, 0x4e, 0x97, 0x61, 0xf1, 0x17, 0x04, 0x70, 0xfa, 0x24, 0x4f, 0x52, 0xc3, 0xaf, 0xd2, + 0x1c, 0x86, 0x15, 0x16, 0xad, 0xb0, 0x9d, 0x98, 0x6b, 0x81, 0xa6, 0x67, 0xf2, 0xc3, 0x63, 0xe9, + 0xda, 0x22, 0x2e, 0xbc, 0xaa, 0xbf, 0x38, 0xe6, 0x80, 0x1a, 0x5a, 0x6e, 0x58, 0x7a, 0x05, 0x08, + 0xa7, 0x4f, 0x75, 0xf0, 0xca, 0x4c, 0xdc, 0xca, 0x62, 0x81, 0xc0, 0x00, 0x87, 0x38, 0xc1, 0x13, + 0x7d, 0x04, 0x26, 0x44, 0x38, 0xd0, 0xb8, 0x9b, 0x4e, 0xc7, 0x4f, 0x2f, 0x70, 0x02, 0x87, 0x53, + 0xd4, 0x3c, 0x49, 0x8c, 0x73, 0xab, 0x49, 0xc4, 0xd6, 0x77, 0xd5, 0xf5, 0xb6, 0xc2, 0xe9, 0x33, + 0x6c, 0x7f, 0x10, 0x49, 0x62, 0x92, 0x58, 0x9c, 0x51, 0x02, 0xad, 0xc1, 0x44, 0x2b, 0x20, 0x64, + 0x9b, 0x09, 0xfa, 0xe2, 0x3c, 0x2b, 0xf3, 0xf0, 0x23, 0xb4, 0x25, 0xd5, 0x04, 0x6e, 0x3f, 0x03, + 0x86, 0x53, 0x1c, 0xd0, 0x6d, 0x18, 0xf2, 0x77, 0x48, 0xb0, 0x49, 0x9c, 0xc6, 0xf4, 0xd9, 0x0e, + 0x0f, 0x82, 0xc4, 0xe1, 0x76, 0x4d, 0xd0, 0x26, 0xdc, 0x4d, 0x24, 0xb8, 0xbb, 0xbb, 0x89, 0xac, + 0x0c, 0xfd, 0xef, 0x16, 0x9c, 0x94, 0xd6, 0xa1, 0x5a, 0x8b, 0xf6, 0xfa, 0x82, 0xef, 0x85, 0x51, + 0xc0, 0x03, 0x66, 0x3c, 0x92, 0x1f, 0x44, 0x62, 0x2d, 0xa7, 0x90, 0x52, 0x44, 0x9f, 0xcc, 0xa3, + 0x08, 0x71, 0x7e, 0x8d, 0xf4, 0x6a, 0x1a, 0x92, 0x48, 0x6e, 0x46, 0x73, 0xe1, 0xf2, 0x1b, 0x8b, + 0xab, 0xd3, 0x8f, 0xf2, 0x68, 0x1f, 0x74, 0x31, 0xd4, 0x92, 0x48, 0x9c, 0xa6, 0x47, 0x17, 0xa1, + 0xe0, 0x87, 0xd3, 0x8f, 0x75, 0x48, 0x27, 0xec, 0x37, 0xae, 0xd5, 0xb8, 0xdb, 0xe1, 0xb5, 0x1a, + 0x2e, 0xf8, 0xa1, 0x4c, 0xd4, 0x42, 0xef, 0x63, 0xe1, 0xf4, 0xe3, 0x5c, 0x6d, 0x29, 0x13, 0xb5, + 0x30, 0x20, 0x8e, 0xf1, 0x68, 0x13, 0xc6, 0x43, 0xe3, 0xde, 0x1b, 0x4e, 0x9f, 0x63, 0x3d, 0xf5, + 0x78, 0xde, 0xa0, 0x19, 0xd4, 0x5a, 0x06, 0x05, 0x93, 0x0b, 0x4e, 0xb2, 0xe5, 0xab, 0x4b, 0xbb, + 0x79, 0x87, 
0xd3, 0x4f, 0x74, 0x59, 0x5d, 0x1a, 0xb1, 0xbe, 0xba, 0x74, 0x1e, 0x38, 0xc1, 0x73, + 0xe6, 0xbb, 0x60, 0x32, 0x25, 0x2e, 0x1d, 0xc4, 0xc5, 0x7e, 0x66, 0x0b, 0x46, 0x8d, 0x29, 0xf9, + 0x40, 0xdd, 0x3b, 0x7e, 0xa7, 0x04, 0x25, 0x65, 0x76, 0x47, 0x17, 0x4c, 0x8f, 0x8e, 0x93, 0x49, + 0x8f, 0x8e, 0xa1, 0xaa, 0xdf, 0x30, 0x9c, 0x38, 0xd6, 0x32, 0x62, 0x42, 0xe6, 0x6d, 0x80, 0xbd, + 0xbf, 0x92, 0xd1, 0x4c, 0x09, 0xc5, 0x9e, 0x5d, 0x43, 0xfa, 0x3a, 0x5a, 0x27, 0x2e, 0xc1, 0xa4, + 0xe7, 0x33, 0x19, 0x9d, 0x34, 0xa4, 0x00, 0xc6, 0xe4, 0xac, 0x92, 0x1e, 0x64, 0x29, 0x41, 0x80, + 0xd3, 0x65, 0x68, 0x85, 0x5c, 0x50, 0x4a, 0x9a, 0x43, 0xb8, 0x1c, 0x85, 0x05, 0x96, 0xde, 0x0d, + 0xf9, 0xaf, 0x70, 0x7a, 0x22, 0xff, 0x6e, 0xc8, 0x0b, 0x25, 0x85, 0xb1, 0x50, 0x0a, 0x63, 0x4c, + 0xfb, 0xdf, 0xf2, 0x1b, 0x95, 0xaa, 0x10, 0xf3, 0xb5, 0x80, 0xc6, 0x8d, 0x4a, 0x15, 0x73, 0x1c, + 0x9a, 0x83, 0x01, 0xf6, 0x23, 0x9c, 0x1e, 0xc9, 0x0f, 0xca, 0xc3, 0x4a, 0x68, 0x89, 0xe2, 0x58, + 0x01, 0x2c, 0x0a, 0x32, 0xed, 0x2e, 0xbd, 0x1b, 0x31, 0xed, 0xee, 0xe0, 0x7d, 0x6a, 0x77, 0x25, + 0x03, 0x1c, 0xf3, 0x42, 0x77, 0xe0, 0xb8, 0x71, 0x1f, 0x55, 0xcf, 0x86, 0x20, 0xdf, 0xf0, 0x9b, + 0x20, 0x9e, 0x3f, 0x2d, 0x1a, 0x7d, 0xbc, 0x92, 0xc5, 0x09, 0x67, 0x57, 0x80, 0x9a, 0x30, 0x59, + 0x4f, 0xd5, 0x3a, 0xd4, 0x7b, 0xad, 0x6a, 0x5e, 0xa4, 0x6b, 0x4c, 0x33, 0x46, 0xaf, 0xc0, 0xd0, + 0xdb, 0x7e, 0xc8, 0x8e, 0x48, 0x71, 0x35, 0x91, 0x51, 0x25, 0x86, 0xde, 0xb8, 0x56, 0x63, 0xf0, + 0xfd, 0xbd, 0xf2, 0x70, 0xd5, 0x6f, 0xc8, 0xbf, 0x58, 0x15, 0x40, 0x3f, 0x60, 0xc1, 0x4c, 0xfa, + 0xc2, 0xab, 0x1a, 0x3d, 0xda, 0x7b, 0xa3, 0x6d, 0x51, 0xe9, 0xcc, 0x52, 0x2e, 0x3b, 0xdc, 0xa1, + 0x2a, 0xf4, 0x21, 0xba, 0x9e, 0x42, 0xf7, 0x2e, 0x11, 0x59, 0x76, 0x1f, 0x89, 0xd7, 0x13, 0x85, + 0xee, 0xef, 0x95, 0xc7, 0xf9, 0xce, 0xe8, 0xde, 0x95, 0xef, 0xab, 0x44, 0x01, 0xf4, 0xdd, 0x70, + 0x3c, 0x48, 0x6b, 0x50, 0x89, 0x14, 0xc2, 0x9f, 0xea, 0x65, 0x97, 0x4d, 0x0e, 0x38, 0xce, 0x62, + 0x88, 0xb3, 0xeb, 0xb1, 0x7f, 0xd5, 0x62, 0xfa, 0x6d, 0xd1, 0x2c, 0x12, 0xb6, 0x9b, 0x47, 0x91, + 0xdb, 0x7b, 0xc9, 0xb0, 0x1d, 0xdf, 0xb7, 0x67, 0xd3, 0x3f, 0xb2, 0x98, 0x67, 0xd3, 0x11, 0x3e, + 0xa3, 0x7a, 0x03, 0x86, 0x22, 0x99, 0x73, 0xbd, 0x43, 0x3a, 0x72, 0xad, 0x51, 0xcc, 0xbb, 0x4b, + 0x5d, 0x72, 0x54, 0x7a, 0x75, 0xc5, 0xc6, 0xfe, 0xfb, 0x7c, 0x04, 0x24, 0xe6, 0x08, 0x4c, 0x74, + 0x8b, 0xa6, 0x89, 0xae, 0xdc, 0xe5, 0x0b, 0x72, 0x4c, 0x75, 0x7f, 0xcf, 0x6c, 0x37, 0x53, 0xee, + 0xbd, 0xdb, 0x5d, 0xea, 0xec, 0x2f, 0x58, 0x00, 0x71, 0xac, 0xfb, 0x1e, 0xb2, 0x6a, 0xbe, 0x44, + 0xaf, 0x35, 0x7e, 0xe4, 0xd7, 0xfd, 0xa6, 0x30, 0x50, 0x9c, 0x8a, 0xad, 0x84, 0x1c, 0xbe, 0xaf, + 0xfd, 0xc6, 0x8a, 0x1a, 0x95, 0x65, 0x64, 0xcd, 0x62, 0x6c, 0xb7, 0x36, 0xa2, 0x6a, 0x7e, 0xd9, + 0x82, 0x63, 0x59, 0x3e, 0xf9, 0xf4, 0x92, 0xcc, 0xd5, 0x9c, 0xca, 0xdd, 0x51, 0x8d, 0xe6, 0x0d, + 0x01, 0xc7, 0x8a, 0xa2, 0xe7, 0x74, 0xa5, 0x07, 0x0b, 0x32, 0x7f, 0x0d, 0x46, 0xab, 0x01, 0xd1, + 0xe4, 0x8b, 0xd7, 0x78, 0xb4, 0x16, 0xde, 0x9e, 0x67, 0x0e, 0x1c, 0xa9, 0xc5, 0xfe, 0x4a, 0x01, + 0x8e, 0x71, 0xa7, 0x9d, 0xb9, 0x1d, 0xdf, 0x6d, 0x54, 0xfd, 0x86, 0x78, 0x49, 0xf9, 0x26, 0x8c, + 0xb4, 0x34, 0xdd, 0x74, 0xa7, 0x80, 0xc9, 0xba, 0x0e, 0x3b, 0xd6, 0xa6, 0xe9, 0x50, 0x6c, 0xf0, + 0x42, 0x0d, 0x18, 0x21, 0x3b, 0x6e, 0x5d, 0x79, 0x7e, 0x14, 0x0e, 0x7c, 0x48, 0xab, 0x5a, 0x96, + 0x34, 0x3e, 0xd8, 0xe0, 0xda, 0xb3, 0xab, 0xad, 0x26, 0xa2, 0xf5, 0x75, 0xf1, 0xf6, 0xf8, 0x51, + 0x0b, 0x1e, 0xca, 0x09, 0xaf, 0x4c, 0xab, 0xbb, 0xcd, 0xdc, 0xa3, 0xc4, 0xb4, 0x55, 0xd5, 0x71, + 0xa7, 0x29, 0x2c, 0xb0, 0xe8, 0xa3, 
0x00, 0xdc, 0xe9, 0x89, 0x78, 0xf5, 0xae, 0x71, 0x68, 0x8d, + 0x10, 0x9a, 0x5a, 0x34, 0x44, 0x59, 0x1e, 0x6b, 0xbc, 0xec, 0x2f, 0xf7, 0x41, 0x3f, 0x73, 0xb2, + 0x41, 0x55, 0x18, 0xdc, 0xe4, 0xc9, 0xba, 0x3a, 0x8e, 0x1b, 0xa5, 0x95, 0xf9, 0xbf, 0xe2, 0x71, + 0xd3, 0xa0, 0x58, 0xb2, 0x41, 0x2b, 0x30, 0xc5, 0x73, 0xa6, 0x35, 0x17, 0x49, 0xd3, 0xd9, 0x95, + 0x6a, 0x5f, 0x9e, 0x06, 0x5c, 0xa9, 0xbf, 0x2b, 0x69, 0x12, 0x9c, 0x55, 0x0e, 0xbd, 0x06, 0x63, + 0xf4, 0x1a, 0xee, 0xb7, 0x23, 0xc9, 0x89, 0x67, 0x4b, 0x53, 0x37, 0x93, 0x35, 0x03, 0x8b, 0x13, + 0xd4, 0xe8, 0x15, 0x18, 0x6d, 0xa5, 0x14, 0xdc, 0xfd, 0xb1, 0x26, 0xc8, 0x54, 0x6a, 0x9b, 0xb4, + 0xcc, 0x2d, 0xbf, 0xcd, 0x1e, 0x21, 0xac, 0x6d, 0x06, 0x24, 0xdc, 0xf4, 0x9b, 0x0d, 0x26, 0x01, + 0xf7, 0x6b, 0x6e, 0xf9, 0x09, 0x3c, 0x4e, 0x95, 0xa0, 0x5c, 0xd6, 0x1d, 0xb7, 0xd9, 0x0e, 0x48, + 0xcc, 0x65, 0xc0, 0xe4, 0xb2, 0x9c, 0xc0, 0xe3, 0x54, 0x89, 0xee, 0x9a, 0xfb, 0xc1, 0xc3, 0xd1, + 0xdc, 0xdb, 0x7f, 0xab, 0x00, 0xc6, 0xd0, 0x7e, 0x07, 0x67, 0x71, 0x7b, 0x15, 0xfa, 0x36, 0x82, + 0x56, 0x5d, 0x38, 0x94, 0x65, 0x7e, 0x59, 0x9c, 0xc2, 0x99, 0x7f, 0x19, 0xfd, 0x8f, 0x59, 0x29, + 0xba, 0xc6, 0x8f, 0x57, 0x03, 0x9f, 0x1e, 0x72, 0x32, 0x9e, 0x9f, 0x7a, 0xfd, 0x32, 0x28, 0xa3, + 0x1c, 0x74, 0x88, 0x7c, 0x2b, 0xde, 0x07, 0x70, 0x0e, 0x86, 0xef, 0x55, 0x4d, 0x84, 0x1b, 0x91, + 0x5c, 0xd0, 0x45, 0x18, 0x16, 0x89, 0xb5, 0xd8, 0x23, 0x0d, 0xbe, 0x98, 0x98, 0xaf, 0xd8, 0x62, + 0x0c, 0xc6, 0x3a, 0x8d, 0xfd, 0x83, 0x05, 0x98, 0xca, 0x78, 0x65, 0xc7, 0x8f, 0x91, 0x0d, 0x37, + 0x8c, 0x54, 0x96, 0x68, 0xed, 0x18, 0xe1, 0x70, 0xac, 0x28, 0xe8, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, + 0xe1, 0x24, 0x5e, 0xb1, 0x08, 0xec, 0x01, 0xf3, 0x2d, 0x9f, 0x85, 0xbe, 0x76, 0x48, 0x64, 0xcc, + 0x6a, 0x75, 0x6c, 0x33, 0xb3, 0x36, 0xc3, 0xd0, 0x2b, 0xe0, 0x86, 0xb2, 0x10, 0x6b, 0x57, 0x40, + 0x6e, 0x23, 0xe6, 0x38, 0xda, 0xb8, 0x88, 0x78, 0x8e, 0x17, 0x89, 0x8b, 0x62, 0x1c, 0x7c, 0x95, + 0x41, 0xb1, 0xc0, 0xda, 0x5f, 0x2a, 0xc2, 0xc9, 0xdc, 0x77, 0xb7, 0xb4, 0xe9, 0xdb, 0xbe, 0xe7, + 0x46, 0xbe, 0x72, 0xc2, 0xe3, 0x01, 0x57, 0x49, 0x6b, 0x73, 0x45, 0xc0, 0xb1, 0xa2, 0x40, 0xe7, + 0xa0, 0x9f, 0x29, 0xc5, 0x53, 0xf9, 0xb2, 0xe7, 0x17, 0x79, 0x04, 0x3e, 0x8e, 0xd6, 0x4e, 0xf5, + 0x62, 0xc7, 0x53, 0xfd, 0x51, 0x2a, 0xc1, 0xf8, 0xcd, 0xe4, 0x81, 0x42, 0x9b, 0xeb, 0xfb, 0x4d, + 0xcc, 0x90, 0xe8, 0x71, 0xd1, 0x5f, 0x09, 0xaf, 0x33, 0xec, 0x34, 0xfc, 0x50, 0xeb, 0xb4, 0x27, + 0x61, 0x70, 0x8b, 0xec, 0x06, 0xae, 0xb7, 0x91, 0xf4, 0x46, 0xbc, 0xc2, 0xc1, 0x58, 0xe2, 0xcd, + 0xd4, 0xad, 0x83, 0x87, 0x91, 0xba, 0x55, 0x9f, 0x01, 0x43, 0x5d, 0xc5, 0x93, 0x1f, 0x2a, 0xc2, + 0x38, 0x9e, 0x5f, 0x7c, 0x6f, 0x20, 0xae, 0xa7, 0x07, 0xe2, 0x30, 0x32, 0x9c, 0x1e, 0x6c, 0x34, + 0x7e, 0xc9, 0x82, 0x71, 0x96, 0xde, 0x4b, 0x04, 0xcd, 0x70, 0x7d, 0xef, 0x08, 0xae, 0x02, 0x8f, + 0x42, 0x7f, 0x40, 0x2b, 0x4d, 0x26, 0xca, 0x66, 0x2d, 0xc1, 0x1c, 0x87, 0x4e, 0x41, 0x1f, 0x6b, + 0x02, 0x1d, 0xbc, 0x11, 0xbe, 0x05, 0x2f, 0x3a, 0x91, 0x83, 0x19, 0x94, 0xc5, 0x9f, 0xc3, 0xa4, + 0xd5, 0x74, 0x79, 0xa3, 0x63, 0x97, 0x85, 0x77, 0x47, 0x44, 0x8e, 0xcc, 0xa6, 0xbd, 0xb3, 0xf8, + 0x73, 0xd9, 0x2c, 0x3b, 0x5f, 0xb3, 0xff, 0xa2, 0x00, 0x67, 0x32, 0xcb, 0xf5, 0x1c, 0x7f, 0xae, + 0x73, 0xe9, 0x07, 0x99, 0x86, 0xa9, 0x78, 0x84, 0xbe, 0xde, 0x7d, 0xbd, 0x4a, 0xff, 0xfd, 0x3d, + 0x84, 0x85, 0xcb, 0xec, 0xb2, 0x77, 0x49, 0x58, 0xb8, 0xcc, 0xb6, 0xe5, 0xa8, 0x09, 0xfe, 0xa6, + 0x90, 0xf3, 0x2d, 0x4c, 0x61, 0x70, 0x9e, 0xee, 0x33, 0x0c, 0x19, 0xca, 0x4b, 0x38, 0xdf, 0x63, + 0x38, 0x0c, 0x2b, 0x2c, 0x9a, 0x83, 0xf1, 0x6d, 0xd7, 0xa3, 
0x9b, 0xcf, 0xae, 0x29, 0x8a, 0x2b, + 0x5b, 0xc6, 0x8a, 0x89, 0xc6, 0x49, 0x7a, 0xe4, 0x6a, 0x21, 0xe3, 0xf8, 0xd7, 0xbd, 0x72, 0xa0, + 0x55, 0x37, 0x6b, 0xba, 0x73, 0xa8, 0x5e, 0xcc, 0x08, 0x1f, 0xb7, 0xa2, 0xe9, 0x89, 0x8a, 0xbd, + 0xeb, 0x89, 0x46, 0xb2, 0x75, 0x44, 0x33, 0xaf, 0xc0, 0xe8, 0x7d, 0xdb, 0x46, 0xec, 0x6f, 0x14, + 0xe1, 0xe1, 0x0e, 0xcb, 0x9e, 0xef, 0xf5, 0xc6, 0x18, 0x68, 0x7b, 0x7d, 0x6a, 0x1c, 0xaa, 0x70, + 0x6c, 0xbd, 0xdd, 0x6c, 0xee, 0xb2, 0x47, 0x4d, 0xa4, 0x21, 0x29, 0x84, 0x4c, 0x29, 0x95, 0x23, + 0xc7, 0x96, 0x33, 0x68, 0x70, 0x66, 0x49, 0x7a, 0xc5, 0xa2, 0x27, 0xc9, 0xae, 0x62, 0x95, 0xb8, + 0x62, 0x61, 0x1d, 0x89, 0x4d, 0x5a, 0x74, 0x09, 0x26, 0x9d, 0x1d, 0xc7, 0xe5, 0x71, 0xf7, 0x25, + 0x03, 0x7e, 0xc7, 0x52, 0xba, 0xe8, 0xb9, 0x24, 0x01, 0x4e, 0x97, 0x41, 0xaf, 0x03, 0xf2, 0x6f, + 0xb1, 0x87, 0x12, 0x8d, 0x4b, 0xc4, 0x13, 0x56, 0x77, 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x96, + 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0x91, 0xd1, 0x06, 0xf2, 0x23, 0xa3, 0x75, 0xde, 0x17, 0xbb, 0x66, + 0x00, 0xbb, 0x08, 0xa3, 0x07, 0x74, 0xff, 0xb5, 0xff, 0x8d, 0x05, 0x4a, 0x41, 0x6c, 0x06, 0x1f, + 0x7e, 0x85, 0xf9, 0x27, 0x73, 0xd5, 0xb6, 0x16, 0xae, 0xe9, 0xb8, 0xe6, 0x9f, 0x1c, 0x23, 0xb1, + 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0x57, 0x6c, 0xdc, 0x0a, 0x44, 0x6c, 0x44, 0x45, 0x81, 0x3e, 0x06, + 0x83, 0x0d, 0x77, 0xc7, 0x0d, 0x85, 0x72, 0xec, 0xc0, 0xc6, 0xb8, 0x78, 0xeb, 0x5c, 0xe4, 0x6c, + 0xb0, 0xe4, 0x67, 0xff, 0x50, 0x21, 0xee, 0x93, 0x37, 0xda, 0x7e, 0xe4, 0x1c, 0xc1, 0x49, 0x7e, + 0xc9, 0x38, 0xc9, 0x1f, 0xcf, 0x1e, 0x68, 0xad, 0x49, 0xb9, 0x27, 0xf8, 0xb5, 0xc4, 0x09, 0xfe, + 0x44, 0x77, 0x56, 0x9d, 0x4f, 0xee, 0x7f, 0x60, 0xc1, 0xa4, 0x41, 0x7f, 0x04, 0x07, 0xc8, 0xb2, + 0x79, 0x80, 0x3c, 0xd2, 0xf5, 0x1b, 0x72, 0x0e, 0x8e, 0xef, 0x2f, 0x26, 0xda, 0xce, 0x0e, 0x8c, + 0xb7, 0xa1, 0x6f, 0xd3, 0x09, 0x1a, 0x9d, 0xd2, 0xe2, 0xa4, 0x0a, 0xcd, 0x5e, 0x76, 0x02, 0xe1, + 0xa9, 0xf0, 0x8c, 0xec, 0x75, 0x0a, 0xea, 0xea, 0xa5, 0xc0, 0xaa, 0x42, 0x2f, 0xc1, 0x40, 0x58, + 0xf7, 0x5b, 0xea, 0xcd, 0x14, 0xcb, 0xbc, 0x5a, 0x63, 0x90, 0xfd, 0xbd, 0x32, 0x32, 0xab, 0xa3, + 0x60, 0x2c, 0xe8, 0xd1, 0x9b, 0x30, 0xca, 0x7e, 0x29, 0xb7, 0xc1, 0x62, 0xbe, 0x06, 0xa3, 0xa6, + 0x13, 0x72, 0x9f, 0x5a, 0x03, 0x84, 0x4d, 0x56, 0x33, 0x1b, 0x50, 0x52, 0x9f, 0xf5, 0x40, 0xad, + 0xdd, 0xff, 0xaa, 0x08, 0x53, 0x19, 0x73, 0x0e, 0x85, 0xc6, 0x48, 0x5c, 0xec, 0x71, 0xaa, 0xbe, + 0xc3, 0xb1, 0x08, 0xd9, 0x05, 0xaa, 0x21, 0xe6, 0x56, 0xcf, 0x95, 0x5e, 0x0f, 0x49, 0xb2, 0x52, + 0x0a, 0xea, 0x5e, 0x29, 0xad, 0xec, 0xc8, 0xba, 0x9a, 0x56, 0xa4, 0x5a, 0xfa, 0x40, 0xc7, 0xf4, + 0x37, 0xfa, 0xe0, 0x58, 0x56, 0xcc, 0x5a, 0xf4, 0x99, 0x44, 0x3a, 0xe7, 0x17, 0x3a, 0xf5, 0xb0, + 0x5e, 0x92, 0xe7, 0x78, 0x16, 0x71, 0x28, 0x67, 0xcd, 0x04, 0xcf, 0x5d, 0xbb, 0x59, 0xd4, 0xc9, + 0x22, 0xe0, 0x04, 0x3c, 0x0d, 0xb7, 0xdc, 0x3e, 0x3e, 0xd0, 0x73, 0x03, 0x44, 0xfe, 0xee, 0x30, + 0xe1, 0x92, 0x24, 0xc1, 0xdd, 0x5d, 0x92, 0x64, 0xcd, 0xa8, 0x02, 0x03, 0x75, 0xee, 0xeb, 0x52, + 0xec, 0xbe, 0x85, 0x71, 0x47, 0x17, 0xb5, 0x01, 0x0b, 0x07, 0x17, 0xc1, 0x60, 0xc6, 0x85, 0x61, + 0xad, 0x63, 0x1e, 0xe8, 0xe4, 0xd9, 0xa2, 0x07, 0x9f, 0xd6, 0x05, 0x0f, 0x74, 0x02, 0xfd, 0xa8, + 0x05, 0x89, 0x07, 0x2f, 0x4a, 0x29, 0x67, 0xe5, 0x2a, 0xe5, 0xce, 0x42, 0x5f, 0xe0, 0x37, 0x49, + 0x32, 0x11, 0x32, 0xf6, 0x9b, 0x04, 0x33, 0x0c, 0xa5, 0x88, 0x62, 0x55, 0xcb, 0x88, 0x7e, 0x8d, + 0x14, 0x17, 0xc4, 0x47, 0xa1, 0xbf, 0x49, 0x76, 0x48, 0x33, 0x99, 0xaf, 0xee, 0x2a, 0x05, 0x62, + 0x8e, 0xb3, 0x7f, 0xa9, 0x0f, 0x4e, 0x77, 0x0c, 0x47, 0x45, 0x2f, 0x63, 0x1b, 0x4e, 
0x44, 0x6e, + 0x3b, 0xbb, 0xc9, 0xc4, 0x52, 0x97, 0x38, 0x18, 0x4b, 0x3c, 0x7b, 0xfe, 0xc9, 0xf3, 0x43, 0x24, + 0x54, 0x98, 0x22, 0x2d, 0x84, 0xc0, 0x9a, 0x2a, 0xb1, 0xe2, 0x61, 0xa8, 0xc4, 0x9e, 0x03, 0x08, + 0xc3, 0x26, 0x77, 0x0b, 0x6c, 0x88, 0x77, 0xa5, 0x71, 0x1e, 0x91, 0xda, 0x55, 0x81, 0xc1, 0x1a, + 0x15, 0x5a, 0x84, 0x89, 0x56, 0xe0, 0x47, 0x5c, 0x23, 0xbc, 0xc8, 0x3d, 0x67, 0xfb, 0xcd, 0x48, + 0x40, 0xd5, 0x04, 0x1e, 0xa7, 0x4a, 0xa0, 0x17, 0x61, 0x58, 0x44, 0x07, 0xaa, 0xfa, 0x7e, 0x53, + 0x28, 0xa1, 0x94, 0x33, 0x69, 0x2d, 0x46, 0x61, 0x9d, 0x4e, 0x2b, 0xc6, 0xd4, 0xcc, 0x83, 0x99, + 0xc5, 0xb8, 0xaa, 0x59, 0xa3, 0x4b, 0x84, 0xc2, 0x1e, 0xea, 0x29, 0x14, 0x76, 0xac, 0x96, 0x2b, + 0xf5, 0x6c, 0xf5, 0x84, 0xae, 0x8a, 0xac, 0xaf, 0xf6, 0xc1, 0x94, 0x98, 0x38, 0x0f, 0x7a, 0xba, + 0x5c, 0x4f, 0x4f, 0x97, 0xc3, 0x50, 0xdc, 0xbd, 0x37, 0x67, 0x8e, 0x7a, 0xce, 0xfc, 0xb0, 0x05, + 0xa6, 0xa4, 0x86, 0xfe, 0xd7, 0xdc, 0xcc, 0x7c, 0x2f, 0xe6, 0x4a, 0x7e, 0x71, 0x98, 0xe1, 0x77, + 0x96, 0xa3, 0xcf, 0xfe, 0xd7, 0x16, 0x3c, 0xd2, 0x95, 0x23, 0x5a, 0x82, 0x12, 0x13, 0x27, 0xb5, + 0x8b, 0xde, 0x13, 0xca, 0xb3, 0x5e, 0x22, 0x72, 0xa4, 0xdb, 0xb8, 0x24, 0x5a, 0x4a, 0xa5, 0x40, + 0x7c, 0x32, 0x23, 0x05, 0xe2, 0x71, 0xa3, 0x7b, 0xee, 0x33, 0x07, 0xe2, 0x17, 0xe9, 0x89, 0x63, + 0xbc, 0x6a, 0x43, 0x1f, 0x30, 0x94, 0x8e, 0x76, 0x42, 0xe9, 0x88, 0x4c, 0x6a, 0xed, 0x0c, 0xf9, + 0x08, 0x4c, 0xb0, 0xb0, 0x81, 0xec, 0x9d, 0x87, 0x78, 0x6f, 0x57, 0x88, 0x7d, 0xb9, 0xaf, 0x26, + 0x70, 0x38, 0x45, 0x6d, 0xff, 0x59, 0x11, 0x06, 0xf8, 0xf2, 0x3b, 0x82, 0xeb, 0xe5, 0xd3, 0x50, + 0x72, 0xb7, 0xb7, 0xdb, 0x3c, 0xab, 0x5d, 0x7f, 0xec, 0x19, 0x5c, 0x91, 0x40, 0x1c, 0xe3, 0xd1, + 0xb2, 0xd0, 0x77, 0x77, 0x88, 0x4c, 0xcc, 0x1b, 0x3e, 0xbb, 0xe8, 0x44, 0x0e, 0x97, 0x95, 0xd4, + 0x39, 0x1b, 0x6b, 0xc6, 0xd1, 0x27, 0x01, 0xc2, 0x28, 0x70, 0xbd, 0x0d, 0x0a, 0x13, 0xc1, 0xdd, + 0x9f, 0xea, 0xc0, 0xad, 0xa6, 0x88, 0x39, 0xcf, 0x78, 0xcf, 0x51, 0x08, 0xac, 0x71, 0x44, 0xb3, + 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, 0x31, 0x9b, 0xf9, 0x20, 0x94, 0x14, 0xf3, + 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, 0x27, 0xda, 0x76, 0x20, 0xe5, 0xd9, 0x2f, + 0x5b, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x88, 0xd3, 0xe0, 0x2e, 0x1c, 0x6b, 0x66, 0xec, 0xca, + 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, 0x8b, 0x33, 0xeb, 0x40, 0xe7, 0xe9, 0x8a, + 0xa3, 0xbb, 0xae, 0xd3, 0x14, 0xf1, 0x0d, 0x46, 0xf8, 0x6a, 0xe3, 0x30, 0xac, 0xb0, 0xf6, 0x1f, + 0x5a, 0x30, 0xc9, 0x5b, 0x7e, 0x85, 0xec, 0xaa, 0xbd, 0xe9, 0x5b, 0xd9, 0x76, 0x91, 0x4f, 0xb5, + 0x90, 0x93, 0x4f, 0x55, 0xff, 0xb4, 0x62, 0xc7, 0x4f, 0xfb, 0x8a, 0x05, 0x62, 0x86, 0x1c, 0x81, + 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, 0xc8, 0x51, 0x64, 0xfc, 0xb5, 0x05, 0x13, + 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0x61, 0xde, 0xfc, 0xa2, 0x4c, 0xe7, 0xcb, 0x2b, 0x64, + 0x77, 0xcd, 0xaf, 0x3a, 0xd1, 0x66, 0xf6, 0x47, 0x19, 0x83, 0xd5, 0xd7, 0x71, 0xb0, 0x1a, 0x72, + 0x01, 0x19, 0xe9, 0xc6, 0xba, 0x04, 0x08, 0x38, 0x68, 0xba, 0x31, 0xfb, 0xcf, 0x2d, 0x40, 0xbc, + 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, 0x5d, 0xbc, 0x35, 0x29, 0x0c, 0xd6, 0xa8, + 0x0e, 0xa5, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, 0x17, 0x07, 0xe8, 0xd1, 0x7f, 0x36, 0x00, + 0xc9, 0x97, 0x7d, 0xe8, 0x06, 0x8c, 0xd4, 0x9d, 0x96, 0x73, 0xcb, 0x6d, 0xba, 0x91, 0x4b, 0xc2, + 0x4e, 0xde, 0x58, 0x0b, 0x1a, 0x9d, 0x30, 0x91, 0x6b, 0x10, 0x6c, 0xf0, 0x41, 0xb3, 0x00, 0xad, + 0xc0, 0xdd, 0x71, 0x9b, 0x64, 0x83, 0xa9, 0x5d, 0x58, 0x44, 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, + 0xa3, 0xc8, 
0x08, 0xa3, 0x50, 0x7c, 0xc0, 0x61, 0x14, 0xe0, 0xc8, 0xc2, 0x28, 0xf4, 0x1d, 0x28, + 0x8c, 0xc2, 0xd0, 0x81, 0xc3, 0x28, 0xf4, 0xf7, 0x14, 0x46, 0x01, 0xc3, 0x09, 0x29, 0x7b, 0xd2, + 0xff, 0xcb, 0x6e, 0x93, 0x88, 0x0b, 0x07, 0x0f, 0x03, 0x33, 0x73, 0x6f, 0xaf, 0x7c, 0x02, 0x67, + 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0xa3, 0x30, 0xed, 0x34, 0x9b, 0xfe, 0x6d, 0x35, 0xa8, 0x4b, 0x61, + 0xdd, 0x69, 0x72, 0x13, 0xc8, 0x20, 0xe3, 0x7a, 0xea, 0xde, 0x5e, 0x79, 0x7a, 0x2e, 0x87, 0x06, + 0xe7, 0x96, 0x46, 0xaf, 0x42, 0xa9, 0x15, 0xf8, 0xf5, 0x15, 0xed, 0xf9, 0xf1, 0x19, 0xda, 0x81, + 0x55, 0x09, 0xdc, 0xdf, 0x2b, 0x8f, 0xaa, 0x3f, 0xec, 0xc0, 0x8f, 0x0b, 0x64, 0xc4, 0x45, 0x18, + 0x3e, 0xd4, 0xb8, 0x08, 0x5b, 0x30, 0x55, 0x23, 0x81, 0xeb, 0x34, 0xdd, 0xbb, 0x54, 0x5e, 0x96, + 0xfb, 0xd3, 0x1a, 0x94, 0x82, 0xc4, 0x8e, 0xdc, 0x53, 0xb4, 0x60, 0x2d, 0xe3, 0x93, 0xdc, 0x81, + 0x63, 0x46, 0xf6, 0x7f, 0xb5, 0x60, 0x50, 0xbc, 0xe4, 0x3b, 0x02, 0xa9, 0x71, 0xce, 0x30, 0x4a, + 0x94, 0xb3, 0x3b, 0x8c, 0x35, 0x26, 0xd7, 0x1c, 0x51, 0x49, 0x98, 0x23, 0x1e, 0xe9, 0xc4, 0xa4, + 0xb3, 0x21, 0xe2, 0xff, 0x2d, 0x52, 0xe9, 0xdd, 0x78, 0x53, 0xfe, 0xe0, 0xbb, 0x60, 0x15, 0x06, + 0x43, 0xf1, 0xa6, 0xb9, 0x90, 0xff, 0x1a, 0x24, 0x39, 0x88, 0xb1, 0x17, 0x9d, 0x78, 0xc5, 0x2c, + 0x99, 0x64, 0x3e, 0x96, 0x2e, 0x3e, 0xc0, 0xc7, 0xd2, 0xdd, 0x5e, 0xdd, 0xf7, 0x1d, 0xc6, 0xab, + 0x7b, 0xfb, 0xeb, 0xec, 0xe4, 0xd4, 0xe1, 0x47, 0x20, 0x54, 0x5d, 0x32, 0xcf, 0x58, 0xbb, 0xc3, + 0xcc, 0x12, 0x8d, 0xca, 0x11, 0xae, 0x7e, 0xd1, 0x82, 0xd3, 0x19, 0x5f, 0xa5, 0x49, 0x5a, 0xcf, + 0xc0, 0x90, 0xd3, 0x6e, 0xb8, 0x6a, 0x2d, 0x6b, 0xa6, 0xc9, 0x39, 0x01, 0xc7, 0x8a, 0x02, 0x2d, + 0xc0, 0x24, 0xb9, 0xd3, 0x72, 0xb9, 0x21, 0x57, 0x77, 0x3e, 0x2e, 0xf2, 0xe7, 0x9f, 0x4b, 0x49, + 0x24, 0x4e, 0xd3, 0xab, 0x00, 0x51, 0xc5, 0xdc, 0x00, 0x51, 0x3f, 0x6f, 0xc1, 0xb0, 0x7a, 0xd5, + 0xfb, 0xc0, 0x7b, 0xfb, 0x23, 0x66, 0x6f, 0x3f, 0xdc, 0xa1, 0xb7, 0x73, 0xba, 0xf9, 0xf7, 0x0b, + 0xaa, 0xbd, 0x55, 0x3f, 0x88, 0x7a, 0x90, 0xe0, 0xee, 0xff, 0xe1, 0xc4, 0x45, 0x18, 0x76, 0x5a, + 0x2d, 0x89, 0x90, 0x1e, 0x70, 0x2c, 0xf6, 0x7b, 0x0c, 0xc6, 0x3a, 0x8d, 0x7a, 0xc7, 0x51, 0xcc, + 0x7d, 0xc7, 0xd1, 0x00, 0x88, 0x9c, 0x60, 0x83, 0x44, 0x14, 0x26, 0x1c, 0x76, 0xf3, 0xf7, 0x9b, + 0x76, 0xe4, 0x36, 0x67, 0x5d, 0x2f, 0x0a, 0xa3, 0x60, 0xb6, 0xe2, 0x45, 0xd7, 0x02, 0x7e, 0x85, + 0xd4, 0x42, 0xac, 0x29, 0x5e, 0x58, 0xe3, 0x2b, 0x23, 0x58, 0xb0, 0x3a, 0xfa, 0x4d, 0x57, 0x8a, + 0x55, 0x01, 0xc7, 0x8a, 0xc2, 0xfe, 0x20, 0x3b, 0x7d, 0x58, 0x9f, 0x1e, 0x2c, 0xbc, 0xd8, 0x4f, + 0x8e, 0xa8, 0xd1, 0x60, 0x46, 0xd1, 0x45, 0x3d, 0x88, 0x59, 0xe7, 0xcd, 0x9e, 0x56, 0xac, 0xbf, + 0x88, 0x8c, 0x23, 0x9d, 0xa1, 0x8f, 0xa7, 0xdc, 0x63, 0x9e, 0xed, 0x72, 0x6a, 0x1c, 0xc0, 0x21, + 0x86, 0x25, 0x82, 0x62, 0x69, 0x72, 0x2a, 0x55, 0xb1, 0x2e, 0xb4, 0x44, 0x50, 0x02, 0x81, 0x63, + 0x1a, 0x2a, 0x4c, 0xa9, 0x3f, 0xe1, 0x34, 0x8a, 0x83, 0x11, 0x2b, 0xea, 0x10, 0x6b, 0x14, 0xe8, + 0x82, 0x50, 0x28, 0x70, 0xbb, 0xc0, 0xc3, 0x09, 0x85, 0x82, 0xec, 0x2e, 0x4d, 0x0b, 0x74, 0x11, + 0x86, 0xc9, 0x9d, 0x88, 0x04, 0x9e, 0xd3, 0xa4, 0x35, 0xf4, 0xc7, 0xf1, 0x33, 0x97, 0x62, 0x30, + 0xd6, 0x69, 0xd0, 0x1a, 0x8c, 0x87, 0x5c, 0xcf, 0xa6, 0xa2, 0xd4, 0x73, 0x7d, 0xe5, 0x53, 0xea, + 0x3d, 0xb5, 0x89, 0xde, 0x67, 0x20, 0xbe, 0x3b, 0xc9, 0x28, 0x13, 0x49, 0x16, 0xe8, 0x35, 0x18, + 0x6b, 0xfa, 0x4e, 0x63, 0xde, 0x69, 0x3a, 0x5e, 0x9d, 0xf5, 0xcf, 0x90, 0x99, 0x0f, 0xfb, 0xaa, + 0x81, 0xc5, 0x09, 0x6a, 0x2a, 0xbc, 0xe9, 0x10, 0x11, 0xa6, 0xcd, 0xf1, 0x36, 0x48, 0x28, 0xd2, + 0xd2, 0x33, 0xe1, 0xed, 0x6a, 0x0e, 
0x0d, 0xce, 0x2d, 0x8d, 0x5e, 0x82, 0x11, 0xf9, 0xf9, 0x5a, + 0x50, 0x96, 0xf8, 0x49, 0x8c, 0x86, 0xc3, 0x06, 0x25, 0x0a, 0xe1, 0xb8, 0xfc, 0xbf, 0x16, 0x38, + 0xeb, 0xeb, 0x6e, 0x5d, 0x44, 0x2a, 0xe0, 0xcf, 0x87, 0x3f, 0x2c, 0xdf, 0x2a, 0x2e, 0x65, 0x11, + 0xed, 0xef, 0x95, 0x4f, 0x89, 0x5e, 0xcb, 0xc4, 0xe3, 0x6c, 0xde, 0x68, 0x05, 0xa6, 0x36, 0x89, + 0xd3, 0x8c, 0x36, 0x17, 0x36, 0x49, 0x7d, 0x4b, 0x2e, 0x38, 0x16, 0xe6, 0x45, 0x7b, 0x3a, 0x72, + 0x39, 0x4d, 0x82, 0xb3, 0xca, 0xa1, 0xb7, 0x60, 0xba, 0xd5, 0xbe, 0xd5, 0x74, 0xc3, 0xcd, 0x55, + 0x3f, 0x62, 0x4e, 0x48, 0x73, 0x8d, 0x46, 0x40, 0x42, 0xfe, 0xba, 0x94, 0x1d, 0xbd, 0x32, 0x90, + 0x4e, 0x35, 0x87, 0x0e, 0xe7, 0x72, 0x40, 0x77, 0xe1, 0x78, 0x62, 0x22, 0x88, 0x88, 0x18, 0x63, + 0xf9, 0x39, 0x6a, 0x6a, 0x59, 0x05, 0x44, 0x70, 0x99, 0x2c, 0x14, 0xce, 0xae, 0x02, 0xbd, 0x0c, + 0xe0, 0xb6, 0x96, 0x9d, 0x6d, 0xb7, 0x49, 0xaf, 0x8a, 0x53, 0x6c, 0x8e, 0xd0, 0x6b, 0x03, 0x54, + 0xaa, 0x12, 0x4a, 0xf7, 0x66, 0xf1, 0x6f, 0x17, 0x6b, 0xd4, 0xe8, 0x2a, 0x8c, 0x89, 0x7f, 0xbb, + 0x62, 0x48, 0x79, 0x60, 0x96, 0xc7, 0x58, 0x54, 0xad, 0xaa, 0x8e, 0xd9, 0x4f, 0x41, 0x70, 0xa2, + 0x2c, 0xda, 0x80, 0xd3, 0x32, 0xd3, 0xa0, 0x3e, 0x3f, 0xe5, 0x18, 0x84, 0x2c, 0x31, 0xcc, 0x10, + 0x7f, 0x95, 0x32, 0xd7, 0x89, 0x10, 0x77, 0xe6, 0x43, 0xcf, 0x75, 0x7d, 0x9a, 0xf3, 0x37, 0xc7, + 0xc7, 0xe3, 0x88, 0x83, 0x57, 0x93, 0x48, 0x9c, 0xa6, 0x47, 0x3e, 0x1c, 0x77, 0xbd, 0xac, 0x59, + 0x7d, 0x82, 0x31, 0xfa, 0x10, 0x7f, 0x6e, 0xdd, 0x79, 0x46, 0x67, 0xe2, 0x71, 0x36, 0xdf, 0x77, + 0xe6, 0xf7, 0xf7, 0x07, 0x16, 0x2d, 0xad, 0x49, 0xe7, 0xe8, 0x53, 0x30, 0xa2, 0x7f, 0x94, 0x90, + 0x34, 0xce, 0x65, 0x0b, 0xaf, 0xda, 0x9e, 0xc0, 0x65, 0x7b, 0xb5, 0xee, 0x75, 0x1c, 0x36, 0x38, + 0xa2, 0x7a, 0x46, 0x6c, 0x83, 0x0b, 0xbd, 0x49, 0x32, 0xbd, 0xbb, 0xbd, 0x11, 0xc8, 0x9e, 0xee, + 0xe8, 0x2a, 0x0c, 0xd5, 0x9b, 0x2e, 0xf1, 0xa2, 0x4a, 0xb5, 0x53, 0xf4, 0xc6, 0x05, 0x41, 0x23, + 0xd6, 0x8f, 0xc8, 0xf1, 0xc2, 0x61, 0x58, 0x71, 0xb0, 0x5f, 0x82, 0xe1, 0x5a, 0x93, 0x90, 0x16, + 0x7f, 0xbe, 0x83, 0x9e, 0x64, 0xb7, 0x09, 0x26, 0x0f, 0x5a, 0x4c, 0x1e, 0xd4, 0x2f, 0x0a, 0x4c, + 0x12, 0x94, 0x78, 0xfb, 0xb7, 0x0a, 0x50, 0xee, 0x92, 0x6a, 0x28, 0x61, 0xc0, 0xb2, 0x7a, 0x32, + 0x60, 0xcd, 0xc1, 0x78, 0xfc, 0x4f, 0xd7, 0x8d, 0x29, 0x1f, 0xd8, 0x1b, 0x26, 0x1a, 0x27, 0xe9, + 0x7b, 0x7e, 0xce, 0xa0, 0xdb, 0xc0, 0xfa, 0xba, 0x3e, 0xc8, 0x31, 0x6c, 0xdf, 0xfd, 0xbd, 0x5f, + 0x98, 0x73, 0xed, 0x98, 0xf6, 0xd7, 0x0b, 0x70, 0x5c, 0x75, 0xe1, 0x77, 0x6e, 0xc7, 0x5d, 0x4f, + 0x77, 0xdc, 0x21, 0x58, 0x81, 0xed, 0x6b, 0x30, 0xc0, 0x03, 0x59, 0xf6, 0x20, 0xa8, 0x3f, 0x6a, + 0xc6, 0xd7, 0x56, 0xb2, 0xa1, 0x11, 0x63, 0xfb, 0x07, 0x2c, 0x18, 0x4f, 0xbc, 0x8b, 0x43, 0x58, + 0x7b, 0x3c, 0x7d, 0x3f, 0xc2, 0x74, 0x96, 0x98, 0x7e, 0x16, 0xfa, 0x36, 0xfd, 0x30, 0x4a, 0xba, + 0x88, 0x5c, 0xf6, 0xc3, 0x08, 0x33, 0x8c, 0xfd, 0x47, 0x16, 0xf4, 0xaf, 0x39, 0xae, 0x17, 0x49, + 0x73, 0x82, 0x95, 0x63, 0x4e, 0xe8, 0xe5, 0xbb, 0xd0, 0x8b, 0x30, 0x40, 0xd6, 0xd7, 0x49, 0x3d, + 0x12, 0xa3, 0x2a, 0x83, 0x28, 0x0c, 0x2c, 0x31, 0x28, 0x95, 0x1c, 0x59, 0x65, 0xfc, 0x2f, 0x16, + 0xc4, 0xe8, 0x26, 0x94, 0x22, 0x77, 0x9b, 0xcc, 0x35, 0x1a, 0xc2, 0xc8, 0x7e, 0x1f, 0x91, 0x3f, + 0xd6, 0x24, 0x03, 0x1c, 0xf3, 0xb2, 0xbf, 0x54, 0x00, 0x88, 0x23, 0x80, 0x75, 0xfb, 0xc4, 0xf9, + 0x94, 0xf9, 0xf5, 0x5c, 0x86, 0xf9, 0x15, 0xc5, 0x0c, 0x33, 0x6c, 0xaf, 0xaa, 0x9b, 0x8a, 0x3d, + 0x75, 0x53, 0xdf, 0x41, 0xba, 0x69, 0x01, 0x26, 0xe3, 0x08, 0x66, 0x66, 0x00, 0x47, 0x76, 0xe8, + 0xae, 0x25, 0x91, 0x38, 0x4d, 0x6f, 0x13, 0x38, 0xab, 0x02, 
0x39, 0x89, 0xb3, 0x90, 0x79, 0x90, + 0xeb, 0xe6, 0xec, 0x2e, 0xfd, 0x14, 0xdb, 0x97, 0x0b, 0xb9, 0xf6, 0xe5, 0x9f, 0xb0, 0xe0, 0x58, + 0xb2, 0x1e, 0xf6, 0xdc, 0xfa, 0x0b, 0x16, 0x1c, 0x8f, 0xd3, 0x78, 0xa4, 0x6d, 0xfa, 0x2f, 0x74, + 0x0c, 0x4e, 0x95, 0xd3, 0xe2, 0x38, 0x5a, 0xc7, 0x4a, 0x16, 0x6b, 0x9c, 0x5d, 0xa3, 0xfd, 0x5f, + 0xfa, 0x60, 0x3a, 0x2f, 0xaa, 0x15, 0x7b, 0x60, 0xe2, 0xdc, 0xa9, 0x6d, 0x91, 0xdb, 0xc2, 0x8d, + 0x3f, 0x7e, 0x60, 0xc2, 0xc1, 0x58, 0xe2, 0x93, 0x99, 0x5b, 0x0a, 0x3d, 0x66, 0x6e, 0xd9, 0x84, + 0xc9, 0xdb, 0x9b, 0xc4, 0xbb, 0xee, 0x85, 0x4e, 0xe4, 0x86, 0xeb, 0x2e, 0xb3, 0x48, 0xf3, 0x79, + 0x23, 0xd3, 0x67, 0x4f, 0xde, 0x4c, 0x12, 0xec, 0xef, 0x95, 0x4f, 0x1b, 0x80, 0xb8, 0xc9, 0x7c, + 0x23, 0xc1, 0x69, 0xa6, 0xe9, 0xc4, 0x37, 0x7d, 0x0f, 0x38, 0xf1, 0xcd, 0xb6, 0x2b, 0xfc, 0x58, + 0xe4, 0xeb, 0x01, 0x76, 0xd7, 0x5c, 0x51, 0x50, 0xac, 0x51, 0xa0, 0x4f, 0x00, 0xd2, 0x93, 0x8b, + 0x19, 0x41, 0x45, 0x9f, 0xbd, 0xb7, 0x57, 0x46, 0xab, 0x29, 0xec, 0xfe, 0x5e, 0x79, 0x8a, 0x42, + 0x2b, 0x1e, 0xbd, 0xb3, 0xc6, 0x91, 0xd8, 0x32, 0x18, 0xa1, 0x9b, 0x30, 0x41, 0xa1, 0x6c, 0x45, + 0xc9, 0x88, 0xa5, 0xfc, 0x9e, 0xf9, 0xf4, 0xbd, 0xbd, 0xf2, 0xc4, 0x6a, 0x02, 0x97, 0xc7, 0x3a, + 0xc5, 0x24, 0x23, 0xff, 0xcd, 0x50, 0xaf, 0xf9, 0x6f, 0xec, 0x2f, 0x58, 0x70, 0x32, 0x37, 0x99, + 0x3f, 0x3a, 0x0f, 0x43, 0x4e, 0xcb, 0xe5, 0x86, 0x0f, 0x71, 0xd4, 0x30, 0x05, 0x5b, 0xb5, 0xc2, + 0xcd, 0x1e, 0x0a, 0x4b, 0x77, 0xf8, 0x2d, 0xd7, 0x6b, 0x24, 0x77, 0xf8, 0x2b, 0xae, 0xd7, 0xc0, + 0x0c, 0xa3, 0x8e, 0xac, 0x62, 0xee, 0x23, 0x86, 0xaf, 0xd2, 0xb5, 0x9a, 0x91, 0xf6, 0xff, 0x68, + 0x9b, 0x81, 0x9e, 0xd6, 0x8d, 0x94, 0xc2, 0x1f, 0x31, 0xd7, 0x40, 0xf9, 0x79, 0x0b, 0xc4, 0xa3, + 0xe7, 0x1e, 0xce, 0xe4, 0x37, 0x61, 0x64, 0x27, 0x9d, 0x78, 0xf1, 0x6c, 0xfe, 0x2b, 0x70, 0x11, + 0xab, 0x5d, 0x89, 0xe8, 0x46, 0x92, 0x45, 0x83, 0x97, 0xdd, 0x00, 0x81, 0x5d, 0x24, 0xcc, 0x14, + 0xd1, 0xbd, 0x35, 0xcf, 0x01, 0x34, 0x18, 0x2d, 0xcb, 0xc6, 0x5c, 0x30, 0x25, 0xae, 0x45, 0x85, + 0xc1, 0x1a, 0x95, 0xfd, 0xcf, 0x0b, 0x30, 0x2c, 0x13, 0xfd, 0xb5, 0xbd, 0x5e, 0x14, 0x86, 0x07, + 0xca, 0xfc, 0x8d, 0x2e, 0x40, 0x89, 0x69, 0xb4, 0xab, 0xb1, 0x9e, 0x55, 0xe9, 0x93, 0x56, 0x24, + 0x02, 0xc7, 0x34, 0x4c, 0x7c, 0x6f, 0xdf, 0x62, 0xe4, 0x89, 0x27, 0xba, 0x35, 0x0e, 0xc6, 0x12, + 0x8f, 0x3e, 0x0a, 0x13, 0xbc, 0x5c, 0xe0, 0xb7, 0x9c, 0x0d, 0x6e, 0x05, 0xeb, 0x57, 0x71, 0x4f, + 0x26, 0x56, 0x12, 0xb8, 0xfd, 0xbd, 0xf2, 0xb1, 0x24, 0x8c, 0x99, 0x77, 0x53, 0x5c, 0x98, 0xb3, + 0x1b, 0xaf, 0x84, 0xee, 0xea, 0x29, 0x1f, 0xb9, 0x18, 0x85, 0x75, 0x3a, 0xfb, 0x53, 0x80, 0xd2, + 0x29, 0x0f, 0xd1, 0xeb, 0xdc, 0x59, 0xda, 0x0d, 0x48, 0xa3, 0x93, 0xb9, 0x57, 0x8f, 0xee, 0x21, + 0x5f, 0xd7, 0xf1, 0x52, 0x58, 0x95, 0xb7, 0xff, 0x8f, 0x22, 0x4c, 0x24, 0xe3, 0x09, 0xa0, 0xcb, + 0x30, 0xc0, 0x45, 0x4a, 0xc1, 0xbe, 0x83, 0x37, 0x91, 0x16, 0x85, 0x80, 0x1d, 0xae, 0x42, 0x2a, + 0x15, 0xe5, 0xd1, 0x5b, 0x30, 0xdc, 0xf0, 0x6f, 0x7b, 0xb7, 0x9d, 0xa0, 0x31, 0x57, 0xad, 0x88, + 0xe9, 0x9c, 0xa9, 0xe2, 0x58, 0x8c, 0xc9, 0xf4, 0xc8, 0x06, 0xcc, 0x72, 0x1e, 0xa3, 0xb0, 0xce, + 0x0e, 0xad, 0xb1, 0x14, 0x21, 0xeb, 0xee, 0xc6, 0x8a, 0xd3, 0xea, 0xf4, 0x72, 0x66, 0x41, 0x12, + 0x69, 0x9c, 0x47, 0x45, 0x1e, 0x11, 0x8e, 0xc0, 0x31, 0x23, 0xf4, 0x19, 0x98, 0x0a, 0x73, 0x8c, + 0x2e, 0x79, 0x19, 0x70, 0x3b, 0xd9, 0x21, 0xe6, 0x1f, 0xba, 0xb7, 0x57, 0x9e, 0xca, 0x32, 0xcf, + 0x64, 0x55, 0x63, 0x7f, 0xbe, 0x0f, 0x66, 0x64, 0xa6, 0xcf, 0x0c, 0x3f, 0xfd, 0xcf, 0x59, 0x09, + 0x47, 0xfd, 0x97, 0xf3, 0xf7, 0x86, 0x07, 0xe6, 0xae, 0xff, 0xc5, 0xb4, 0xbb, 0xfe, 
0xab, 0x07, + 0x6c, 0xc6, 0xa1, 0x39, 0xed, 0x7f, 0xc7, 0x7a, 0xda, 0x7f, 0xf9, 0x18, 0x18, 0xbb, 0xb9, 0x91, + 0x19, 0xdf, 0x3a, 0xa4, 0xcc, 0xf8, 0x18, 0x86, 0xc8, 0x76, 0x2b, 0xda, 0x5d, 0x74, 0x03, 0xd1, + 0xe2, 0x4c, 0x9e, 0x4b, 0x82, 0x26, 0xcd, 0x53, 0x62, 0xb0, 0xe2, 0x83, 0x76, 0x60, 0x72, 0xa3, + 0x4e, 0x12, 0xc9, 0xb1, 0x8b, 0xf9, 0xab, 0xe7, 0xd2, 0xc2, 0x52, 0x87, 0xcc, 0xd8, 0xec, 0xbe, + 0x90, 0x22, 0xc1, 0xe9, 0x2a, 0x58, 0x62, 0x6e, 0xe7, 0x76, 0xb8, 0xd4, 0x74, 0xc2, 0xc8, 0xad, + 0xcf, 0x37, 0xfd, 0xfa, 0x56, 0x2d, 0xf2, 0x03, 0x99, 0xf6, 0x2b, 0x53, 0x5c, 0x9f, 0xbb, 0x59, + 0x4b, 0xd1, 0xa7, 0x13, 0x73, 0x67, 0x51, 0xe1, 0xcc, 0xba, 0xd0, 0x2a, 0x0c, 0x6e, 0xb8, 0x11, + 0x26, 0x2d, 0x5f, 0xdc, 0xea, 0x33, 0x37, 0xa4, 0x4b, 0x9c, 0x24, 0x9d, 0x28, 0x5b, 0x20, 0xb0, + 0x64, 0x82, 0x5e, 0x57, 0x5b, 0xf1, 0x40, 0xbe, 0xce, 0x2e, 0xed, 0xff, 0x94, 0xb9, 0x19, 0xbf, + 0x06, 0x45, 0x6f, 0x3d, 0xec, 0x14, 0xb5, 0x63, 0x75, 0xb9, 0x96, 0x4e, 0x60, 0xbd, 0xba, 0x5c, + 0xc3, 0xb4, 0x20, 0x7b, 0xe0, 0x17, 0xd6, 0x43, 0x57, 0x24, 0x30, 0xc9, 0x7c, 0xef, 0x58, 0xa9, + 0x2d, 0xd4, 0x2a, 0xe9, 0xa4, 0xdd, 0x0c, 0x8c, 0x79, 0x71, 0x74, 0x03, 0x4a, 0x1b, 0xdc, 0x7e, + 0xa3, 0xb2, 0xff, 0x67, 0x1e, 0x09, 0x97, 0x24, 0x51, 0x3a, 0x55, 0xb7, 0x42, 0xe1, 0x98, 0x15, + 0xfa, 0xbc, 0x05, 0xc7, 0x93, 0xe9, 0x92, 0xd9, 0xb3, 0x1c, 0xe1, 0x2a, 0xf4, 0x62, 0x2f, 0xf9, + 0xab, 0x59, 0x01, 0xa3, 0x42, 0xa6, 0x66, 0xcf, 0x24, 0xc3, 0xd9, 0xd5, 0xd1, 0x8e, 0x0e, 0x6e, + 0x35, 0x84, 0xcb, 0x4a, 0x66, 0x47, 0x27, 0x42, 0x98, 0xf0, 0x8e, 0xc6, 0xf3, 0x8b, 0x98, 0x16, + 0x44, 0x6b, 0xa0, 0xe5, 0xf9, 0x67, 0x66, 0x8c, 0x9c, 0x33, 0x78, 0x59, 0x51, 0xc9, 0x9c, 0x3d, + 0x54, 0x32, 0x8b, 0xa1, 0x58, 0xe3, 0x43, 0xa7, 0x52, 0xdd, 0xf5, 0x1a, 0x24, 0x60, 0x46, 0x8c, + 0x9c, 0xa9, 0xb4, 0xc0, 0x28, 0xd2, 0x53, 0x89, 0xc3, 0xb1, 0xe0, 0xc0, 0x78, 0x91, 0xd6, 0xe6, + 0x7a, 0xd8, 0x29, 0x38, 0xfd, 0x02, 0x69, 0x6d, 0x26, 0x26, 0x14, 0xe7, 0xc5, 0xe0, 0x58, 0x70, + 0xa0, 0x4b, 0x66, 0x9d, 0x2e, 0x20, 0x12, 0x74, 0x4a, 0xfc, 0xbf, 0xcc, 0x49, 0xd2, 0x4b, 0x46, + 0x20, 0xb0, 0x64, 0x82, 0x3e, 0x69, 0xca, 0x1c, 0x3c, 0xf5, 0xff, 0xd3, 0x5d, 0x64, 0x0e, 0x83, + 0x6f, 0x67, 0xa9, 0xe3, 0x65, 0x28, 0xac, 0xd7, 0x45, 0xb6, 0xff, 0x4c, 0x35, 0xf3, 0xf2, 0x82, + 0xc1, 0x8d, 0x05, 0x7b, 0x5e, 0x5e, 0xc0, 0x85, 0xf5, 0x3a, 0x9d, 0xfa, 0xce, 0xdd, 0x76, 0x40, + 0x96, 0xdd, 0xa6, 0x4c, 0xd9, 0x9f, 0x39, 0xf5, 0xe7, 0x24, 0x51, 0x7a, 0xea, 0x2b, 0x14, 0x8e, + 0x59, 0x51, 0xbe, 0xb1, 0x24, 0x34, 0x95, 0xcf, 0x57, 0x09, 0x3c, 0x69, 0xbe, 0x99, 0xb2, 0xd0, + 0x16, 0x8c, 0xee, 0x84, 0xad, 0x4d, 0x22, 0x77, 0x45, 0x91, 0xaf, 0x3f, 0xf3, 0x4d, 0xfb, 0x0d, + 0x41, 0xe8, 0x06, 0x51, 0xdb, 0x69, 0xa6, 0x36, 0x72, 0x76, 0x1b, 0xbf, 0xa1, 0x33, 0xc3, 0x26, + 0x6f, 0x3a, 0x11, 0xde, 0xe6, 0x81, 0xa7, 0x44, 0x2a, 0xff, 0xcc, 0x89, 0x90, 0x11, 0x9b, 0x8a, + 0x4f, 0x04, 0x81, 0xc0, 0x92, 0x89, 0xea, 0x6c, 0x76, 0x00, 0x9d, 0xe8, 0xd2, 0xd9, 0xa9, 0xf6, + 0xc6, 0x9d, 0xcd, 0x0e, 0x9c, 0x98, 0x15, 0x3b, 0x68, 0x5a, 0x19, 0x69, 0xab, 0xa7, 0x1f, 0xca, + 0x3f, 0x68, 0xba, 0xa5, 0xb9, 0xe6, 0x07, 0x4d, 0x16, 0x15, 0xce, 0xac, 0x8b, 0x7e, 0x5c, 0x4b, + 0xc6, 0x10, 0x13, 0xc1, 0xf4, 0x9f, 0xcc, 0x09, 0xc1, 0x97, 0x0e, 0x34, 0xc6, 0x3f, 0x4e, 0xa1, + 0x70, 0xcc, 0x0a, 0x35, 0x60, 0xac, 0x65, 0xc4, 0xa6, 0x64, 0x49, 0x01, 0x72, 0xe4, 0x82, 0xac, + 0x28, 0x96, 0x5c, 0xa9, 0x60, 0x62, 0x70, 0x82, 0x27, 0xf3, 0xd0, 0xe2, 0xcf, 0xad, 0x58, 0xce, + 0x80, 0x9c, 0xa1, 0xce, 0x78, 0x91, 0xc5, 0x87, 0x5a, 0x20, 0xb0, 0x64, 0x42, 0x7b, 0x43, 0x3c, + 0x12, 0xf2, 
0x43, 0x96, 0x7a, 0x23, 0xcf, 0x90, 0x9a, 0x65, 0x59, 0x90, 0x01, 0x99, 0x05, 0x0a, + 0xc7, 0xac, 0xe8, 0x4e, 0x4e, 0x0f, 0xbc, 0x53, 0xf9, 0x3b, 0x79, 0xf2, 0xb8, 0x63, 0x3b, 0x39, + 0x3d, 0xec, 0x8a, 0xe2, 0xa8, 0x53, 0xf1, 0x83, 0x59, 0xda, 0x80, 0x9c, 0x76, 0xa9, 0x00, 0xc4, + 0xe9, 0x76, 0x29, 0x14, 0x8e, 0x59, 0xd9, 0x3f, 0x58, 0x80, 0x33, 0x9d, 0xd7, 0x5b, 0x6c, 0x2e, + 0xa9, 0xc6, 0x3e, 0x25, 0x09, 0x73, 0x09, 0xbf, 0xbc, 0xc7, 0x54, 0x3d, 0x87, 0x14, 0xbd, 0x04, + 0x93, 0xea, 0x29, 0x57, 0xd3, 0xad, 0xef, 0xae, 0xc6, 0xfa, 0x12, 0x15, 0x7c, 0xa3, 0x96, 0x24, + 0xc0, 0xe9, 0x32, 0x68, 0x0e, 0xc6, 0x0d, 0x60, 0x65, 0x51, 0x5c, 0xd2, 0xe3, 0x40, 0xf5, 0x26, + 0x1a, 0x27, 0xe9, 0xed, 0x9f, 0xb5, 0xe0, 0xa1, 0x9c, 0xac, 0xc1, 0x3d, 0x47, 0xcc, 0x5c, 0x87, + 0xf1, 0x96, 0x59, 0xb4, 0x4b, 0x90, 0x5f, 0x23, 0x37, 0xb1, 0x6a, 0x6b, 0x02, 0x81, 0x93, 0x4c, + 0xed, 0x9f, 0x2e, 0xc0, 0xe9, 0x8e, 0xbe, 0xc9, 0x08, 0xc3, 0x89, 0x8d, 0xed, 0xd0, 0x59, 0x08, + 0x48, 0x83, 0x78, 0x91, 0xeb, 0x34, 0x6b, 0x2d, 0x52, 0xd7, 0x0c, 0x5e, 0xcc, 0xc9, 0xf7, 0xd2, + 0x4a, 0x6d, 0x2e, 0x4d, 0x81, 0x73, 0x4a, 0xa2, 0x65, 0x40, 0x69, 0x8c, 0x18, 0x61, 0x96, 0x80, + 0x22, 0xcd, 0x0f, 0x67, 0x94, 0x40, 0x1f, 0x84, 0x51, 0xe5, 0xf3, 0xac, 0x8d, 0x38, 0xdb, 0xd8, + 0xb1, 0x8e, 0xc0, 0x26, 0x1d, 0xba, 0xc8, 0x33, 0x98, 0x88, 0x5c, 0x37, 0xc2, 0x3a, 0x36, 0x2e, + 0xd3, 0x93, 0x08, 0x30, 0xd6, 0x69, 0xe6, 0x5f, 0xfa, 0xed, 0x6f, 0x9e, 0x79, 0xdf, 0xef, 0x7d, + 0xf3, 0xcc, 0xfb, 0xfe, 0xf0, 0x9b, 0x67, 0xde, 0xf7, 0x3d, 0xf7, 0xce, 0x58, 0xbf, 0x7d, 0xef, + 0x8c, 0xf5, 0x7b, 0xf7, 0xce, 0x58, 0x7f, 0x78, 0xef, 0x8c, 0xf5, 0x27, 0xf7, 0xce, 0x58, 0x5f, + 0xfa, 0xd3, 0x33, 0xef, 0x7b, 0x13, 0xc5, 0x31, 0x68, 0x2f, 0xd0, 0xd1, 0xb9, 0xb0, 0x73, 0xf1, + 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x95, 0x37, 0xa6, 0xd7, 0x16, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -11753,6 +11790,18 @@ func (m *LifecycleHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Sleep != nil { + { + size, err := m.Sleep.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if m.TCPSocket != nil { { size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) @@ -14777,6 +14826,24 @@ func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MismatchLabelKeys) > 0 { + for iNdEx := len(m.MismatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MismatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MismatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MismatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.MatchLabelKeys) > 0 { + for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } if m.NamespaceSelector != nil { { size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -19299,6 +19366,32 @@ func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SleepAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SleepAction) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SleepAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -22304,6 +22397,10 @@ func (m *LifecycleHandler) Size() (n int) { l = m.TCPSocket.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Sleep != nil { + l = m.Sleep.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23399,6 +23496,18 @@ func (m *PodAffinityTerm) Size() (n int) { l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchLabelKeys) > 0 { + for _, s := range m.MatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MismatchLabelKeys) > 0 { + for _, s := range m.MismatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -25040,6 +25149,16 @@ func (m *SessionAffinityConfig) Size() (n int) { return n } +func (m *SleepAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + return n +} + func (m *StorageOSPersistentVolumeSource) Size() (n int) { if m == nil { return 0 @@ -26744,6 +26863,7 @@ func (this *LifecycleHandler) String() string { `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `Sleep:` + strings.Replace(this.Sleep.String(), "SleepAction", "SleepAction", 1) + `,`, `}`, }, "") return s @@ -27598,6 +27718,8 @@ func (this *PodAffinityTerm) String() string { `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`, + `MismatchLabelKeys:` + fmt.Sprintf("%v", this.MismatchLabelKeys) + `,`, `}`, }, "") return s @@ -28877,6 +28999,16 @@ func (this *SessionAffinityConfig) String() string { }, "") return s } +func (this *SleepAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SleepAction{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `}`, + }, "") + return s +} func (this *StorageOSPersistentVolumeSource) String() string { if this == nil { return "nil" @@ -43247,6 +43379,42 @@ func (m *LifecycleHandler) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sleep", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sleep == nil { + m.Sleep = &SleepAction{} + } + if err := m.Sleep.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -52911,6 +53079,70 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MismatchLabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MismatchLabelKeys = append(m.MismatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -67106,6 +67338,75 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } return nil } +func (m *SleepAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index 8080ae98db9b2..0fa44a4845e91 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ 
b/staging/src/k8s.io/api/core/v1/generated.proto @@ -1159,7 +1159,7 @@ message EndpointPort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -2088,6 +2088,11 @@ message LifecycleHandler { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional optional TCPSocketAction tcpSocket = 3; + + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + optional SleepAction sleep = 4; } // LimitRange sets resource usage limits for each kind of resource in a Namespace. @@ -3262,6 +3267,7 @@ message PodAffinity { // a pod of the set of pods is running message PodAffinityTerm { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; @@ -3286,6 +3292,32 @@ message PodAffinityTerm { // An empty selector ({}) matches all namespaces. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; + + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + repeated string matchLabelKeys = 5; + + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + repeated string mismatchLabelKeys = 6; } // Pod anti affinity is a group of inter pod anti affinity scheduling rules. @@ -5259,7 +5291,7 @@ message ServicePort { // RFC-6335 and https://www.iana.org/assignments/service-names). 
// // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -5562,6 +5594,12 @@ message SessionAffinityConfig { optional ClientIPConfig clientIP = 1; } +// SleepAction describes a "sleep" action. +message SleepAction { + // Seconds is the number of seconds to sleep. + optional int64 seconds = 1; +} + // Represents a StorageOS persistent volume resource. message StorageOSPersistentVolumeSource { // volumeName is the human-readable name of the StorageOS volume. Volume diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 13bdb6da75285..f4eeb6c648b83 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -2272,6 +2272,12 @@ type ExecAction struct { Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` } +// SleepAction describes a "sleep" action. +type SleepAction struct { + // Seconds is the number of seconds to sleep. + Seconds int64 `json:"seconds" protobuf:"bytes,1,opt,name=seconds"` +} + // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { @@ -2667,6 +2673,10 @@ type LifecycleHandler struct { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"` } // Lifecycle describes actions that the management system should take in response to container lifecycle @@ -2866,6 +2876,9 @@ const ( // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). DisruptionTarget PodConditionType = "DisruptionTarget" + // PodReadyToStartContainers pod sandbox is successfully configured and + // the pod is ready to launch containers. + PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers" ) // These are reasons for a pod's transition to a condition. @@ -3157,6 +3170,7 @@ type WeightedPodAffinityTerm struct { // a pod of the set of pods is running type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // namespaces specifies a static list of namespace names that the term applies to. @@ -3178,6 +3192,30 @@ type PodAffinityTerm struct { // An empty selector ({}) matches all namespaces. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"` + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. 
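(Aside: a minimal sketch of how the new sleep handler added to LifecycleHandler above might be wired into a container spec, assuming a cluster with the PodLifecycleSleepAction feature gate enabled; the container name and image are hypothetical.)

package example

import (
	corev1 "k8s.io/api/core/v1"
)

// buildContainer returns a container whose preStop hook simply sleeps for
// five seconds during pod termination, using the SleepAction type added in
// this change instead of an exec-based "sleep 5" command.
func buildContainer() corev1.Container {
	return corev1.Container{
		Name:  "web",                     // hypothetical name
		Image: "example.com/web:latest",  // hypothetical image
		Lifecycle: &corev1.Lifecycle{
			PreStop: &corev1.LifecycleHandler{
				Sleep: &corev1.SleepAction{Seconds: 5},
			},
		},
	}
}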
The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"` + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"` } // Node affinity is a group of node affinity scheduling rules. @@ -5035,7 +5073,7 @@ type ServicePort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -5279,7 +5317,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index b33dfafb07dd9..cbc72fa4ff13f 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -531,7 +531,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. 
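(Aside: to make the matchLabelKeys semantics documented above concrete, here is a small sketch; the label keys and values are hypothetical, and the field requires the alpha MatchLabelKeysInPodAffinity feature gate.)

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// affinityTerm builds a PodAffinityTerm using the new matchLabelKeys field.
// Per the field documentation above, the incoming pod's value for
// "pod-template-hash" is merged into the selector as
// `pod-template-hash in (<value>)`, so the term matches only pods that carry
// app=web and share the incoming pod's pod-template-hash. Note that the same
// key may not appear in both labelSelector and matchLabelKeys.
func affinityTerm() corev1.PodAffinityTerm {
	return corev1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"}, // hypothetical label
		},
		TopologyKey:    "kubernetes.io/hostname",
		MatchLabelKeys: []string{"pod-template-hash"},
	}
}

Using mismatchLabelKeys instead would merge the same key as `key notin (value)`, excluding pods that share the incoming pod's value for that key.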
Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -935,6 +935,7 @@ var map_LifecycleHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", + "sleep": "Sleep represents the duration that the container should sleep before being terminated.", } func (LifecycleHandler) SwaggerDoc() map[string]string { @@ -1490,10 +1491,12 @@ func (PodAffinity) SwaggerDoc() map[string]string { var map_PodAffinityTerm = map[string]string{ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", - "labelSelector": "A label query over a set of resources, in this case pods.", + "labelSelector": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.", "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -2326,7 +2329,7 @@ var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", @@ -2391,6 +2394,15 @@ func (SessionAffinityConfig) SwaggerDoc() map[string]string { return map_SessionAffinityConfig } +var map_SleepAction = map[string]string{ + "": "SleepAction describes a \"sleep\" action.", + "seconds": "Seconds is the number of seconds to sleep.", +} + +func (SleepAction) SwaggerDoc() map[string]string { + return map_SleepAction +} + var map_StorageOSPersistentVolumeSource = map[string]string{ "": "Represents a StorageOS persistent volume resource.", "volumeName": "volumeName is the human-readable name of the StorageOS volume. 
Volume names are only unique within a namespace.", diff --git a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go index c1453c59cd0a1..528fbb3b12562 100644 --- a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -2045,6 +2045,11 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { *out = new(TCPSocketAction) **out = **in } + if in.Sleep != nil { + in, out := &in.Sleep, &out.Sleep + *out = new(SleepAction) + **out = **in + } return } @@ -3477,6 +3482,16 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -5686,6 +5701,22 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SleepAction) DeepCopyInto(out *SleepAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction. +func (in *SleepAction) DeepCopy() *SleepAction { + if in == nil { + return nil + } + out := new(SleepAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { *out = *in diff --git a/staging/src/k8s.io/api/discovery/v1/generated.proto b/staging/src/k8s.io/api/discovery/v1/generated.proto index 490ce89224740..6d234017b72f3 100644 --- a/staging/src/k8s.io/api/discovery/v1/generated.proto +++ b/staging/src/k8s.io/api/discovery/v1/generated.proto @@ -118,7 +118,7 @@ message EndpointHints { // +structType=atomic message EndpointPort { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -145,7 +145,7 @@ message EndpointPort { // RFC-6335 and https://www.iana.org/assignments/service-names). 
// // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/staging/src/k8s.io/api/discovery/v1/types.go b/staging/src/k8s.io/api/discovery/v1/types.go index efbb09918c207..7ebb07ca35924 100644 --- a/staging/src/k8s.io/api/discovery/v1/types.go +++ b/staging/src/k8s.io/api/discovery/v1/types.go @@ -168,7 +168,7 @@ type ForZone struct { // +structType=atomic type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -195,7 +195,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/staging/src/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index bef7745398ab2..41c3060568f25 100644 --- a/staging/src/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. 
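(Aside: a small sketch of a ServicePort advertising the Kubernetes-defined appProtocol value described in the reworded comments above; the port name and number are hypothetical.)

package example

import corev1 "k8s.io/api/core/v1"

// grpcPort declares a cleartext HTTP/2 ("prior knowledge") port. Consumers
// that understand the kubernetes.io/h2c hint can speak HTTP/2 directly,
// without an HTTP/1.1 Upgrade step, as described in RFC 9113.
func grpcPort() corev1.ServicePort {
	h2c := "kubernetes.io/h2c"
	return corev1.ServicePort{
		Name:        "grpc", // hypothetical port name
		Port:        8080,
		AppProtocol: &h2c,
	}
}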
Default is TCP.", "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/discovery/v1beta1/generated.proto b/staging/src/k8s.io/api/discovery/v1beta1/generated.proto index 8b6c360b0e6db..ec555a40b3ac3 100644 --- a/staging/src/k8s.io/api/discovery/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/discovery/v1beta1/generated.proto @@ -119,7 +119,7 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. diff --git a/staging/src/k8s.io/api/discovery/v1beta1/types.go b/staging/src/k8s.io/api/discovery/v1beta1/types.go index f09f7f320cd2c..defd8e2ce6919 100644 --- a/staging/src/k8s.io/api/discovery/v1beta1/types.go +++ b/staging/src/k8s.io/api/discovery/v1beta1/types.go @@ -172,7 +172,7 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. 
+ // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. diff --git a/staging/src/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index b1d4c306ccd9b..847d4d58e0672 100644 --- a/staging/src/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -64,7 +64,7 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta1/generated.proto b/staging/src/k8s.io/api/flowcontrol/v1beta1/generated.proto index 96df0ace798e6..04b54820c73fc 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -192,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. 
This produces diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta1/types.go b/staging/src/k8s.io/api/flowcontrol/v1beta1/types.go index 9e05ff1a090f9..abc3e420096f5 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/staging/src/k8s.io/api/flowcontrol/v1beta1/types.go @@ -466,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index 1405f3c3ca6a6..d69bdac62285e 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta2/generated.proto b/staging/src/k8s.io/api/flowcontrol/v1beta2/generated.proto index a8c8a32737403..a832114afea3c 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/staging/src/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -192,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta2/types.go b/staging/src/k8s.io/api/flowcontrol/v1beta2/types.go index e8cf7abfff6eb..c66cb173f4a2c 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta2/types.go +++ b/staging/src/k8s.io/api/flowcontrol/v1beta2/types.go @@ -466,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/staging/src/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/staging/src/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 49a4178096632..921122731af76 100644 --- a/staging/src/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. 
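(Aside: a quick worked example of the lendable/borrowing formulas quoted above, with hypothetical numbers: a priority level with NominalCL = 100 and lendablePercent = 25 exposes LendableCL = round(100 * 25/100.0) = 25 seats that other levels may borrow; with borrowingLimitPercent = 150 it may itself borrow up to BorrowingCL = round(100 * 150/100.0) = 150 seats, i.e. more than its own nominal limit, which the text explicitly allows.)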
The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/staging/src/k8s.io/api/go.mod b/staging/src/k8s.io/api/go.mod index 2e007f246fdfa..e9826b9d2cd55 100644 --- a/staging/src/k8s.io/api/go.mod +++ b/staging/src/k8s.io/api/go.mod @@ -2,11 +2,11 @@ module k8s.io/api -go 1.20 +go 1.21.3 require ( github.com/gogo/protobuf v1.3.2 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 k8s.io/apimachinery v0.0.0 ) @@ -22,8 +22,8 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum index f81a50a2e23d0..809ee94aecf2c 100644 --- a/staging/src/k8s.io/api/go.sum +++ b/staging/src/k8s.io/api/go.sum @@ -45,7 +45,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ 
-54,19 +55,16 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -74,26 +72,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod 
h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -110,12 +108,11 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go index f54d1f82421c6..2db3788852559 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -25,14 +25,11 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they 
are not otherwise used. @@ -46,94 +43,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *ClusterCIDR) Reset() { *m = ClusterCIDR{} } -func (*ClusterCIDR) ProtoMessage() {} -func (*ClusterCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{0} -} -func (m *ClusterCIDR) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterCIDR) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDR.Merge(m, src) -} -func (m *ClusterCIDR) XXX_Size() int { - return m.Size() -} -func (m *ClusterCIDR) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDR.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterCIDR proto.InternalMessageInfo - -func (m *ClusterCIDRList) Reset() { *m = ClusterCIDRList{} } -func (*ClusterCIDRList) ProtoMessage() {} -func (*ClusterCIDRList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{1} -} -func (m *ClusterCIDRList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterCIDRList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDRList.Merge(m, src) -} -func (m *ClusterCIDRList) XXX_Size() int { - return m.Size() -} -func (m *ClusterCIDRList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDRList.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterCIDRList proto.InternalMessageInfo - -func (m *ClusterCIDRSpec) Reset() { *m = ClusterCIDRSpec{} } -func (*ClusterCIDRSpec) ProtoMessage() {} -func (*ClusterCIDRSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{2} -} -func (m *ClusterCIDRSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterCIDRSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDRSpec.Merge(m, src) -} -func (m *ClusterCIDRSpec) XXX_Size() int { - return m.Size() -} -func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDRSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo - func (m *IPAddress) Reset() { *m = IPAddress{} } func (*IPAddress) ProtoMessage() {} func (*IPAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{3} + return fileDescriptor_c1b7ac8d7d97acec, []int{0} } func (m *IPAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +74,7 @@ var xxx_messageInfo_IPAddress proto.InternalMessageInfo func (m *IPAddressList) Reset() { *m = IPAddressList{} } func (*IPAddressList) ProtoMessage() {} func (*IPAddressList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{4} + return fileDescriptor_c1b7ac8d7d97acec, []int{1} } func (m *IPAddressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +102,7 @@ var xxx_messageInfo_IPAddressList proto.InternalMessageInfo func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } func 
(*IPAddressSpec) ProtoMessage() {} func (*IPAddressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{5} + return fileDescriptor_c1b7ac8d7d97acec, []int{2} } func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +130,7 @@ var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo func (m *ParentReference) Reset() { *m = ParentReference{} } func (*ParentReference) ProtoMessage() {} func (*ParentReference) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{6} + return fileDescriptor_c1b7ac8d7d97acec, []int{3} } func (m *ParentReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,9 +156,6 @@ func (m *ParentReference) XXX_DiscardUnknown() { var xxx_messageInfo_ParentReference proto.InternalMessageInfo func init() { - proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") - proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") - proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") @@ -257,189 +167,39 @@ func init() { } var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 698 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, - 0x14, 0xc6, 0x63, 0x92, 0x48, 0x78, 0x00, 0x85, 0xeb, 0xcd, 0x8d, 0x58, 0x38, 0xb9, 0xb9, 0x1b, - 0xae, 0x6e, 0x19, 0x03, 0x42, 0x51, 0xb7, 0x98, 0x48, 0x34, 0x52, 0x0b, 0xe9, 0x20, 0xba, 0xa8, - 0x58, 0xd4, 0xb1, 0x0f, 0x8e, 0x1b, 0xfc, 0x47, 0x33, 0xe3, 0x54, 0xec, 0xfa, 0x08, 0x7d, 0xa1, - 0x56, 0x6a, 0x57, 0x2c, 0x59, 0xb2, 0x8a, 0x8a, 0xfb, 0x02, 0x5d, 0xb7, 0x9b, 0x6a, 0x26, 0x4e, - 0xec, 0x24, 0x0d, 0xd0, 0x0d, 0xbb, 0xcc, 0x39, 0xbf, 0xf3, 0xcd, 0x39, 0x73, 0xbe, 0x24, 0xe8, - 0xb0, 0xff, 0x94, 0x61, 0x2f, 0x34, 0xfa, 0x71, 0x17, 0x68, 0x00, 0x1c, 0x98, 0x31, 0x80, 0xc0, - 0x09, 0xa9, 0x91, 0x26, 0xac, 0xc8, 0x33, 0x02, 0xe0, 0xef, 0x42, 0xda, 0xf7, 0x02, 0xd7, 0x18, - 0xec, 0x58, 0x17, 0x51, 0xcf, 0xda, 0x31, 0x5c, 0x08, 0x80, 0x5a, 0x1c, 0x1c, 0x1c, 0xd1, 0x90, - 0x87, 0x9a, 0x3e, 0xe2, 0xb1, 0x15, 0x79, 0x38, 0xe3, 0xf1, 0x98, 0xdf, 0xd8, 0x72, 0x3d, 0xde, - 0x8b, 0xbb, 0xd8, 0x0e, 0x7d, 0xc3, 0x0d, 0xdd, 0xd0, 0x90, 0x65, 0xdd, 0xf8, 0x5c, 0x9e, 0xe4, - 0x41, 0x7e, 0x1a, 0xc9, 0x6d, 0x34, 0x72, 0xd7, 0xdb, 0x21, 0x05, 0x63, 0x30, 0x77, 0xe5, 0xc6, - 0x5e, 0xc6, 0xf8, 0x96, 0xdd, 0xf3, 0x02, 0xa0, 0x97, 0x46, 0xd4, 0x77, 0x45, 0x80, 0x19, 0x3e, - 0x70, 0xeb, 0x77, 0x55, 0xc6, 0xa2, 0x2a, 0x1a, 0x07, 0xdc, 0xf3, 0x61, 0xae, 0xa0, 0x79, 0x5f, - 0x01, 0xb3, 0x7b, 0xe0, 0x5b, 0xb3, 0x75, 0x8d, 0x2f, 0x0a, 0x5a, 0x39, 0xb8, 0x88, 0x19, 0x07, - 0x7a, 0xd0, 0x6e, 0x11, 0xed, 0x0d, 0x5a, 0x16, 0x3d, 0x39, 0x16, 0xb7, 0xaa, 0x4a, 0x5d, 0xd9, - 0x5c, 0xd9, 0xdd, 0xc6, 0xd9, 0xa3, 0x4d, 0xa4, 0x71, 0xd4, 0x77, 0x45, 0x80, 0x61, 0x41, 0xe3, - 0xc1, 0x0e, 0x3e, 0xee, 0xbe, 0x05, 0x9b, 0xbf, 0x00, 0x6e, 0x99, 0xda, 0xd5, 0xb0, 0x56, 0x48, - 0x86, 0x35, 0x94, 0xc5, 0xc8, 0x44, 0x55, 0x7b, 0x89, 0x4a, 0x2c, 0x02, 0xbb, 0xba, 0x24, 0xd5, - 0x0d, 0x7c, 0xf7, 0x4a, 0x70, 0xae, 0xb9, 0x93, 0x08, 0x6c, 0x73, 0x35, 0x15, 0x2f, 0x89, 0x13, - 0x91, 0x52, 0x8d, 0xcf, 0x0a, 0xaa, 0xe4, 0xb8, 0xe7, 0x1e, 0xe3, 0xda, 0xd9, 
0xdc, 0x20, 0xf8, - 0x61, 0x83, 0x88, 0x6a, 0x39, 0xc6, 0x7a, 0x7a, 0xd3, 0xf2, 0x38, 0x92, 0x1b, 0xa2, 0x83, 0xca, - 0x1e, 0x07, 0x9f, 0x55, 0x97, 0xea, 0xc5, 0xcd, 0x95, 0xdd, 0xff, 0xff, 0x60, 0x0a, 0x73, 0x2d, - 0xd5, 0x2d, 0xb7, 0x85, 0x02, 0x19, 0x09, 0x35, 0xbe, 0x4f, 0xcf, 0x20, 0xa6, 0xd3, 0x5e, 0xa1, - 0xd5, 0x20, 0x74, 0xe0, 0x04, 0x2e, 0xc0, 0xe6, 0x21, 0x4d, 0xe7, 0xa8, 0xe7, 0x2f, 0x13, 0xb6, - 0x13, 0x5d, 0x1f, 0xe5, 0x38, 0x73, 0x3d, 0x19, 0xd6, 0x56, 0xf3, 0x11, 0x32, 0xa5, 0xa3, 0xed, - 0xa3, 0x4a, 0x04, 0x54, 0x00, 0xcf, 0x42, 0xc6, 0x4d, 0x8f, 0x33, 0xb9, 0x8d, 0xb2, 0xf9, 0x77, - 0xda, 0x5a, 0xa5, 0x33, 0x9d, 0x26, 0xb3, 0xbc, 0x56, 0x47, 0x25, 0x2f, 0x1a, 0xec, 0x55, 0x8b, - 0x75, 0x65, 0x53, 0xcd, 0x96, 0xd2, 0xee, 0x0c, 0xf6, 0x88, 0xcc, 0xa4, 0x44, 0xb3, 0x5a, 0x9a, - 0x23, 0x9a, 0x92, 0x68, 0x36, 0x3e, 0x29, 0x48, 0x6d, 0x77, 0xf6, 0x1d, 0x87, 0x02, 0x63, 0x8f, - 0xe0, 0xbc, 0xe3, 0x29, 0xe7, 0x6d, 0xdd, 0xb7, 0xb3, 0x49, 0x6b, 0x0b, 0x7d, 0xf7, 0x51, 0x41, - 0x6b, 0x13, 0xea, 0x11, 0x5c, 0x77, 0x34, 0xed, 0xba, 0xff, 0x1e, 0x3c, 0xc1, 0x02, 0xcf, 0xf9, - 0xb9, 0xf6, 0xa5, 0xe1, 0xce, 0x90, 0x1a, 0x59, 0x14, 0x02, 0x4e, 0xe0, 0x3c, 0xed, 0xff, 0xde, - 0x2f, 0x68, 0x67, 0x5c, 0x00, 0x14, 0x02, 0x1b, 0xcc, 0xb5, 0x64, 0x58, 0x53, 0x27, 0x41, 0x92, - 0x09, 0x36, 0x7e, 0x2a, 0xa8, 0x32, 0x43, 0x6b, 0xff, 0xa2, 0xb2, 0x4b, 0xc3, 0x38, 0x92, 0xb7, - 0xa9, 0x59, 0x9f, 0x87, 0x22, 0x48, 0x46, 0x39, 0xed, 0x09, 0x5a, 0xa6, 0xc0, 0xc2, 0x98, 0xda, - 0x20, 0x97, 0xa7, 0x66, 0xaf, 0x44, 0xd2, 0x38, 0x99, 0x10, 0x9a, 0x81, 0xd4, 0xc0, 0xf2, 0x81, - 0x45, 0x96, 0x0d, 0xa9, 0x3f, 0xff, 0x4a, 0x71, 0xf5, 0x68, 0x9c, 0x20, 0x19, 0x23, 0x9c, 0x2a, - 0x0e, 0xb3, 0x4e, 0x15, 0x2c, 0x91, 0x19, 0xcd, 0x44, 0xc5, 0xd8, 0x73, 0xaa, 0x65, 0x09, 0x6c, - 0xa7, 0x40, 0xf1, 0xb4, 0xdd, 0xfa, 0x31, 0xac, 0xfd, 0xb3, 0xe8, 0x97, 0x97, 0x5f, 0x46, 0xc0, - 0xf0, 0x69, 0xbb, 0x45, 0x44, 0xb1, 0xd9, 0xba, 0xba, 0xd5, 0x0b, 0xd7, 0xb7, 0x7a, 0xe1, 0xe6, - 0x56, 0x2f, 0xbc, 0x4f, 0x74, 0xe5, 0x2a, 0xd1, 0x95, 0xeb, 0x44, 0x57, 0x6e, 0x12, 0x5d, 0xf9, - 0x9a, 0xe8, 0xca, 0x87, 0x6f, 0x7a, 0xe1, 0xb5, 0x7e, 0xf7, 0x3f, 0xda, 0xaf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xf9, 0x9d, 0x9e, 0xc6, 0x0b, 0x07, 0x00, 0x00, -} - -func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterCIDR) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterCIDRList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterCIDRSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.IPv6) - copy(dAtA[i:], m.IPv6) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv6))) - i-- - dAtA[i] = 0x22 - i -= len(m.IPv4) - copy(dAtA[i:], m.IPv4) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv4))) - i-- - dAtA[i] = 0x1a - i = encodeVarintGenerated(dAtA, i, uint64(m.PerNodeHostBits)) - i-- - dAtA[i] = 0x10 - if m.NodeSelector != nil { - { - size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + // 509 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x8d, 0xdb, 0x44, 0x8a, 0xb7, 0x44, 0x80, 0x4f, 0x51, 0x0e, 0x9b, 0x28, 0x5c, 0x8a, 0x44, + 0x77, 0x49, 0x85, 0x10, 0x57, 0x22, 0xa4, 0xaa, 0x12, 0xb4, 0x95, 0xb9, 0xa1, 0x1e, 0xd8, 0x38, + 0x53, 0xc7, 0x18, 0xef, 0xae, 0x76, 0xd7, 0x41, 0xdc, 0xf8, 0x09, 0xfc, 0x1b, 0x4e, 0x70, 0xce, + 0xb1, 0xc7, 0x9e, 0x22, 0x62, 0xfe, 0x08, 0xda, 0x8d, 0x63, 0x57, 0x8d, 0xfa, 0x71, 0xf3, 0xbc, + 0x79, 0xef, 0xcd, 0xbc, 0x59, 0x19, 0x1d, 0xa5, 0x6f, 0x34, 0x49, 0x04, 0x4d, 0xf3, 0x09, 0x28, + 0x0e, 0x06, 0x34, 0x9d, 0x03, 0x9f, 0x0a, 0x45, 0xcb, 0x06, 0x93, 0x09, 0xe5, 0x60, 0xbe, 0x09, + 0x95, 0x26, 0x3c, 0xa6, 0xf3, 0x11, 0xfb, 0x2a, 0x67, 0x6c, 0x44, 0x63, 0xe0, 0xa0, 0x98, 0x81, + 0x29, 0x91, 0x4a, 0x18, 0x11, 0xe0, 0x35, 0x9f, 0x30, 0x99, 0x90, 0x9a, 0x4f, 0x36, 0xfc, 0xde, + 0x41, 0x9c, 0x98, 0x59, 0x3e, 0x21, 0x91, 0xc8, 0x68, 0x2c, 0x62, 0x41, 0x9d, 0x6c, 0x92, 0x5f, + 0xb8, 0xca, 0x15, 0xee, 0x6b, 0x6d, 0xd7, 0x7b, 0x55, 0x8f, 0xcf, 0x58, 0x34, 0x4b, 0x38, 0xa8, + 0xef, 0x54, 0xa6, 0xb1, 0x05, 0x34, 0xcd, 0xc0, 0x30, 0x3a, 0xdf, 0x5a, 0xa2, 0x47, 0x6f, 0x53, + 0xa9, 0x9c, 0x9b, 0x24, 0x83, 0x2d, 0xc1, 0xeb, 0xfb, 0x04, 0x3a, 0x9a, 0x41, 0xc6, 0x6e, 0xea, + 0x86, 0x7f, 0x3c, 0xe4, 0x1f, 0x9f, 0xbd, 0x9d, 0x4e, 0x15, 0x68, 0x1d, 0x7c, 0x46, 0x6d, 0xbb, + 0xd1, 0x94, 0x19, 0xd6, 0xf5, 0x06, 0xde, 0xfe, 0xde, 0xe1, 0x4b, 0x52, 0x9f, 0xa3, 0x32, 0x26, + 0x32, 0x8d, 0x2d, 0xa0, 0x89, 0x65, 0x93, 0xf9, 0x88, 0x9c, 0x4e, 0xbe, 0x40, 0x64, 0x3e, 0x80, + 0x61, 0xe3, 0x60, 0xb1, 0xec, 0x37, 0x8a, 0x65, 0x1f, 0xd5, 0x58, 0x58, 0xb9, 0x06, 0xa7, 0xa8, + 0xa9, 0x25, 0x44, 0xdd, 0x1d, 0xe7, 0x7e, 0x40, 0xee, 0x3e, 0x36, 0xa9, 0x56, 0xfb, 0x28, 0x21, + 0x1a, 0x3f, 0x2a, 0xad, 0x9b, 0xb6, 0x0a, 0x9d, 0xd1, 0xf0, 0xb7, 0x87, 0x3a, 0x15, 0xeb, 0x7d, + 0xa2, 0x4d, 0x70, 
0xbe, 0x15, 0x82, 0x3c, 0x2c, 0x84, 0x55, 0xbb, 0x08, 0x4f, 0xca, 0x39, 0xed, + 0x0d, 0x72, 0x2d, 0xc0, 0x09, 0x6a, 0x25, 0x06, 0x32, 0xdd, 0xdd, 0x19, 0xec, 0xee, 0xef, 0x1d, + 0x3e, 0x7f, 0x70, 0x82, 0x71, 0xa7, 0x74, 0x6d, 0x1d, 0x5b, 0x7d, 0xb8, 0xb6, 0x19, 0x66, 0xd7, + 0xd6, 0xb7, 0xb1, 0x82, 0x73, 0xe4, 0x4b, 0xa6, 0x80, 0x9b, 0x10, 0x2e, 0xca, 0xfd, 0xe9, 0x7d, + 0x43, 0xce, 0x36, 0x02, 0x50, 0xc0, 0x23, 0x18, 0x77, 0x8a, 0x65, 0xdf, 0xaf, 0xc0, 0xb0, 0x36, + 0x1c, 0xfe, 0xf2, 0xd0, 0xe3, 0x1b, 0xec, 0xe0, 0x19, 0x6a, 0xc5, 0x4a, 0xe4, 0xd2, 0x4d, 0xf3, + 0xeb, 0x3d, 0x8f, 0x2c, 0x18, 0xae, 0x7b, 0xc1, 0x0b, 0xd4, 0x56, 0xa0, 0x45, 0xae, 0x22, 0x70, + 0x8f, 0xe7, 0xd7, 0x57, 0x0a, 0x4b, 0x3c, 0xac, 0x18, 0x01, 0x45, 0x3e, 0x67, 0x19, 0x68, 0xc9, + 0x22, 0xe8, 0xee, 0x3a, 0xfa, 0xd3, 0x92, 0xee, 0x9f, 0x6c, 0x1a, 0x61, 0xcd, 0x09, 0x06, 0xa8, + 0x69, 0x8b, 0x6e, 0xd3, 0x71, 0xab, 0x87, 0xb6, 0xdc, 0xd0, 0x75, 0xc6, 0xef, 0x16, 0x2b, 0xdc, + 0xb8, 0x5c, 0xe1, 0xc6, 0xd5, 0x0a, 0x37, 0x7e, 0x14, 0xd8, 0x5b, 0x14, 0xd8, 0xbb, 0x2c, 0xb0, + 0x77, 0x55, 0x60, 0xef, 0x6f, 0x81, 0xbd, 0x9f, 0xff, 0x70, 0xe3, 0x13, 0xbe, 0xfb, 0x6f, 0xff, + 0x1f, 0x00, 0x00, 0xff, 0xff, 0xde, 0x6a, 0x6d, 0x5e, 0x27, 0x04, 0x00, 0x00, } func (m *IPAddress) Marshal() (dAtA []byte, err error) { @@ -587,11 +347,6 @@ func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x2a i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -626,54 +381,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *ClusterCIDR) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterCIDRList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterCIDRSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NodeSelector != nil { - l = m.NodeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) - l = len(m.IPv4) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IPv6) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *IPAddress) Size() (n int) { if m == nil { return 0 @@ -731,8 +438,6 @@ func (m *ParentReference) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -742,46 +447,6 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *ClusterCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDR{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRList) 
String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ClusterCIDR{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ClusterCIDRList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDRSpec{`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, - `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, - `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, - `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, - `}`, - }, "") - return s -} func (this *IPAddress) String() string { if this == nil { return "nil" @@ -828,7 +493,6 @@ func (this *ParentReference) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, }, "") return s @@ -841,7 +505,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { +func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -864,10 +528,10 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -957,7 +621,7 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { +func (m *IPAddressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -980,10 +644,10 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1048,7 +712,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ClusterCIDR{}) + m.Items = append(m.Items, IPAddress{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1074,7 +738,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1097,417 +761,15 @@ func (m 
*ClusterCIDRSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeSelector == nil { - m.NodeSelector = &v11.NodeSelector{} - } - if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) - } - m.PerNodeHostBits = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PerNodeHostBits |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IPv4 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IPv6 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPAddress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPAddressList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, IPAddress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1719,38 +981,6 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/staging/src/k8s.io/api/networking/v1alpha1/generated.proto b/staging/src/k8s.io/api/networking/v1alpha1/generated.proto index 0f1f30d7011ff..d6e8376a38036 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/networking/v1alpha1/generated.proto @@ -21,7 +21,6 @@ syntax = "proto2"; package k8s.io.api.networking.v1alpha1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ 
-29,69 +28,6 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/networking/v1alpha1"; -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -message ClusterCIDR { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the ClusterCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ClusterCIDRSpec spec = 2; -} - -// ClusterCIDRList contains a list of ClusterCIDR. -message ClusterCIDRList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of ClusterCIDRs. - repeated ClusterCIDR items = 2; -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -message ClusterCIDRSpec { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - optional int32 perNodeHostBits = 2; - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - optional string ipv4 = 3; - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - optional string ipv6 = 4; -} - // IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs // that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. // An IP address can be represented in different formats, to guarantee the uniqueness of the IP, @@ -147,9 +83,5 @@ message ParentReference { // Name is the name of the object being referenced. // +required optional string name = 4; - - // UID is the uid of the object being referenced. 
- // +optional - optional string uid = 5; } diff --git a/staging/src/k8s.io/api/networking/v1alpha1/register.go b/staging/src/k8s.io/api/networking/v1alpha1/register.go index 8dda6394d47b3..f45f8ed1ecc81 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/register.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/register.go @@ -52,8 +52,6 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ClusterCIDR{}, - &ClusterCIDRList{}, &IPAddress{}, &IPAddressList{}, ) diff --git a/staging/src/k8s.io/api/networking/v1alpha1/types.go b/staging/src/k8s.io/api/networking/v1alpha1/types.go index 52e4a11e8b1bc..8c431e5b5c6a9 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/types.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/types.go @@ -17,86 +17,9 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" ) -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 - -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -type ClusterCIDR struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the desired state of the ClusterCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -type ClusterCIDRSpec struct { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. 
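As a worked example of the perNodeHostBits documentation being removed here (illustrative arithmetic only, not code from the ClusterCIDR allocator): the host bits fix both the per-node address count and the per-node prefix length for each IP family.

package main

import "fmt"

func main() {
	perNodeHostBits := 8
	fmt.Println(1 << perNodeHostBits)  // 256 IPs per node
	fmt.Println(32 - perNodeHostBits)  // IPv4 per-node prefix: /24
	fmt.Println(128 - perNodeHostBits) // IPv6 per-node prefix: /120
}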
- // +optional - IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 - -// ClusterCIDRList contains a list of ClusterCIDR. -type ClusterCIDRList struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of ClusterCIDRs. - Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -143,9 +66,6 @@ type ParentReference struct { // Name is the name of the object being referenced. // +required Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` - // UID is the uid of the object being referenced. - // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index 85304784f4e4d..1fca9449844e8 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -27,38 +27,6 @@ package v1alpha1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_ClusterCIDR = map[string]string{ - "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (ClusterCIDR) SwaggerDoc() map[string]string { - return map_ClusterCIDR -} - -var map_ClusterCIDRList = map[string]string{ - "": "ClusterCIDRList contains a list of ClusterCIDR.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of ClusterCIDRs.", -} - -func (ClusterCIDRList) SwaggerDoc() map[string]string { - return map_ClusterCIDRList -} - -var map_ClusterCIDRSpec = map[string]string{ - "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", - "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", -} - -func (ClusterCIDRSpec) SwaggerDoc() map[string]string { - return map_ClusterCIDRSpec -} - var map_IPAddress = map[string]string{ "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -94,7 +62,6 @@ var map_ParentReference = map[string]string{ "resource": "Resource is the resource of the object being referenced.", "namespace": "Namespace is the namespace of the object being referenced.", "name": "Name is the name of the object being referenced.", - "uid": "UID is the uid of the object being referenced.", } func (ParentReference) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go index 97db2eacc95c4..05b66cbfc7326 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -22,91 +22,9 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. 
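A hedged sketch of the canonical-name rule in the IPAddress doc above (not code from this package): Go's net/netip prints IPv4 without leading zeros and IPv6 in the RFC 5952 form, so a parse-and-reprint round trip distinguishes the documented valid and invalid examples.

package main

import (
	"fmt"
	"net/netip"
)

// isCanonicalIPName reports whether name is an IP written in the canonical
// form that the IPAddress API expects for object names.
func isCanonicalIPName(name string) bool {
	addr, err := netip.ParseAddr(name)
	if err != nil {
		return false
	}
	return addr.String() == name
}

func main() {
	fmt.Println(isCanonicalIPName("192.168.1.5"))       // true
	fmt.Println(isCanonicalIPName("2001:db8::1"))       // true
	fmt.Println(isCanonicalIPName("10.01.2.3"))         // false: leading zero in an octet
	fmt.Println(isCanonicalIPName("2001:db8:0:0:0::1")) // false: not the RFC 5952 shortest form
}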
-func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { - if in == nil { - return nil - } - out := new(ClusterCIDR) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDR) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterCIDR, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. -func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { - if in == nil { - return nil - } - out := new(ClusterCIDRList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { - *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. -func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { - if in == nil { - return nil - } - out := new(ClusterCIDRSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPAddress) DeepCopyInto(out *IPAddress) { *out = *in diff --git a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go index 60438ba59fce0..8b500f2ea10f7 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -21,42 +21,6 @@ limitations under the License. package v1alpha1 -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) { - return 1, 25 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) { - return 1, 28 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) { - return 1, 31 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) { - return 1, 25 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { - return 1, 28 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { - return 1, 31 -} - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { diff --git a/staging/src/k8s.io/api/storage/v1/generated.proto b/staging/src/k8s.io/api/storage/v1/generated.proto index 5f8eccaefc5a3..b35f708c66397 100644 --- a/staging/src/k8s.io/api/storage/v1/generated.proto +++ b/staging/src/k8s.io/api/storage/v1/generated.proto @@ -88,7 +88,7 @@ message CSIDriverSpec { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/staging/src/k8s.io/api/storage/v1/types.go b/staging/src/k8s.io/api/storage/v1/types.go index c785f368efd48..7d7b7664b89cd 100644 --- a/staging/src/k8s.io/api/storage/v1/types.go +++ b/staging/src/k8s.io/api/storage/v1/types.go @@ -291,7 +291,7 @@ type CSIDriverSpec struct { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/staging/src/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/storage/v1/types_swagger_doc_generated.go index c92a7f95a29eb..69ee683610731 100644 --- a/staging/src/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", diff --git a/staging/src/k8s.io/api/storage/v1beta1/generated.proto b/staging/src/k8s.io/api/storage/v1beta1/generated.proto index 2b354dd4715e3..b99fd39e48aeb 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/generated.proto +++ b/staging/src/k8s.io/api/storage/v1beta1/generated.proto @@ -91,7 +91,7 @@ message CSIDriverSpec { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. 
// This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/staging/src/k8s.io/api/storage/v1beta1/types.go b/staging/src/k8s.io/api/storage/v1beta1/types.go index 4c39b49ccd882..0f5ade3c13838 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/types.go +++ b/staging/src/k8s.io/api/storage/v1beta1/types.go @@ -311,7 +311,7 @@ type CSIDriverSpec struct { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/staging/src/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 0f2718b9c1459..6d9d233066a31 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. 
Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.json index 9c75457b35fca..b8a149ba6501f 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.json @@ -690,6 +690,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -713,6 +716,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -974,6 +980,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -997,6 +1006,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1258,6 +1270,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1281,6 +1296,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1466,7 +1484,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1504,7 +1528,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1543,7 +1573,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1581,7 +1617,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.pb index d2b760ddd5726..e0508dbda05e6 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.yaml index 7073ec973a193..7b2b628c65815 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.DaemonSet.yaml @@ -116,6 +116,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -137,6 +141,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -159,6 +167,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -180,6 +192,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + 
matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -239,6 +255,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -254,6 +272,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -446,6 +466,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -461,6 +483,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -655,6 +679,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -670,6 +696,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.json index b2d9e6c90b381..41d75f1ffd6b2 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.json @@ -691,6 +691,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -714,6 +717,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -975,6 +981,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -998,6 +1007,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1259,6 +1271,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1282,6 +1297,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1467,7 +1485,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1505,7 +1529,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1544,7 +1574,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1582,7 +1618,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.pb index 58f100937b40e..0e7deff85ed9a 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.yaml index 4a7cd2beecd8b..847aa54a2ba41 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.Deployment.yaml @@ -124,6 +124,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - 
mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -145,6 +149,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -167,6 +175,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -188,6 +200,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -247,6 +263,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -262,6 +280,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -454,6 +474,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -469,6 +491,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -663,6 +687,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -678,6 +704,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.json index b6edd6afed864..84d71ebb41cad 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.json @@ -692,6 +692,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -715,6 +718,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -976,6 +982,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -999,6 +1008,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1260,6 +1272,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1283,6 +1298,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1468,7 +1486,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1506,7 +1530,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1545,7 +1575,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1583,7 +1619,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.pb index b7f8271cc7725..f4125ab3542b2 100644 
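The testdata fixtures in this area all add the new sleep lifecycle handler, serialized as "sleep": {"seconds": 1} under postStart and preStop. A minimal sketch of building such a hook in Go, assuming the vendored k8s.io/api/core/v1 types mirror that JSON (LifecycleHandler.Sleep pointing at a SleepAction with a Seconds field):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:  "app",
		Image: "registry.k8s.io/pause:3.9", // illustrative image
		Lifecycle: &corev1.Lifecycle{
			PreStop: &corev1.LifecycleHandler{
				// Pause briefly before termination proceeds, instead of
				// running an exec, httpGet, or tcpSocket handler.
				Sleep: &corev1.SleepAction{Seconds: 5},
			},
		},
	}
	fmt.Printf("preStop sleeps for %d seconds\n", c.Lifecycle.PreStop.Sleep.Seconds)
}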
Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.yaml index d8719cc509098..a905dd53f6a0e 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.ReplicaSet.yaml @@ -116,6 +116,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -137,6 +141,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -159,6 +167,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -180,6 +192,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -239,6 +255,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -254,6 +272,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -446,6 +466,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -461,6 +483,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -655,6 +679,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -670,6 +696,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.json index 8cb242ddda855..48c5bb9f8d467 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.json @@ -691,6 +691,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -714,6 +717,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -975,6 +981,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -998,6 +1007,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1259,6 +1271,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1282,6 +1297,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1467,7 +1485,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1505,7 +1529,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + 
"matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1544,7 +1574,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1582,7 +1618,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.pb index 55da801fa096a..2ddbe3615d14b 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.yaml index f0e49831c34a2..0cc8c95b0fd03 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1.StatefulSet.yaml @@ -124,6 +124,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -145,6 +149,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -167,6 +175,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -188,6 +200,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -247,6 +263,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -262,6 +280,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -454,6 +474,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -469,6 +491,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -663,6 +687,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -678,6 +704,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.json index 499e831a767ca..b84f4aa8b6168 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.json @@ -691,6 +691,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -714,6 +717,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -975,6 +981,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, 
"preStop": { @@ -998,6 +1007,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1259,6 +1271,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1282,6 +1297,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1467,7 +1485,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1505,7 +1529,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1544,7 +1574,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1582,7 +1618,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.pb index 773a3f81e24df..af3e8458b93bf 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.yaml index 73f52d258678f..4c537c4aebd18 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.Deployment.yaml @@ -126,6 +126,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -147,6 +151,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -169,6 +177,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -190,6 +202,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -249,6 +265,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -264,6 +282,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -456,6 +476,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -471,6 +493,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -665,6 +689,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -680,6 +706,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git 
a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.json index 7b2e0f2b53e2a..b67c5a6f33b0c 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.json @@ -691,6 +691,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -714,6 +717,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -975,6 +981,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -998,6 +1007,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1259,6 +1271,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1282,6 +1297,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1467,7 +1485,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1505,7 +1529,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1544,7 +1574,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1582,7 +1618,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.pb index 6a627b63f8fc9..08fc1f1b5320e 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.yaml index b3ce95b2c7fcf..6d4bc4d12aea7 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta1.StatefulSet.yaml @@ -124,6 +124,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -145,6 +149,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -167,6 +175,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -188,6 +200,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -247,6 +263,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -262,6 +280,8 @@ spec: path: pathValue 
port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -454,6 +474,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -469,6 +491,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -663,6 +687,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -678,6 +704,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.json index db821b6459245..5c6df44c49282 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.json @@ -690,6 +690,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -713,6 +716,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -974,6 +980,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -997,6 +1006,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1258,6 +1270,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1281,6 +1296,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1466,7 +1484,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1504,7 +1528,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1543,7 +1573,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1581,7 +1617,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.pb index 3238c5ef7f7c9..33411f8b80f75 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.yaml index b51e4ef1609f0..8ce63252da282 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.DaemonSet.yaml @@ -116,6 +116,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -137,6 +141,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: 
- key: keyValue @@ -159,6 +167,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -180,6 +192,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -239,6 +255,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -254,6 +272,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -446,6 +466,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -461,6 +483,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -655,6 +679,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -670,6 +696,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.json index 75acc3db3f14f..3a7c5fe7c691c 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.json @@ -691,6 +691,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -714,6 +717,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -975,6 +981,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -998,6 +1007,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1259,6 +1271,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1282,6 +1297,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1467,7 +1485,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1505,7 +1529,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1544,7 +1574,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1582,7 +1618,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.pb index 7b6cc9d0d5acd..4e7fb49361a77 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.yaml 
b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.yaml index 7d9ac334bf598..7aded373e5110 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.Deployment.yaml @@ -124,6 +124,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -145,6 +149,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -167,6 +175,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -188,6 +200,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -247,6 +263,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -262,6 +280,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -454,6 +474,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -469,6 +491,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -663,6 +687,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -678,6 +704,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.json index d672bb8328bf4..9fb52a2899684 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.json @@ -692,6 +692,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -715,6 +718,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -976,6 +982,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -999,6 +1008,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1260,6 +1272,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1283,6 +1298,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1468,7 +1486,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1506,7 +1530,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1545,7 +1575,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + 
"mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1583,7 +1619,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.pb index 859e564283925..ddc5cff24da36 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.yaml index fa6d7ab0db1de..aa675ba48d52b 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.ReplicaSet.yaml @@ -116,6 +116,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -137,6 +141,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -159,6 +167,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -180,6 +192,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -239,6 +255,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -254,6 +272,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -446,6 +466,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -461,6 +483,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -655,6 +679,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -670,6 +696,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.json b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.json index a8daba5537844..173842cd32b1e 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.json @@ -691,6 +691,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -714,6 +717,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -975,6 +981,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -998,6 +1007,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1259,6 
+1271,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1282,6 +1297,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1467,7 +1485,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1505,7 +1529,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1544,7 +1574,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1582,7 +1618,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.pb b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.pb index 5a644069eef1e..3f091401ece5a 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.yaml index ccd024ca3bb13..605da4f308564 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/apps.v1beta2.StatefulSet.yaml @@ -124,6 +124,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -145,6 +149,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -167,6 +175,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -188,6 +200,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -247,6 +263,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -262,6 +280,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -454,6 +474,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -469,6 +491,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -663,6 +687,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -678,6 +704,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json index 
a66a9e1d4f3fb..61062c835902f 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json @@ -766,6 +766,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -789,6 +792,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1050,6 +1056,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1073,6 +1082,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1334,6 +1346,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1357,6 +1372,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1542,7 +1560,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1580,7 +1604,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1619,7 +1649,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1657,7 +1693,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb index 7b2cb315a6120..0051f3a43bb11 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml index 2bfafb075e6e8..6bb4d5c516863 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml @@ -171,6 +171,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -192,6 +196,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -214,6 +222,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -235,6 +247,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -294,6 +310,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -309,6 +327,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -501,6 +521,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: 
hostValue port: portValue @@ -516,6 +538,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -710,6 +734,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -725,6 +751,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json index 7ea2593f3db00..79a402da70e8f 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json @@ -717,6 +717,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -740,6 +743,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1001,6 +1007,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1024,6 +1033,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1285,6 +1297,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1308,6 +1323,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1493,7 +1511,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1531,7 +1555,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1570,7 +1600,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1608,7 +1644,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb index 53f7e66d3b5fa..643b87435200b 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml index e34f810bb4636..122d6464ec712 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml @@ -135,6 +135,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -156,6 +160,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -178,6 +186,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -199,6 +211,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + 
matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -258,6 +274,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -273,6 +291,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -465,6 +485,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -480,6 +502,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -674,6 +698,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -689,6 +715,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json index 55bc6d8744bdd..4e17905804de9 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json @@ -766,6 +766,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -789,6 +792,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1050,6 +1056,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1073,6 +1082,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1334,6 +1346,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1357,6 +1372,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1542,7 +1560,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1580,7 +1604,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1619,7 +1649,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1657,7 +1693,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb index a2d76e4eb3c53..010f565133a75 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml index c017d6752aff3..895dd090eaa80 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml @@ -171,6 +171,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - 
matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -192,6 +196,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -214,6 +222,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -235,6 +247,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -294,6 +310,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -309,6 +327,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -501,6 +521,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -516,6 +538,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -710,6 +734,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -725,6 +751,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.json b/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.json index 98f22c623db1a..c302f0be48aaf 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.json +++ b/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.json @@ -632,6 +632,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -655,6 +658,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -916,6 +922,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -939,6 +948,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1200,6 +1212,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1223,6 +1238,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1408,7 +1426,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1446,7 +1470,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1485,7 +1515,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1523,7 +1559,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.pb b/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.pb index 25e1dfdbd20ff..961a662f0b0ef 
100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.pb and b/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.yaml b/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.yaml index 77b815b59ce38..988d02398a678 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/core.v1.Pod.yaml @@ -72,6 +72,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -93,6 +97,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -115,6 +123,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -136,6 +148,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -195,6 +211,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -210,6 +228,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -402,6 +422,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -417,6 +439,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -611,6 +635,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -626,6 +652,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.json b/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.json index 95c1048d0c73a..3d08304c9a0c0 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.json +++ b/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.json @@ -675,6 +675,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -698,6 +701,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -959,6 +965,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -982,6 +991,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1243,6 +1255,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1266,6 +1281,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1451,7 +1469,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1489,7 +1513,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + 
"mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1528,7 +1558,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1566,7 +1602,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.pb b/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.pb index 4a4dc9fcf2996..29a3dffd9b1a5 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.pb and b/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.yaml b/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.yaml index b7eaa8df651f9..3159f2d707be0 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/core.v1.PodTemplate.yaml @@ -105,6 +105,10 @@ template: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -126,6 +130,10 @@ template: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -148,6 +156,10 @@ template: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -169,6 +181,10 @@ template: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -228,6 +244,8 @@ template: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -243,6 +261,8 @@ template: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -435,6 +455,8 @@ template: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -450,6 +472,8 @@ template: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -644,6 +668,8 @@ template: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -659,6 +685,8 @@ template: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.json b/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.json index ada39041c6678..fb729da601d2f 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.json +++ b/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.json @@ -681,6 +681,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -704,6 +707,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -965,6 +971,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, 
+ "sleep": { + "seconds": 1 } }, "preStop": { @@ -988,6 +997,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1249,6 +1261,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1272,6 +1287,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1457,7 +1475,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1495,7 +1519,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1534,7 +1564,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1572,7 +1608,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.pb b/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.pb index d455f8fc0b5bc..d445b707e32a5 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.pb and b/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.yaml b/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.yaml index f8dad4fd4e006..29936bc64b6a5 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/core.v1.ReplicationController.yaml @@ -110,6 +110,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -131,6 +135,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -153,6 +161,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -174,6 +186,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -233,6 +249,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -248,6 +266,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -440,6 +460,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -455,6 +477,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -649,6 +673,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -664,6 +690,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + 
sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.json b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.json index 6d689c7fae681..ec46f586aa766 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.json @@ -690,6 +690,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -713,6 +716,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -974,6 +980,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -997,6 +1006,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1258,6 +1270,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1281,6 +1296,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1466,7 +1484,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1504,7 +1528,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1543,7 +1573,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1581,7 +1617,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.pb b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.pb index 08020d6ecc4dc..d06b24ae1f322 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.yaml index ffa7aa920fe37..ff380e1c35db3 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.DaemonSet.yaml @@ -116,6 +116,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -137,6 +141,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -159,6 +167,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -180,6 +192,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -239,6 +255,8 @@ spec: path: pathValue port: portValue 
scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -254,6 +272,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -446,6 +466,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -461,6 +483,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -655,6 +679,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -670,6 +696,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.json b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.json index 5c59dcadfcf40..bbbdc7262b037 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.json +++ b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.json @@ -691,6 +691,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -714,6 +717,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -975,6 +981,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -998,6 +1007,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1259,6 +1271,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1282,6 +1297,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1467,7 +1485,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1505,7 +1529,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1544,7 +1574,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1582,7 +1618,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.pb b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.pb index 2f1ffa3e56c71..edb8b3f84a026 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.pb and b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.yaml b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.yaml index 82421c0358b6d..8ad52c63c888c 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.Deployment.yaml @@ -126,6 +126,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ 
-147,6 +151,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -169,6 +177,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -190,6 +202,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -249,6 +265,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -264,6 +282,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -456,6 +476,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -471,6 +493,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -665,6 +689,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -680,6 +706,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.json b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.json index cea89d1f61f21..ed004b94c453c 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.json +++ b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.json @@ -692,6 +692,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -715,6 +718,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -976,6 +982,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -999,6 +1008,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1260,6 +1272,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } }, "preStop": { @@ -1283,6 +1298,9 @@ "tcpSocket": { "port": "portValue", "host": "hostValue" + }, + "sleep": { + "seconds": 1 } } }, @@ -1468,7 +1486,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1506,7 +1530,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] @@ -1545,7 +1575,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } ], "preferredDuringSchedulingIgnoredDuringExecution": [ @@ -1583,7 +1619,13 @@ ] } ] - } + }, + "matchLabelKeys": [ + "matchLabelKeysValue" + ], + "mismatchLabelKeys": [ + "mismatchLabelKeysValue" + ] } } ] diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.pb b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.pb index a52b5c8791350..29b97c382504e 100644 Binary files 
a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.pb and b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.yaml b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.yaml index 946290793ba4a..8a161b665fbe8 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.ReplicaSet.yaml @@ -116,6 +116,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -137,6 +141,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -159,6 +167,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -180,6 +192,10 @@ spec: - valuesValue matchLabels: matchLabelsKey: matchLabelsValue + matchLabelKeys: + - matchLabelKeysValue + mismatchLabelKeys: + - mismatchLabelKeysValue namespaceSelector: matchExpressions: - key: keyValue @@ -239,6 +255,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -254,6 +272,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -446,6 +466,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -461,6 +483,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -655,6 +679,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue @@ -670,6 +696,8 @@ spec: path: pathValue port: portValue scheme: schemeValue + sleep: + seconds: 1 tcpSocket: host: hostValue port: portValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.json b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.json index 8910af7a7232b..c5f84c6f50690 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.json +++ b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.json @@ -48,8 +48,7 @@ "group": "groupValue", "resource": "resourceValue", "namespace": "namespaceValue", - "name": "nameValue", - "uid": "uidValue" + "name": "nameValue" } } } \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.pb b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.pb index cf58bbab2dc37..7fceacd6bcacc 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.pb and b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.yaml b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.yaml index 0b591beae2b56..0bf2b17cb87f6 100644 --- 
a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.IPAddress.yaml @@ -38,4 +38,3 @@ spec: name: nameValue namespace: namespaceValue resource: resourceValue - uid: uidValue diff --git a/staging/src/k8s.io/api/testdata/v1.27.0/admissionregistration.k8s.io.v1alpha1.ValidatingAdmissionPolicy.after_roundtrip.json b/staging/src/k8s.io/api/testdata/v1.27.0/admissionregistration.k8s.io.v1alpha1.ValidatingAdmissionPolicy.after_roundtrip.json deleted file mode 100644 index d61b666f31795..0000000000000 --- a/staging/src/k8s.io/api/testdata/v1.27.0/admissionregistration.k8s.io.v1alpha1.ValidatingAdmissionPolicy.after_roundtrip.json +++ /dev/null @@ -1,166 +0,0 @@ -{ - "kind": "ValidatingAdmissionPolicy", - "apiVersion": "admissionregistration.k8s.io/v1alpha1", - "metadata": { - "name": "nameValue", - "generateName": "generateNameValue", - "namespace": "namespaceValue", - "selfLink": "selfLinkValue", - "uid": "uidValue", - "resourceVersion": "resourceVersionValue", - "generation": 7, - "creationTimestamp": "2008-01-01T01:01:01Z", - "deletionTimestamp": "2009-01-01T01:01:01Z", - "deletionGracePeriodSeconds": 10, - "labels": { - "labelsKey": "labelsValue" - }, - "annotations": { - "annotationsKey": "annotationsValue" - }, - "ownerReferences": [ - { - "apiVersion": "apiVersionValue", - "kind": "kindValue", - "name": "nameValue", - "uid": "uidValue", - "controller": true, - "blockOwnerDeletion": true - } - ], - "finalizers": [ - "finalizersValue" - ], - "managedFields": [ - { - "manager": "managerValue", - "operation": "operationValue", - "apiVersion": "apiVersionValue", - "time": "2004-01-01T01:01:01Z", - "fieldsType": "fieldsTypeValue", - "fieldsV1": {}, - "subresource": "subresourceValue" - } - ] - }, - "spec": { - "paramKind": { - "apiVersion": "apiVersionValue", - "kind": "kindValue" - }, - "matchConstraints": { - "namespaceSelector": { - "matchLabels": { - "matchLabelsKey": "matchLabelsValue" - }, - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - }, - "objectSelector": { - "matchLabels": { - "matchLabelsKey": "matchLabelsValue" - }, - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - }, - "resourceRules": [ - { - "resourceNames": [ - "resourceNamesValue" - ], - "operations": [ - "operationsValue" - ], - "apiGroups": [ - "apiGroupsValue" - ], - "apiVersions": [ - "apiVersionsValue" - ], - "resources": [ - "resourcesValue" - ], - "scope": "scopeValue" - } - ], - "excludeResourceRules": [ - { - "resourceNames": [ - "resourceNamesValue" - ], - "operations": [ - "operationsValue" - ], - "apiGroups": [ - "apiGroupsValue" - ], - "apiVersions": [ - "apiVersionsValue" - ], - "resources": [ - "resourcesValue" - ], - "scope": "scopeValue" - } - ], - "matchPolicy": "matchPolicyValue" - }, - "validations": [ - { - "expression": "expressionValue", - "message": "messageValue", - "reason": "reasonValue", - "messageExpression": "messageExpressionValue" - } - ], - "failurePolicy": "failurePolicyValue", - "auditAnnotations": [ - { - "key": "keyValue", - "valueExpression": "valueExpressionValue" - } - ], - "matchConditions": [ - { - "name": "nameValue", - "expression": "expressionValue" - } - ], - "variables": null - }, - "status": { - "observedGeneration": 1, - "typeChecking": { - "expressionWarnings": [ - { - "fieldRef": "fieldRefValue", - "warning": "warningValue" - } - ] - 
}, - "conditions": [ - { - "type": "typeValue", - "status": "statusValue", - "observedGeneration": 3, - "lastTransitionTime": "2004-01-01T01:01:01Z", - "reason": "reasonValue", - "message": "messageValue" - } - ] - } -} \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/v1.27.0/admissionregistration.k8s.io.v1alpha1.ValidatingAdmissionPolicy.after_roundtrip.yaml b/staging/src/k8s.io/api/testdata/v1.27.0/admissionregistration.k8s.io.v1alpha1.ValidatingAdmissionPolicy.after_roundtrip.yaml deleted file mode 100644 index 1cc7e0a8f3236..0000000000000 --- a/staging/src/k8s.io/api/testdata/v1.27.0/admissionregistration.k8s.io.v1alpha1.ValidatingAdmissionPolicy.after_roundtrip.yaml +++ /dev/null @@ -1,106 +0,0 @@ -apiVersion: admissionregistration.k8s.io/v1alpha1 -kind: ValidatingAdmissionPolicy -metadata: - annotations: - annotationsKey: annotationsValue - creationTimestamp: "2008-01-01T01:01:01Z" - deletionGracePeriodSeconds: 10 - deletionTimestamp: "2009-01-01T01:01:01Z" - finalizers: - - finalizersValue - generateName: generateNameValue - generation: 7 - labels: - labelsKey: labelsValue - managedFields: - - apiVersion: apiVersionValue - fieldsType: fieldsTypeValue - fieldsV1: {} - manager: managerValue - operation: operationValue - subresource: subresourceValue - time: "2004-01-01T01:01:01Z" - name: nameValue - namespace: namespaceValue - ownerReferences: - - apiVersion: apiVersionValue - blockOwnerDeletion: true - controller: true - kind: kindValue - name: nameValue - uid: uidValue - resourceVersion: resourceVersionValue - selfLink: selfLinkValue - uid: uidValue -spec: - auditAnnotations: - - key: keyValue - valueExpression: valueExpressionValue - failurePolicy: failurePolicyValue - matchConditions: - - expression: expressionValue - name: nameValue - matchConstraints: - excludeResourceRules: - - apiGroups: - - apiGroupsValue - apiVersions: - - apiVersionsValue - operations: - - operationsValue - resourceNames: - - resourceNamesValue - resources: - - resourcesValue - scope: scopeValue - matchPolicy: matchPolicyValue - namespaceSelector: - matchExpressions: - - key: keyValue - operator: operatorValue - values: - - valuesValue - matchLabels: - matchLabelsKey: matchLabelsValue - objectSelector: - matchExpressions: - - key: keyValue - operator: operatorValue - values: - - valuesValue - matchLabels: - matchLabelsKey: matchLabelsValue - resourceRules: - - apiGroups: - - apiGroupsValue - apiVersions: - - apiVersionsValue - operations: - - operationsValue - resourceNames: - - resourceNamesValue - resources: - - resourcesValue - scope: scopeValue - paramKind: - apiVersion: apiVersionValue - kind: kindValue - validations: - - expression: expressionValue - message: messageValue - messageExpression: messageExpressionValue - reason: reasonValue - variables: null -status: - conditions: - - lastTransitionTime: "2004-01-01T01:01:01Z" - message: messageValue - observedGeneration: 3 - reason: reasonValue - status: statusValue - type: typeValue - observedGeneration: 1 - typeChecking: - expressionWarnings: - - fieldRef: fieldRefValue - warning: warningValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.json similarity index 65% rename from staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json rename to staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.json index 
59fa006b52c0b..c5f84c6f50690 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json +++ b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.json @@ -1,5 +1,5 @@ { - "kind": "ClusterCIDR", + "kind": "IPAddress", "apiVersion": "networking.k8s.io/v1alpha1", "metadata": { "name": "nameValue", @@ -44,32 +44,11 @@ ] }, "spec": { - "nodeSelector": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ], - "matchFields": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - } - ] - }, - "perNodeHostBits": 2, - "ipv4": "ipv4Value", - "ipv6": "ipv6Value" + "parentRef": { + "group": "groupValue", + "resource": "resourceValue", + "namespace": "namespaceValue", + "name": "nameValue" + } } } \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.pb similarity index 65% rename from staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb rename to staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.pb index a4e9113897a78..7fceacd6bcacc 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb and b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.yaml similarity index 71% rename from staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml rename to staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.yaml index fe7a1341fe1c2..0bf2b17cb87f6 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml +++ b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.yaml @@ -1,5 +1,5 @@ apiVersion: networking.k8s.io/v1alpha1 -kind: ClusterCIDR +kind: IPAddress metadata: annotations: annotationsKey: annotationsValue @@ -33,18 +33,8 @@ metadata: selfLink: selfLinkValue uid: uidValue spec: - ipv4: ipv4Value - ipv6: ipv6Value - nodeSelector: - nodeSelectorTerms: - - matchExpressions: - - key: keyValue - operator: operatorValue - values: - - valuesValue - matchFields: - - key: keyValue - operator: operatorValue - values: - - valuesValue - perNodeHostBits: 2 + parentRef: + group: groupValue + name: nameValue + namespace: namespaceValue + resource: resourceValue diff --git a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.json b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.json deleted file mode 100644 index 59fa006b52c0b..0000000000000 --- a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "kind": "ClusterCIDR", - "apiVersion": "networking.k8s.io/v1alpha1", - "metadata": { - "name": "nameValue", - "generateName": "generateNameValue", - "namespace": "namespaceValue", - "selfLink": "selfLinkValue", - "uid": "uidValue", - "resourceVersion": "resourceVersionValue", - "generation": 7, - "creationTimestamp": "2008-01-01T01:01:01Z", - 
"deletionTimestamp": "2009-01-01T01:01:01Z", - "deletionGracePeriodSeconds": 10, - "labels": { - "labelsKey": "labelsValue" - }, - "annotations": { - "annotationsKey": "annotationsValue" - }, - "ownerReferences": [ - { - "apiVersion": "apiVersionValue", - "kind": "kindValue", - "name": "nameValue", - "uid": "uidValue", - "controller": true, - "blockOwnerDeletion": true - } - ], - "finalizers": [ - "finalizersValue" - ], - "managedFields": [ - { - "manager": "managerValue", - "operation": "operationValue", - "apiVersion": "apiVersionValue", - "time": "2004-01-01T01:01:01Z", - "fieldsType": "fieldsTypeValue", - "fieldsV1": {}, - "subresource": "subresourceValue" - } - ] - }, - "spec": { - "nodeSelector": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ], - "matchFields": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - } - ] - }, - "perNodeHostBits": 2, - "ipv4": "ipv4Value", - "ipv6": "ipv6Value" - } -} \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb deleted file mode 100644 index a4e9113897a78..0000000000000 Binary files a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb and /dev/null differ diff --git a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml deleted file mode 100644 index fe7a1341fe1c2..0000000000000 --- a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: networking.k8s.io/v1alpha1 -kind: ClusterCIDR -metadata: - annotations: - annotationsKey: annotationsValue - creationTimestamp: "2008-01-01T01:01:01Z" - deletionGracePeriodSeconds: 10 - deletionTimestamp: "2009-01-01T01:01:01Z" - finalizers: - - finalizersValue - generateName: generateNameValue - generation: 7 - labels: - labelsKey: labelsValue - managedFields: - - apiVersion: apiVersionValue - fieldsType: fieldsTypeValue - fieldsV1: {} - manager: managerValue - operation: operationValue - subresource: subresourceValue - time: "2004-01-01T01:01:01Z" - name: nameValue - namespace: namespaceValue - ownerReferences: - - apiVersion: apiVersionValue - blockOwnerDeletion: true - controller: true - kind: kindValue - name: nameValue - uid: uidValue - resourceVersion: resourceVersionValue - selfLink: selfLinkValue - uid: uidValue -spec: - ipv4: ipv4Value - ipv6: ipv6Value - nodeSelector: - nodeSelectorTerms: - - matchExpressions: - - key: keyValue - operator: operatorValue - values: - - valuesValue - matchFields: - - key: keyValue - operator: operatorValue - values: - - valuesValue - perNodeHostBits: 2 diff --git a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.json b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.json similarity index 65% rename from staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.json rename to staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.json index 59fa006b52c0b..c5f84c6f50690 100644 --- a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.json +++ 
b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.json @@ -1,5 +1,5 @@ { - "kind": "ClusterCIDR", + "kind": "IPAddress", "apiVersion": "networking.k8s.io/v1alpha1", "metadata": { "name": "nameValue", @@ -44,32 +44,11 @@ ] }, "spec": { - "nodeSelector": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ], - "matchFields": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - } - ] - }, - "perNodeHostBits": 2, - "ipv4": "ipv4Value", - "ipv6": "ipv6Value" + "parentRef": { + "group": "groupValue", + "resource": "resourceValue", + "namespace": "namespaceValue", + "name": "nameValue" + } } } \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.pb similarity index 65% rename from staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb rename to staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.pb index a4e9113897a78..7fceacd6bcacc 100644 Binary files a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb and b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.pb differ diff --git a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.yaml similarity index 71% rename from staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml rename to staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.yaml index fe7a1341fe1c2..0bf2b17cb87f6 100644 --- a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml +++ b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.IPAddress.after_roundtrip.yaml @@ -1,5 +1,5 @@ apiVersion: networking.k8s.io/v1alpha1 -kind: ClusterCIDR +kind: IPAddress metadata: annotations: annotationsKey: annotationsValue @@ -33,18 +33,8 @@ metadata: selfLink: selfLinkValue uid: uidValue spec: - ipv4: ipv4Value - ipv6: ipv6Value - nodeSelector: - nodeSelectorTerms: - - matchExpressions: - - key: keyValue - operator: operatorValue - values: - - valuesValue - matchFields: - - key: keyValue - operator: operatorValue - values: - - valuesValue - perNodeHostBits: 2 + parentRef: + group: groupValue + name: nameValue + namespace: namespaceValue + resource: resourceValue diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index 4240fe9e1c001..fe76ab4cf1da7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -2,10 +2,10 @@ module k8s.io/apiextensions-apiserver -go 1.20 +go 1.21.3 require ( - github.com/emicklei/go-restful/v3 v3.9.0 + github.com/emicklei/go-restful/v3 v3.11.0 github.com/evanphx/json-patch v4.12.0+incompatible github.com/gogo/protobuf v1.3.2 github.com/google/cel-go v0.17.6 @@ -15,13 +15,13 @@ require ( github.com/google/uuid v1.3.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 go.etcd.io/etcd/client/pkg/v3 v3.5.9 go.etcd.io/etcd/client/v3 v3.5.9 - 
go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/trace v1.10.0 - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 - google.golang.org/grpc v1.54.0 + go.opentelemetry.io/otel v1.19.0 + go.opentelemetry.io/otel/trace v1.19.0 + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e + google.golang.org/grpc v1.58.2 google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.0.0 @@ -31,7 +31,7 @@ require ( k8s.io/code-generator v0.0.0 k8s.io/component-base v0.0.0 k8s.io/klog/v2 v2.100.1 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd sigs.k8s.io/structured-merge-diff/v4 v4.3.0 @@ -51,7 +51,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -65,7 +65,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect @@ -73,9 +73,11 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect @@ -93,37 +95,36 @@ require ( go.etcd.io/etcd/pkg/v3 v3.5.9 // indirect go.etcd.io/etcd/raft/v3 v3.5.9 // indirect go.etcd.io/etcd/server/v3 v3.5.9 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.10.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 
// indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.0 // indirect + golang.org/x/tools v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect k8s.io/kms v0.0.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect ) replace ( diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index 41c77af71755a..d9cea422e15a0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -1,168 +1,135 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= 
-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3/go.mod 
h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= 
-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod 
h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod 
h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= 
-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1/go.mod 
h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -176,23 +143,13 @@ github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqy github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -208,27 +165,22 @@ github.com/davecgh/go-spew 
v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -257,40 +209,18 @@ github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.17.6 h1:QDvHTIJunIsbgN8yVukx0HGnsqVLSY6xGqo+17IjIyM= @@ -298,37 +228,19 @@ github.com/google/cel-go v0.17.6/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulN github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -339,11 +251,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -355,8 +264,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -375,6 +282,7 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -385,11 +293,12 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -408,7 +317,6 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -418,7 +326,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -437,16 +344,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= @@ -466,32 +371,24 @@ go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BC go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod 
h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= @@ -507,294 +404,113 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.10.0 
h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto 
v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -802,7 +518,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= @@ -818,26 +533,18 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= 
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go index 875b3e6de079f..c22a6f1c62bcb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go @@ -325,7 +325,7 @@ func validateCustomResourceDefinitionSpec(ctx context.Context, spec *apiextensio } if opts.allowDefaults && specHasDefaults(spec) { opts.requireStructuralSchema = true - if spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields == true { + if spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields { allErrs = append(allErrs, field.Invalid(fldPath.Child("preserveUnknownFields"), true, "must be false in order to use defaults in the schema")) } } @@ -873,7 +873,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch } allErrs.SchemaErrors = append(allErrs.SchemaErrors, ssv.validate(schema, fldPath)...) - if schema.UniqueItems == true { + if schema.UniqueItems { allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Forbidden(fldPath.Child("uniqueItems"), "uniqueItems cannot be set to true since the runtime complexity becomes quadratic")) } @@ -888,7 +888,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch // restricted like additionalProperties. 
if schema.AdditionalProperties != nil { if len(schema.Properties) != 0 { - if schema.AdditionalProperties.Allows == false || schema.AdditionalProperties.Schema != nil { + if !schema.AdditionalProperties.Allows || schema.AdditionalProperties.Schema != nil { allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Forbidden(fldPath.Child("additionalProperties"), "additionalProperties and properties are mutual exclusive")) } } @@ -977,7 +977,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch } } - if schema.XPreserveUnknownFields != nil && *schema.XPreserveUnknownFields == false { + if schema.XPreserveUnknownFields != nil && !*schema.XPreserveUnknownFields { allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-preserve-unknown-fields"), *schema.XPreserveUnknownFields, "must be true or undefined")) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go index 7f1af4a1a9e53..517fc9e531a76 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go @@ -43,12 +43,10 @@ import ( "k8s.io/apimachinery/pkg/version" "k8s.io/apiserver/pkg/endpoints/discovery" "k8s.io/apiserver/pkg/endpoints/discovery/aggregated" - "k8s.io/apiserver/pkg/features" genericregistry "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/webhook" ) @@ -243,7 +241,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) go openapiController.Run(s.GenericAPIServer.StaticOpenAPISpec, s.GenericAPIServer.OpenAPIVersionedService, context.StopCh) } - if s.GenericAPIServer.OpenAPIV3VersionedService != nil && utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) { + if s.GenericAPIServer.OpenAPIV3VersionedService != nil { openapiv3Controller := openapiv3controller.NewController(s.Informers.Apiextensions().V1().CustomResourceDefinitions()) go openapiv3Controller.Run(s.GenericAPIServer.OpenAPIV3VersionedService, context.StopCh) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 2a6dcb80a7c75..39fa18c57922b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -805,7 +805,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd kind, validator, statusValidator, - structuralSchemas, + structuralSchemas[v.Name], statusSpec, scaleSpec, ), @@ -833,6 +833,45 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd structuralSchemas: structuralSchemas, structuralSchemaGK: kind.GroupKind(), preserveUnknownFields: crd.Spec.PreserveUnknownFields, + supportedMediaTypes: []runtime.SerializerInfo{ + { + MediaType: "application/json", + MediaTypeType: "application", + MediaTypeSubType: "json", + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, creator, typer, false), + PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, creator, typer, true), + StrictSerializer: 
json.NewSerializerWithOptions(json.DefaultMetaFactory, creator, typer, json.SerializerOptions{ + Strict: true, + }), + StreamSerializer: &runtime.StreamSerializerInfo{ + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, creator, typer, false), + Framer: json.Framer, + }, + }, + { + MediaType: "application/yaml", + MediaTypeType: "application", + MediaTypeSubType: "yaml", + EncodesAsText: true, + Serializer: json.NewYAMLSerializer(json.DefaultMetaFactory, creator, typer), + StrictSerializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, creator, typer, json.SerializerOptions{ + Yaml: true, + Strict: true, + }), + }, + { + MediaType: "application/vnd.kubernetes.protobuf", + MediaTypeType: "application", + MediaTypeSubType: "vnd.kubernetes.protobuf", + Serializer: protobuf.NewSerializer(creator, typer), + StreamSerializer: &runtime.StreamSerializerInfo{ + Serializer: protobuf.NewRawSerializer(creator, typer), + Framer: protobuf.LengthDelimitedFramer, + }, + }, + }, } var standardSerializers []runtime.SerializerInfo for _, s := range negotiatedSerializer.SupportedMediaTypes() { @@ -1021,48 +1060,12 @@ type unstructuredNegotiatedSerializer struct { structuralSchemas map[string]*structuralschema.Structural // by version structuralSchemaGK schema.GroupKind preserveUnknownFields bool + + supportedMediaTypes []runtime.SerializerInfo } func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { - return []runtime.SerializerInfo{ - { - MediaType: "application/json", - MediaTypeType: "application", - MediaTypeSubType: "json", - EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false), - PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, true), - StrictSerializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, s.creator, s.typer, json.SerializerOptions{ - Strict: true, - }), - StreamSerializer: &runtime.StreamSerializerInfo{ - EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false), - Framer: json.Framer, - }, - }, - { - MediaType: "application/yaml", - MediaTypeType: "application", - MediaTypeSubType: "yaml", - EncodesAsText: true, - Serializer: json.NewYAMLSerializer(json.DefaultMetaFactory, s.creator, s.typer), - StrictSerializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, s.creator, s.typer, json.SerializerOptions{ - Yaml: true, - Strict: true, - }), - }, - { - MediaType: "application/vnd.kubernetes.protobuf", - MediaTypeType: "application", - MediaTypeSubType: "vnd.kubernetes.protobuf", - Serializer: protobuf.NewSerializer(s.creator, s.typer), - StreamSerializer: &runtime.StreamSerializerInfo{ - Serializer: protobuf.NewRawSerializer(s.creator, s.typer), - Framer: protobuf.LengthDelimitedFramer, - }, - }, - } + return s.supportedMediaTypes } func (s unstructuredNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/celcoststability_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/celcoststability_test.go index 5e1a9bbca6704..beeb181e50984 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/celcoststability_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/celcoststability_test.go @@ -116,6 +116,11 @@ func TestCelCostStability(t *testing.T) { 
"self.val1.upperAscii() == 'ROOK TAKES 👑'": 6, "self.val1.lowerAscii() == 'rook takes 👑'": 6, "self.val1.lowerAscii() == self.val1.lowerAscii()": 10, + // strings version 2 + "'%d %s %f %s %s'.format([1, 'abc', 1.0, duration('1m'), timestamp('2000-01-01T00:00:00.000Z')]) == '1 abc 1.000000 60s 2000-01-01T00:00:00Z'": 6, + "'%e'.format([3.14]) == '3.140000 × 10⁰⁰'": 3, + "'%o %o %o'.format([7, 8, 9]) == '7 10 11'": 2, + "'%b %b %b'.format([7, 8, 9]) == '111 1000 1001'": 3, }, }, {name: "escaped strings", @@ -1151,3 +1156,792 @@ func TestCelCostStability(t *testing.T) { }) } } + +func TestCelEstimatedCostStability(t *testing.T) { + cases := []struct { + name string + schema *schema.Structural + expectCost map[string]uint64 + }{ + {name: "integers", + // 1st obj and schema args are for "self.val1" field, 2nd for "self.val2" and so on. + schema: schemas(integerType, integerType, int32Type, int32Type, int64Type, int64Type), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", fmt.Sprintf("%d", math.MaxInt64)): 8, + "self.val1 == self.val6": 4, // integer with no format is the same as int64 + "type(self.val1) == int": 4, + fmt.Sprintf("self.val3 + 1 == %d + 1", math.MaxInt32): 5, // CEL integers are 64 bit + }, + }, + {name: "numbers", + schema: schemas(numberType, numberType, floatType, floatType, doubleType, doubleType, doubleType), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", fmt.Sprintf("%f", math.MaxFloat64)): 8, + "self.val1 == self.val6": 4, // number with no format is the same as float64 + "type(self.val1) == double": 4, + + // Use a int64 value with a number openAPI schema type since float representations of whole numbers + // (e.g. 1.0, 0.0) can convert to int representations (e.g. 1, 0) in yaml to json translation, and + // then get parsed as int64s. 
+ "type(self.val7) == double": 4, + "self.val7 == 1.0": 2, + }, + }, + {name: "numeric comparisons", + schema: schemas(integerType, numberType, floatType, doubleType, numberType, floatType, doubleType), + expectCost: map[string]uint64{ + // xref: https://github.com/google/cel-spec/wiki/proposal-210 + + // compare integers with all float types + "double(self.val1) < self.val4": 6, + "self.val1 < int(self.val4)": 6, + "double(self.val1) < self.val5": 6, + "self.val1 < int(self.val5)": 6, + "double(self.val1) < self.val6": 6, + "self.val1 < int(self.val6)": 6, + + // compare literal integers and floats + "double(5) < 10.0": 2, + "5 < int(10.0)": 2, + + // compare integers with literal floats + "double(self.val1) < 10.0": 4, + }, + }, + {name: "unicode strings", + schema: schemas(stringType, stringType), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", "'Rook takes 👑'"): 314585, + "self.val1.startsWith('Rook')": 3, + "!self.val1.startsWith('knight')": 4, + "self.val1.matches('^[^0-9]*$')": 943721, + "!self.val1.matches('^[0-9]*$')": 629149, + "type(self.val1) == string": 4, + "size(self.val1) == 12": 4, + + // string functions (https://github.com/google/cel-go/blob/v0.9.0/ext/strings.go) + "self.val1.charAt(3) == 'k'": 4, + "self.val1.indexOf('o') == 1": 314576, + "self.val1.indexOf('o', 2) == 2": 314576, + "self.val1.replace(' ', 'x') == 'Rookxtakesx👑'": 629150, + "self.val1.replace(' ', 'x', 1) == 'Rookxtakes 👑'": 629150, + "self.val1.split(' ') == ['Rook', 'takes', '👑']": 629159, + "self.val1.split(' ', 2) == ['Rook', 'takes 👑']": 629159, + "self.val1.substring(5) == 'takes 👑'": 314576, + "self.val1.substring(0, 4) == 'Rook'": 314576, + "self.val1.substring(4, 10).trim() == 'takes'": 629149, + "self.val1.upperAscii() == 'ROOK TAKES 👑'": 314577, + "self.val1.lowerAscii() == 'rook takes 👑'": 314577, + "self.val1.lowerAscii() == self.val1.lowerAscii()": 943723, + }, + }, + {name: "escaped strings", + schema: schemas(stringType, stringType), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", "'l1\\nl2'"): 314583, + "self.val1 == '''l1\nl2'''": 3, + }, + }, + {name: "bytes", + schema: schemas(byteType, byteType), + expectCost: map[string]uint64{ + "self.val1 == self.val2": 314577, + "self.val1 == b'AB'": 3, + "type(self.val1) == bytes": 4, + "size(self.val1) == 2": 4, + }, + }, + {name: "booleans", + schema: schemas(booleanType, booleanType, booleanType, booleanType), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", "true"): 8, + "self.val1 != self.val4": 4, + "type(self.val1) == bool": 4, + }, + }, + {name: "duration format", + schema: schemas(durationFormat, durationFormat), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", "duration('1h2m3s4ms')"): 16, + "self.val1 == duration('1h2m') + duration('3s4ms')": 6, + "self.val1.getHours() == 1": 4, + "type(self.val1) == google.protobuf.Duration": 4, + }, + }, + {name: "date format", + schema: schemas(dateFormat, dateFormat), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", "timestamp('1997-07-16T00:00:00.000Z')"): 14, + "self.val1.getDate() == 16": 4, + "type(self.val1) == google.protobuf.Timestamp": 4, + }, + }, + {name: "date-time format", + schema: schemas(dateTimeFormat, dateTimeFormat), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", 
"timestamp('2011-08-18T19:03:37.010+01:00')"): 16, + "self.val1 == timestamp('2011-08-18T00:00:00.000+01:00') + duration('19h3m37s10ms')": 6, + "self.val1.getDate('01:00') == 18": 4, + "type(self.val1) == google.protobuf.Timestamp": 4, + }, + }, + {name: "enums", + schema: objectTypePtr(map[string]schema.Structural{"enumStr": { + Generic: schema.Generic{ + Type: "string", + }, + ValueValidation: &schema.ValueValidation{ + Enum: []schema.JSON{ + {Object: "Pending"}, + {Object: "Available"}, + {Object: "Bound"}, + {Object: "Released"}, + {Object: "Failed"}, + }, + }, + }}), + expectCost: map[string]uint64{ + "self.enumStr == 'Pending'": 3, + "self.enumStr in ['Pending', 'Available']": 14, + }, + }, + {name: "conversions", + schema: schemas(integerType, numberType, numberType, numberType, booleanType, stringType, byteType, stringType, durationFormat, stringType, dateTimeFormat, stringType, dateFormat), + expectCost: map[string]uint64{ + "int(self.val2) == self.val1": 5, + "double(self.val1) == self.val2": 5, + "bytes(self.val6) == self.val7": 629150, + "string(self.val1) == self.val6": 314578, + "string(self.val4) == '10.5'": 4, + "string(self.val7) == self.val6": 629150, + "duration(self.val8) == self.val9": 6, + "timestamp(self.val10) == self.val11": 6, + "string(self.val11) == self.val10": 314578, + "timestamp(self.val12) == self.val13": 6, + "string(self.val13) == self.val12": 314578, + }, + }, + {name: "lists", + schema: schemas(listType(&integerType), listType(&integerType)), + expectCost: map[string]uint64{ + ValsEqualThemselvesAndDataLiteral("self.val1", "self.val2", "[1, 2, 3]"): 157317, + "1 in self.val1": 1572865, + "self.val2[0] in self.val1": 1572868, + "!(0 in self.val1)": 1572866, + "self.val1 + self.val2 == [1, 2, 3, 1, 2, 3]": 16, + "self.val1 + [4, 5] == [1, 2, 3, 4, 5]": 24, + "has(self.val1)": 2, + "has(self.val1) && has(self.val2)": 4, + "!has(self.val1)": 3, + "self.val1.all(k, size(self.val1) > 0)": 11010044, + "self.val1.exists_one(k, self.val1 == [2])": 23592949, + "!self.val1.exists_one(k, size(self.val1) > 0)": 9437183, + "size(self.val1) == 2": 4, + "size(self.val1.filter(k, size(self.val1) > 1)) == 1": 26738686, + }, + }, + {name: "listSets", + schema: schemas(listSetType(&stringType), listSetType(&stringType)), + expectCost: map[string]uint64{ + // equal even though order is different + "self.val1 == ['c', 'b', 'a']": 13, + "self.val1 == self.val2": 104862, + "'a' in self.val1": 1048577, + "self.val2[0] in self.val1": 1048580, + "!('x' in self.val1)": 1048578, + "self.val1 + self.val2 == ['a', 'b', 'c']": 16, + "self.val1 + ['c', 'd'] == ['a', 'b', 'c', 'd']": 24, + "has(self.val1)": 2, + "has(self.val1) && has(self.val2)": 4, + "!has(self.val1)": 3, + "self.val1.all(k, size(self.val1) > 0)": 7340028, + "self.val1.exists_one(k, self.val1 == ['a'])": 15728629, + "!self.val1.exists_one(k, size(self.val1) > 0)": 6291455, + "size(self.val1) == 2": 4, + "size(self.val1.filter(k, size(self.val1) > 1)) == 1": 17825790, + }, + }, + {name: "listMaps", + schema: objectTypePtr(map[string]schema.Structural{ + "objs": listType(listMapTypePtr([]string{"k"}, objectTypePtr(map[string]schema.Structural{ + "k": stringType, + "v": stringType, + }))), + }), + expectCost: map[string]uint64{ + "self.objs[0] == self.objs[1]": 104864, // equal even though order is different + "self.objs[0] + self.objs[2] == self.objs[2]": 104868, // rhs overwrites lhs values + "self.objs[2] + self.objs[0] == self.objs[0]": 104868, + + "self.objs[0] == [self.objs[0][0], self.objs[0][1]]": 22, // equal 
against a declared list + "self.objs[0] == [self.objs[0][1], self.objs[0][0]]": 22, + + "self.objs[2] + [self.objs[0][0], self.objs[0][1]] == self.objs[0]": 104883, // concat against a declared list + "size(self.objs[0] + [self.objs[3][0]]) == 3": 20, + "has(self.objs)": 2, + "has(self.objs) && has(self.objs)": 4, + "!has(self.objs)": 3, + "self.objs[0].all(k, size(self.objs[0]) > 0)": 8388604, + "self.objs[0].exists_one(k, size(self.objs[0]) > 0)": 7340030, + "!self.objs[0].exists_one(k, size(self.objs[0]) > 0)": 7340031, + "size(self.objs[0]) == 2": 5, + "size(self.objs[0].filter(k, size(self.objs[0]) > 1)) == 1": 18874366, + }, + }, + {name: "maps", + schema: schemas(mapType(&stringType), mapType(&stringType)), + expectCost: map[string]uint64{ + "self.val1 == self.val2": 39326, // equal even though order is different + "'k1' in self.val1": 3, + "!('k3' in self.val1)": 4, + "self.val1 == {'k1': 'a', 'k2': 'b'}": 33, + "has(self.val1)": 2, + "has(self.val1) && has(self.val2)": 4, + "!has(self.val1)": 3, + "self.val1.all(k, size(self.val1) > 0)": 2752508, + "self.val1.exists_one(k, size(self.val1) > 0)": 2359294, + "!self.val1.exists_one(k, size(self.val1) > 0)": 2359295, + "size(self.val1) == 2": 4, + "size(self.val1.filter(k, size(self.val1) > 1)) == 1": 6684670, + }, + }, + {name: "objects", + schema: objectTypePtr(map[string]schema.Structural{ + "objs": listType(objectTypePtr(map[string]schema.Structural{ + "f1": stringType, + "f2": stringType, + })), + }), + expectCost: map[string]uint64{ + "self.objs[0] == self.objs[1]": 6, + }, + }, + {name: "object access", + schema: objectTypePtr(map[string]schema.Structural{ + "a": objectType(map[string]schema.Structural{ + "b": integerType, + "c": integerType, + "d": withNullable(true, integerType), + }), + "a1": objectType(map[string]schema.Structural{ + "b1": objectType(map[string]schema.Structural{ + "c1": integerType, + }), + "d2": objectType(map[string]schema.Structural{ + "e2": integerType, + }), + }), + }), + // https://github.com/google/cel-spec/blob/master/doc/langdef.md#field-selection + expectCost: map[string]uint64{ + "has(self.a.b)": 3, + "has(self.a1.b1.c1)": 4, + "!(has(self.a1.d2) && has(self.a1.d2.e2))": 8, // must check intermediate optional fields (see below no such key error for d2) + "!has(self.a1.d2)": 4, + "has(self.a)": 2, + "has(self.a) && has(self.a1)": 4, + "!has(self.a)": 3, + }, + }, + {name: "map access", + schema: objectTypePtr(map[string]schema.Structural{ + "val": mapType(&integerType), + }), + expectCost: map[string]uint64{ + // idiomatic map access + "!('a' in self.val)": 4, + "'b' in self.val": 3, + "!('c' in self.val)": 4, + "'d' in self.val": 3, + // field selection also possible if map key is a valid CEL identifier + "!has(self.val.a)": 4, + "has(self.val.b)": 3, + "!has(self.val.c)": 4, + "has(self.val.d)": 3, + "self.val.all(k, self.val[k] > 0)": 3595115, + "self.val.exists_one(k, self.val[k] == 2)": 2696338, + "!self.val.exists_one(k, self.val[k] > 0)": 3145728, + "size(self.val) == 2": 4, + "size(self.val.filter(k, self.val[k] > 1)) == 1": 8089017, + }, + }, + {name: "listMap access", + schema: objectTypePtr(map[string]schema.Structural{ + "listMap": listMapType([]string{"k"}, objectTypePtr(map[string]schema.Structural{ + "k": stringType, + "v": stringType, + "v2": stringType, + })), + }), + expectCost: map[string]uint64{ + "has(self.listMap[0].v)": 4, + "self.listMap.all(m, m.k.startsWith('a'))": 6291453, + "self.listMap.all(m, !has(m.v2) || m.v2 == 'z')": 9437178, + "self.listMap.exists(m, 
m.k.endsWith('1'))": 7340028, + "self.listMap.exists_one(m, m.k == 'a3')": 5242879, + "!self.listMap.all(m, m.k.endsWith('1'))": 6291454, + "!self.listMap.exists(m, m.v == 'x')": 7340029, + "!self.listMap.exists_one(m, m.k.startsWith('a'))": 5242880, + "size(self.listMap.filter(m, m.k == 'a1')) == 1": 16777215, + "self.listMap.exists(m, m.k == 'a1' && m.v == 'b1')": 10485753, + "self.listMap.map(m, m.v).exists(v, v == 'b1')": uint64(18446744073709551615), + + // test comprehensions where the field used in predicates is unset on all but one of the elements: + // - with has checks: + + "self.listMap.exists(m, has(m.v2) && m.v2 == 'z')": 9437178, + "!self.listMap.all(m, has(m.v2) && m.v2 != 'z')": 8388604, + "self.listMap.exists_one(m, has(m.v2) && m.v2 == 'z')": 7340029, + "self.listMap.filter(m, has(m.v2) && m.v2 == 'z').size() == 1": 18874365, + // undocumented overload of map that takes a filter argument. This is the same as .filter().map() + "self.listMap.map(m, has(m.v2) && m.v2 == 'z', m.v2).size() == 1": 19922940, + "self.listMap.filter(m, has(m.v2) && m.v2 == 'z').map(m, m.v2).size() == 1": uint64(18446744073709551615), + // - without has checks: + + // all() and exists() macros ignore errors from predicates so long as the condition holds for at least one element + "self.listMap.exists(m, m.v2 == 'z')": 7340028, + "!self.listMap.all(m, m.v2 != 'z')": 6291454, + }, + }, + {name: "list access", + schema: objectTypePtr(map[string]schema.Structural{ + "array": listType(&integerType), + }), + expectCost: map[string]uint64{ + "2 in self.array": 1572865, + "self.array.all(e, e > 0)": 7864318, + "self.array.exists(e, e > 2)": 9437181, + "self.array.exists_one(e, e > 4)": 6291456, + "!self.array.all(e, e < 2)": 7864319, + "!self.array.exists(e, e < 0)": 9437182, + "!self.array.exists_one(e, e == 2)": 4718594, + "self.array.all(e, e < 100)": 7864318, + "size(self.array.filter(e, e%2 == 0)) == 3": 25165823, + "self.array.map(e, e * 20).filter(e, e > 50).exists(e, e == 60)": uint64(18446744073709551615), + "size(self.array) == 8": 4, + }, + }, + {name: "listSet access", + schema: objectTypePtr(map[string]schema.Structural{ + "set": listType(&integerType), + }), + expectCost: map[string]uint64{ + "3 in self.set": 1572865, + "self.set.all(e, e > 0)": 7864318, + "self.set.exists(e, e > 3)": 9437181, + "self.set.exists_one(e, e == 3)": 4718593, + "!self.set.all(e, e < 3)": 7864319, + "!self.set.exists(e, e < 0)": 9437182, + "!self.set.exists_one(e, e > 3)": 6291457, + "self.set.all(e, e < 10)": 7864318, + "size(self.set.filter(e, e%2 == 0)) == 2": 25165823, + "self.set.map(e, e * 20).filter(e, e > 50).exists_one(e, e == 60)": uint64(18446744073709551615), + "size(self.set) == 5": 4, + }, + }, + {name: "typemeta and objectmeta access specified", + schema: objectTypePtr(map[string]schema.Structural{ + "kind": stringType, + "apiVersion": stringType, + "metadata": objectType(map[string]schema.Structural{ + "name": stringType, + "generateName": stringType, + }), + }), + expectCost: map[string]uint64{ + "self.kind == 'Pod'": 3, + "self.apiVersion == 'v1'": 3, + "self.metadata.name == 'foo'": 4, + "self.metadata.generateName == 'pickItForMe'": 5, + }, + }, + + // Kubernetes special types + {name: "embedded object", + schema: objectTypePtr(map[string]schema.Structural{ + "embedded": { + Generic: schema.Generic{Type: "object"}, + Extensions: schema.Extensions{ + XEmbeddedResource: true, + }, + }, + }), + expectCost: map[string]uint64{ + // 'kind', 'apiVersion', 'metadata.name' and 'metadata.generateName' are 
always accessible + // even if not specified in the schema. + "self.embedded.kind == 'Pod'": 4, + "self.embedded.apiVersion == 'v1'": 4, + "self.embedded.metadata.name == 'foo'": 5, + "self.embedded.metadata.generateName == 'pickItForMe'": 6, + }, + }, + {name: "embedded object with properties", + schema: objectTypePtr(map[string]schema.Structural{ + "embedded": { + Generic: schema.Generic{Type: "object"}, + Extensions: schema.Extensions{ + XEmbeddedResource: true, + }, + Properties: map[string]schema.Structural{ + "kind": stringType, + "apiVersion": stringType, + "metadata": objectType(map[string]schema.Structural{ + "name": stringType, + "generateName": stringType, + }), + "spec": objectType(map[string]schema.Structural{ + "field1": stringType, + }), + }, + }, + }), + expectCost: map[string]uint64{ + // in this case 'kind', 'apiVersion', 'metadata.name' and 'metadata.generateName' are specified in the + // schema, but they would be accessible even if they were not + "self.embedded.kind == 'Pod'": 4, + "self.embedded.apiVersion == 'v1'": 4, + "self.embedded.metadata.name == 'foo'": 5, + "self.embedded.metadata.generateName == 'pickItForMe'": 6, + // the specified embedded fields are accessible + "self.embedded.spec.field1 == 'a'": 5, + }, + }, + {name: "embedded object with preserve unknown", + schema: objectTypePtr(map[string]schema.Structural{ + "embedded": { + Generic: schema.Generic{Type: "object"}, + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + XEmbeddedResource: true, + }, + }, + }), + expectCost: map[string]uint64{ + // 'kind', 'apiVersion', 'metadata.name' and 'metadata.generateName' are always accessible + // even if not specified in the schema, regardless of if x-kubernetes-preserve-unknown-fields is set. + "self.embedded.kind == 'Pod'": 4, + "self.embedded.apiVersion == 'v1'": 4, + "self.embedded.metadata.name == 'foo'": 5, + "self.embedded.metadata.generateName == 'pickItForMe'": 6, + + // the object exists + "has(self.embedded)": 2, + }, + }, + {name: "string in intOrString", + schema: objectTypePtr(map[string]schema.Structural{ + "something": intOrStringType(), + }), + expectCost: map[string]uint64{ + // typical int-or-string usage would be to check both types + "type(self.something) == int ? self.something == 1 : self.something == '25%'": 7, + // to require the value be a particular type, guard it with a runtime type check + "type(self.something) == string && self.something == '25%'": 7, + + // In Kubernetes 1.24 and later, the CEL type returns false for an int-or-string comparison against the + // other type, making it safe to write validation rules like: + "self.something == '25%'": 3, + "self.something != 1": 3, + "self.something == 1 || self.something == '25%'": 6, + "self.something == '25%' || self.something == 1": 6, + + // Because the type is dynamic it receives no type checking, and evaluates to false when compared to + // other types at runtime. + "self.something != ['anything']": 13, + }, + }, + {name: "int in intOrString", + schema: objectTypePtr(map[string]schema.Structural{ + "something": intOrStringType(), + }), + expectCost: map[string]uint64{ + // typical int-or-string usage would be to check both types + "type(self.something) == int ? 
self.something == 1 : self.something == '25%'": 7, + // to require the value be a particular type, guard it with a runtime type check + "type(self.something) == int && self.something == 1": 7, + + // In Kubernetes 1.24 and later, the CEL type returns false for an int-or-string comparison against the + // other type, making it safe to write validation rules like: + "self.something == 1": 3, + "self.something != 'some string'": 4, + "self.something == 1 || self.something == '25%'": 6, + "self.something == '25%' || self.something == 1": 6, + + // Because the type is dynamic it receives no type checking, and evaluates to false when compared to + // other types at runtime. + "self.something != ['anything']": 13, + }, + }, + {name: "null in intOrString", + schema: objectTypePtr(map[string]schema.Structural{ + "something": withNullable(true, intOrStringType()), + }), + expectCost: map[string]uint64{ + "!has(self.something)": 3, + }, + }, + {name: "percent comparison using intOrString", + schema: objectTypePtr(map[string]schema.Structural{ + "min": intOrStringType(), + "current": integerType, + "available": integerType, + }), + expectCost: map[string]uint64{ + // validate that if 'min' is a string that it is a percentage + `type(self.min) == string && self.min.matches(r'(\d+(\.\d+)?%)')`: 1258298, + // validate that 'min' can be either a exact value minimum, or a minimum as a percentage of 'available' + "type(self.min) == int ? self.current <= self.min : double(self.current) / double(self.available) >= double(self.min.replace('%', '')) / 100.0": 629162, + }, + }, + {name: "preserve unknown fields", + schema: objectTypePtr(map[string]schema.Structural{ + "withUnknown": { + Generic: schema.Generic{Type: "object"}, + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + }, + "withUnknownList": listType(&schema.Structural{ + Generic: schema.Generic{Type: "object"}, + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + }), + "withUnknownFieldList": listType(&schema.Structural{ + Generic: schema.Generic{Type: "object"}, + Properties: map[string]schema.Structural{ + "fieldOfUnknownType": { + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + }, + }, + }), + "anyvalList": listType(&schema.Structural{ + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + }), + "anyvalMap": mapType(&schema.Structural{ + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + }), + "anyvalField1": { + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + }, + "anyvalField2": { + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + }, + }), + expectCost: map[string]uint64{ + "has(self.withUnknown)": 2, + "self.withUnknownList.size() == 5": 4, + // fields that are unknown because they were not specified on the object schema are included in equality checks + "self.withUnknownList[0] != self.withUnknownList[1]": 6, + "self.withUnknownList[1] == self.withUnknownList[2]": 6, + "self.withUnknownList[3] == self.withUnknownList[4]": 6, + + // fields specified on the object schema that are unknown because the field's schema is unknown are also included equality checks + "self.withUnknownFieldList[0] != self.withUnknownFieldList[1]": 6, + "self.withUnknownFieldList[1] == self.withUnknownFieldList[2]": 6, + }, + }, + {name: "known and unknown fields", + schema: &schema.Structural{ + Generic: schema.Generic{ + Type: "object", + }, + Properties: map[string]schema.Structural{ + "withUnknown": { + Generic: 
schema.Generic{Type: "object"}, + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + Properties: map[string]schema.Structural{ + "known": integerType, + }, + }, + "withUnknownList": listType(&schema.Structural{ + Generic: schema.Generic{Type: "object"}, + Extensions: schema.Extensions{ + XPreserveUnknownFields: true, + }, + Properties: map[string]schema.Structural{ + "known": integerType, + }, + }), + }, + }, + expectCost: map[string]uint64{ + "self.withUnknown.known == 1": 3, + // if the unknown fields are the same, they are equal + "self.withUnknownList[1] == self.withUnknownList[2]": 6, + + // if unknown fields are different, they are not equal + "self.withUnknownList[0] != self.withUnknownList[1]": 6, + "self.withUnknownList[0] != self.withUnknownList[3]": 6, + "self.withUnknownList[0] != self.withUnknownList[5]": 6, + + // if all fields are known, equality works as usual + "self.withUnknownList[3] == self.withUnknownList[4]": 6, + "self.withUnknownList[4] != self.withUnknownList[5]": 6, + }, + }, + {name: "field nullability", + schema: objectTypePtr(map[string]schema.Structural{ + "unsetPlainStr": stringType, + "unsetDefaultedStr": withDefault("default value", stringType), + "unsetNullableStr": withNullable(true, stringType), + + "setPlainStr": stringType, + "setDefaultedStr": withDefault("default value", stringType), + "setNullableStr": withNullable(true, stringType), + "setToNullNullableStr": withNullable(true, stringType), + }), + expectCost: map[string]uint64{ + "!has(self.unsetPlainStr)": 3, + "has(self.unsetDefaultedStr) && self.unsetDefaultedStr == 'default value'": 6, + "!has(self.unsetNullableStr)": 3, + + "has(self.setPlainStr) && self.setPlainStr == 'v1'": 5, + "has(self.setDefaultedStr) && self.setDefaultedStr == 'v2'": 5, + "has(self.setNullableStr) && self.setNullableStr == 'v3'": 5, + // We treat null fields as absent fields, not as null valued fields. + // Note that this is different than how we treat nullable list items or map values. + "type(self.setNullableStr) != null_type": 4, + + // a field that is set to null is treated the same as an absent field in validation rules + "!has(self.setToNullNullableStr)": 3, + }, + }, + {name: "null values in container types", + schema: objectTypePtr(map[string]schema.Structural{ + "m": mapType(withNullablePtr(true, stringType)), + "l": listType(withNullablePtr(true, stringType)), + "s": listSetType(withNullablePtr(true, stringType)), + }), + expectCost: map[string]uint64{ + "self.m.size() == 2": 4, + "'a' in self.m": 3, + "type(self.m['a']) == null_type": 5, // null check using runtime type checking + }, + }, + {name: "object types are not accessible", + schema: objectTypePtr(map[string]schema.Structural{ + "nestedInMap": mapType(objectTypePtr(map[string]schema.Structural{ + "inMapField": integerType, + })), + "nestedInList": listType(objectTypePtr(map[string]schema.Structural{ + "inListField": integerType, + })), + }), + expectCost: map[string]uint64{ + // we do not expose a stable type for the self variable, even when it is an object that CEL + // considers a named type. The only operation developers should be able to perform on the type is + // equality checking. 
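For context on the cases above: these expressions are what a CRD author attaches to a property via x-kubernetes-validations. Below is a minimal sketch of such a property, covering the int-or-string pattern exercised earlier; names and messages are illustrative and not taken from this change. The expected-cost entries for the named-type case continue right after it.

package example

import (
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// intOrStringProp sketches the kind of property the int-or-string cost cases
// above exercise: the field accepts an int or a string, and CEL rules are
// attached via x-kubernetes-validations.
var intOrStringProp = apiextensionsv1.JSONSchemaProps{
    XIntOrString: true,
    XValidations: apiextensionsv1.ValidationRules{
        // Safe on Kubernetes 1.24+: comparing against the "other" type
        // evaluates to false rather than erroring.
        {Rule: `self == 1 || self == '25%'`, Message: "must be 1 or '25%'"},
        // To require one concrete type, guard with a runtime type check.
        {Rule: `type(self) == string && self == '25%'`, Message: "must be the string '25%'"},
    },
}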
+ "type(self) == type(self)": uint64(1844674407370955268), + "type(self.nestedInMap['k1']) == type(self.nestedInMap['k2'])": uint64(1844674407370955272), + }, + }, + {name: "listMaps with unsupported identity characters in property names", + schema: objectTypePtr(map[string]schema.Structural{ + "objs": listType(listMapTypePtr([]string{"k!", "k."}, objectTypePtr(map[string]schema.Structural{ + "k!": stringType, + "k.": stringType, + }))), + }), + expectCost: map[string]uint64{ + "self.objs[0] == self.objs[1]": 104864, // equal even though order is different + "self.objs[0][0].k__dot__ == '1'": 6, // '.' is a supported character in identifiers, but it is escaped + }, + }, + {name: "container type composition", + schema: objectTypePtr(map[string]schema.Structural{ + "obj": objectType(map[string]schema.Structural{ + "field": stringType, + }), + "mapOfMap": mapType(mapTypePtr(&stringType)), + "mapOfObj": mapType(objectTypePtr(map[string]schema.Structural{ + "field2": stringType, + })), + "mapOfListMap": mapType(listMapTypePtr([]string{"k"}, objectTypePtr(map[string]schema.Structural{ + "k": stringType, + "v": stringType, + }))), + "mapOfList": mapType(listTypePtr(&stringType)), + "listMapOfObj": listMapType([]string{"k2"}, objectTypePtr(map[string]schema.Structural{ + "k2": stringType, + "v2": stringType, + })), + "listOfMap": listType(mapTypePtr(&stringType)), + "listOfObj": listType(objectTypePtr(map[string]schema.Structural{ + "field3": stringType, + })), + "listOfListMap": listType(listMapTypePtr([]string{"k3"}, objectTypePtr(map[string]schema.Structural{ + "k3": stringType, + "v3": stringType, + }))), + }), + expectCost: map[string]uint64{ + "self.obj.field == 'a'": 4, + "self.mapOfMap['x']['y'] == 'b'": 5, + "self.mapOfObj['k'].field2 == 'c'": 5, + "self.mapOfListMap['o'].exists(e, e.k == '1' && e.v == 'd')": 10485754, + "self.mapOfList['l'][0] == 'e'": 5, + "self.listMapOfObj.exists(e, e.k2 == '2' && e.v2 == 'f')": 10485753, + "self.listOfMap[0]['z'] == 'g'": 5, + "self.listOfObj[0].field3 == 'h'": 5, + "self.listOfListMap[0].exists(e, e.k3 == '3' && e.v3 == 'i')": 10485754, + }, + }, + {name: "optionals", + schema: objectTypePtr(map[string]schema.Structural{ + "obj": objectType(map[string]schema.Structural{ + "field": stringType, + "absentField": stringType, + }), + "m": mapType(&stringType), + "l": listType(&stringType), + }), + expectCost: map[string]uint64{ + "optional.of('a') != optional.of('b')": uint64(1844674407370955266), + "optional.of('a') != optional.none()": uint64(1844674407370955266), + "optional.of('a').hasValue()": 2, + "optional.of('a').or(optional.of('a')).hasValue()": 4, // or() is short-circuited + "optional.none().or(optional.of('a')).hasValue()": 4, + "optional.of('a').optMap(v, v == 'value').hasValue()": 17, + "self.obj.?field == optional.of('a')": uint64(1844674407370955268), + "self.obj.?absentField == optional.none()": uint64(1844674407370955268), + "self.obj.?field.orValue('v') == 'a'": 5, + "self.m[?'k'] == optional.of('v')": uint64(1844674407370955268), + "self.l[?0] == optional.of('a')": uint64(1844674407370955268), + "optional.ofNonZeroValue(1).hasValue()": 2, + }, + }, + } + + for _, tt := range cases { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + for validRule, expectedCost := range tt.expectCost { + validRule := validRule + expectedCost := expectedCost + testName := validRule + if len(testName) > 127 { + testName = testName[:127] + } + t.Run(testName, func(t *testing.T) { + t.Parallel() + s := withRule(*tt.schema, validRule) + 
t.Run("calc maxLength", schemaChecker(&s, uint64(expectedCost), 0, t)) + }) + } + }) + } +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation_test.go index b4e8e24fc1225..2d56211281eff 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation_test.go @@ -989,6 +989,35 @@ func genStringWithRule(rule string) func(maxLength *int64) *schema.Structural { } } +// genEnumWithRuleAndValues creates a function that accepts an optional maxLength +// with given validation rule and a set of enum values, following the convention of existing tests. +// The test has two checks, first with maxLength unset to check if maxLength can be concluded from enums, +// second with maxLength set to ensure it takes precedence. +func genEnumWithRuleAndValues(rule string, values ...string) func(maxLength *int64) *schema.Structural { + enums := make([]schema.JSON, 0, len(values)) + for _, v := range values { + enums = append(enums, schema.JSON{Object: v}) + } + return func(maxLength *int64) *schema.Structural { + return &schema.Structural{ + Generic: schema.Generic{ + Type: "string", + }, + ValueValidation: &schema.ValueValidation{ + MaxLength: maxLength, + Enum: enums, + }, + Extensions: schema.Extensions{ + XValidations: apiextensions.ValidationRules{ + { + Rule: rule, + }, + }, + }, + } + } +} + func genBytesWithRule(rule string) func(maxLength *int64) *schema.Structural { return func(maxLength *int64) *schema.Structural { return &schema.Structural{ @@ -1744,6 +1773,13 @@ func TestCostEstimation(t *testing.T) { setMaxElements: 42, expectedSetCost: 8, }, + { + name: "enums with maxLength equals to the longest possible value", + schemaGenerator: genEnumWithRuleAndValues("self.contains('A')", "A", "B", "C", "LongValue"), + expectedCalcCost: 2, + setMaxElements: 1000, + expectedSetCost: 401, + }, } for _, testCase := range cases { t.Run(testCase.name, func(t *testing.T) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go index e3e940afa0bec..0bc109a73f806 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go @@ -54,6 +54,13 @@ func (s *Structural) Format() string { return s.Structural.ValueValidation.Format } +func (s *Structural) Pattern() string { + if s.Structural.ValueValidation == nil { + return "" + } + return s.Structural.ValueValidation.Pattern +} + func (s *Structural) Items() common.Schema { return &Structural{Structural: s.Structural.Items} } @@ -81,6 +88,48 @@ func (s *Structural) Default() any { return s.Structural.Default.Object } +func (s *Structural) Minimum() *float64 { + if s.Structural.ValueValidation == nil { + return nil + } + return s.Structural.ValueValidation.Minimum +} + +func (s *Structural) IsExclusiveMinimum() bool { + if s.Structural.ValueValidation == nil { + return false + } + return s.Structural.ValueValidation.ExclusiveMinimum +} + +func (s *Structural) Maximum() *float64 { + if s.Structural.ValueValidation == nil { + return nil + } + return s.Structural.ValueValidation.Maximum +} + +func (s *Structural) IsExclusiveMaximum() bool { + if s.Structural.ValueValidation == 
nil { + return false + } + return s.Structural.ValueValidation.ExclusiveMaximum +} + +func (s *Structural) MultipleOf() *float64 { + if s.Structural.ValueValidation == nil { + return nil + } + return s.Structural.ValueValidation.MultipleOf +} + +func (s *Structural) MinItems() *int64 { + if s.Structural.ValueValidation == nil { + return nil + } + return s.Structural.ValueValidation.MinItems +} + func (s *Structural) MaxItems() *int64 { if s.Structural.ValueValidation == nil { return nil @@ -88,6 +137,13 @@ func (s *Structural) MaxItems() *int64 { return s.Structural.ValueValidation.MaxItems } +func (s *Structural) MinLength() *int64 { + if s.Structural.ValueValidation == nil { + return nil + } + return s.Structural.ValueValidation.MinLength +} + func (s *Structural) MaxLength() *int64 { if s.Structural.ValueValidation == nil { return nil @@ -95,6 +151,13 @@ func (s *Structural) MaxLength() *int64 { return s.Structural.ValueValidation.MaxLength } +func (s *Structural) MinProperties() *int64 { + if s.Structural.ValueValidation == nil { + return nil + } + return s.Structural.ValueValidation.MinProperties +} + func (s *Structural) MaxProperties() *int64 { if s.Structural.ValueValidation == nil { return nil @@ -109,6 +172,12 @@ func (s *Structural) Required() []string { return s.Structural.ValueValidation.Required } +func (s *Structural) UniqueItems() bool { + // This field is forbidden in structural schema, + // but you can just use x-kubernetes-list-type:set to get around it :) + return false +} + func (s *Structural) Enum() []any { if s.Structural.ValueValidation == nil { return nil @@ -143,10 +212,110 @@ func (s *Structural) XListType() string { return *s.Structural.XListType } +func (s *Structural) XMapType() string { + if s.Structural.XMapType == nil { + return "" + } + return *s.Structural.XMapType +} + func (s *Structural) XListMapKeys() []string { return s.Structural.XListMapKeys } +func (s *Structural) AllOf() []common.Schema { + var res []common.Schema + for _, subSchema := range s.Structural.ValueValidation.AllOf { + subSchema := subSchema + res = append(res, nestedValueValidationToStructural(&subSchema)) + } + return res +} + +func (s *Structural) AnyOf() []common.Schema { + var res []common.Schema + for _, subSchema := range s.Structural.ValueValidation.AnyOf { + subSchema := subSchema + res = append(res, nestedValueValidationToStructural(&subSchema)) + } + return res +} + +func (s *Structural) OneOf() []common.Schema { + var res []common.Schema + for _, subSchema := range s.Structural.ValueValidation.OneOf { + subSchema := subSchema + res = append(res, nestedValueValidationToStructural(&subSchema)) + } + return res +} + +func (s *Structural) Not() common.Schema { + if s.Structural.ValueValidation.Not == nil { + return nil + } + return nestedValueValidationToStructural(s.Structural.ValueValidation.Not) +} + +// nestedValueValidationToStructural converts a nested value validation to +// an equivalent structural schema instance. +// +// This lets us avoid needing a separate adaptor for the nested value +// validations, and doesn't cost too much since we are usually exploring the + entire schema anyway.
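A rough sketch of how the new AllOf/AnyOf/OneOf/Not accessors might be consumed, assuming they are exposed through the common.Schema interface used by the generic CEL machinery; the helper name below is hypothetical, and the conversion helper these accessors rely on follows next.

package example

import (
    structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
    "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model"
)

// maxLengthsFromAnyOf is a hypothetical walker: it adapts a structural schema
// and reads maxLength constraints out of its anyOf branches through the same
// accessors used for the rest of the schema.
func maxLengthsFromAnyOf(s *structuralschema.Structural) []*int64 {
    if s == nil || s.ValueValidation == nil {
        // The new AllOf/AnyOf/OneOf/Not accessors read ValueValidation
        // without a nil check, so guard before adapting.
        return nil
    }
    adapted := &model.Structural{Structural: s}
    var out []*int64
    for _, branch := range adapted.AnyOf() {
        out = append(out, branch.MaxLength())
    }
    return out
}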
+func nestedValueValidationToStructural(nvv *schema.NestedValueValidation) *Structural { + var newItems *schema.Structural + if nvv.Items != nil { + newItems = nestedValueValidationToStructural(nvv.Items).Structural + } + + var newProperties map[string]schema.Structural + for k, v := range nvv.Properties { + if newProperties == nil { + newProperties = make(map[string]schema.Structural) + } + + v := v + newProperties[k] = *nestedValueValidationToStructural(&v).Structural + } + + return &Structural{ + Structural: &schema.Structural{ + Items: newItems, + Properties: newProperties, + ValueValidation: &nvv.ValueValidation, + }, + } +} + +type StructuralValidationRule struct { + rule, message, messageExpression, fieldPath string +} + +func (s *StructuralValidationRule) Rule() string { + return s.rule +} +func (s *StructuralValidationRule) Message() string { + return s.message +} +func (s *StructuralValidationRule) FieldPath() string { + return s.fieldPath +} +func (s *StructuralValidationRule) MessageExpression() string { + return s.messageExpression +} + +func (s *Structural) XValidations() []common.ValidationRule { + if len(s.Structural.XValidations) == 0 { + return nil + } + result := make([]common.ValidationRule, len(s.Structural.XValidations)) + for i, v := range s.Structural.XValidations { + result[i] = &StructuralValidationRule{rule: v.Rule, message: v.Message, messageExpression: v.MessageExpression, fieldPath: v.FieldPath} + } + return result +} + func (s *Structural) WithTypeAndObjectMeta() common.Schema { return &Structural{Structural: WithTypeAndObjectMeta(s.Structural)} } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go index 3f3905bf8a8ce..14e48c4c76f20 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go @@ -38,8 +38,10 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/common" "k8s.io/apiserver/pkg/cel/environment" "k8s.io/apiserver/pkg/cel/metrics" + "k8s.io/apiserver/pkg/warning" celconfig "k8s.io/apiserver/pkg/apis/cel" ) @@ -142,11 +144,85 @@ func validator(s *schema.Structural, isResourceRoot bool, declType *cel.DeclType return nil } +type options struct { + ratchetingOptions +} + +type Option func(*options) + +func WithRatcheting(correlation *common.CorrelatedObject) Option { + return func(o *options) { + o.currentCorrelation = correlation + } +} + // Validate validates all x-kubernetes-validations rules in Validator against obj and returns any errors. // If the validation rules exceed the costBudget, subsequent evaluations will be skipped, the list of errs returned will not be empty, and a negative remainingBudget will be returned. 
// Most callers can ignore the returned remainingBudget value unless another validate call is going to be made // context is passed for supporting context cancellation during cel validation -func (s *Validator) Validate(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj interface{}, costBudget int64) (errs field.ErrorList, remainingBudget int64) { +func (s *Validator) Validate(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj interface{}, costBudget int64, opts ...Option) (errs field.ErrorList, remainingBudget int64) { + opt := options{} + for _, o := range opts { + o(&opt) + } + + return s.validate(ctx, fldPath, sts, obj, oldObj, opt.ratchetingOptions, costBudget) +} + +// ratchetingOptions stores the current correlation object and the nearest +// parent which was correlatable. The parent is stored so that we can check at +// the point an error is thrown whether it should be ratcheted using simple +// logic +// Key and Index should be used as normally to traverse to the next node. +type ratchetingOptions struct { + // Current correlation object. If nil, then this node is from an uncorrelatable + // part of the schema + currentCorrelation *common.CorrelatedObject + + // If currentCorrelation is nil, this is the nearest parent to this node + // which was correlatable. If the parent is deepequal to its old value, + // then errors thrown by this node are ratcheted + nearestParentCorrelation *common.CorrelatedObject +} + +// shouldRatchetError returns true if the errors raised by the current node +// should be ratcheted. +// +// Errors for the current node should be ratcheted if one of the following is true: +// 1. The current node is correlatable, and it is equal to its old value +// 2. The current node has a correlatable ancestor, and the ancestor is equal +// to its old value. +func (r ratchetingOptions) shouldRatchetError() bool { + if r.currentCorrelation != nil { + return r.currentCorrelation.CachedDeepEqual() + } + + return r.nearestParentCorrelation.CachedDeepEqual() +} + +// Finds the next node following the field in the tree and returns options using +// that node. If none could be found, then retains a reference to the last +// correlatable ancestor for ratcheting purposes +func (r ratchetingOptions) key(field string) ratchetingOptions { + if r.currentCorrelation == nil { + return r + } + + return ratchetingOptions{currentCorrelation: r.currentCorrelation.Key(field), nearestParentCorrelation: r.currentCorrelation} +} + +// Finds the next node following the index in the tree and returns options using +// that node. 
If none could be found, then retains a reference to the last +// correlatable ancestor for ratcheting purposes +func (r ratchetingOptions) index(idx int) ratchetingOptions { + if r.currentCorrelation == nil { + return r + } + + return ratchetingOptions{currentCorrelation: r.currentCorrelation.Index(idx), nearestParentCorrelation: r.currentCorrelation} +} + +func (s *Validator) validate(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { t := time.Now() defer func() { metrics.Metrics.ObserveEvaluation(time.Since(t)) @@ -156,28 +232,31 @@ func (s *Validator) Validate(ctx context.Context, fldPath *field.Path, sts *sche return nil, remainingBudget } - errs, remainingBudget = s.validateExpressions(ctx, fldPath, sts, obj, oldObj, remainingBudget) + errs, remainingBudget = s.validateExpressions(ctx, fldPath, sts, obj, oldObj, correlation, remainingBudget) + if remainingBudget < 0 { return errs, remainingBudget } + switch obj := obj.(type) { case []interface{}: oldArray, _ := oldObj.([]interface{}) var arrayErrs field.ErrorList - arrayErrs, remainingBudget = s.validateArray(ctx, fldPath, sts, obj, oldArray, remainingBudget) + arrayErrs, remainingBudget = s.validateArray(ctx, fldPath, sts, obj, oldArray, correlation, remainingBudget) errs = append(errs, arrayErrs...) return errs, remainingBudget case map[string]interface{}: oldMap, _ := oldObj.(map[string]interface{}) var mapErrs field.ErrorList - mapErrs, remainingBudget = s.validateMap(ctx, fldPath, sts, obj, oldMap, remainingBudget) + mapErrs, remainingBudget = s.validateMap(ctx, fldPath, sts, obj, oldMap, correlation, remainingBudget) errs = append(errs, mapErrs...) return errs, remainingBudget } + return errs, remainingBudget } -func (s *Validator) validateExpressions(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj interface{}, costBudget int64) (errs field.ErrorList, remainingBudget int64) { +func (s *Validator) validateExpressions(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { // guard against oldObj being a non-nil interface with a nil value if oldObj != nil { v := reflect.ValueOf(oldObj) @@ -263,26 +342,35 @@ func (s *Validator) validateExpressions(ctx context.Context, fldPath *field.Path if len(compiled.NormalizedRuleFieldPath) > 0 { fldPath = fldPath.Child(compiled.NormalizedRuleFieldPath) } + + addErr := func(e *field.Error) { + if !compiled.TransitionRule && correlation.shouldRatchetError() { + warning.AddWarning(ctx, "", e.Error()) + } else { + errs = append(errs, e) + } + } + if compiled.MessageExpression != nil { messageExpression, newRemainingBudget, msgErr := evalMessageExpression(ctx, compiled.MessageExpression, rule.MessageExpression, activation, remainingBudget) if msgErr != nil { if msgErr.Type == cel.ErrorTypeInternal { - errs = append(errs, field.InternalError(fldPath, msgErr)) + addErr(field.InternalError(fldPath, msgErr)) return errs, -1 } else if msgErr.Type == cel.ErrorTypeInvalid { - errs = append(errs, field.Invalid(fldPath, sts.Type, msgErr.Error())) + addErr(field.Invalid(fldPath, sts.Type, msgErr.Error())) return errs, -1 } else { klog.V(2).ErrorS(msgErr, "messageExpression evaluation failed") - errs = append(errs, fieldErrorForReason(fldPath, sts.Type, ruleMessageOrDefault(rule), rule.Reason)) + 
addErr(fieldErrorForReason(fldPath, sts.Type, ruleMessageOrDefault(rule), rule.Reason)) remainingBudget = newRemainingBudget } } else { - errs = append(errs, fieldErrorForReason(fldPath, sts.Type, messageExpression, rule.Reason)) + addErr(fieldErrorForReason(fldPath, sts.Type, messageExpression, rule.Reason)) remainingBudget = newRemainingBudget } } else { - errs = append(errs, fieldErrorForReason(fldPath, sts.Type, ruleMessageOrDefault(rule), rule.Reason)) + addErr(fieldErrorForReason(fldPath, sts.Type, ruleMessageOrDefault(rule), rule.Reason)) } } } @@ -566,7 +654,7 @@ func (a *validationActivation) Parent() interpreter.Activation { return nil } -func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj map[string]interface{}, costBudget int64) (errs field.ErrorList, remainingBudget int64) { +func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj map[string]interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { remainingBudget = costBudget if remainingBudget < 0 { return errs, remainingBudget @@ -585,7 +673,7 @@ func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *s } var err field.ErrorList - err, remainingBudget = s.AdditionalProperties.Validate(ctx, fldPath.Key(k), sts.AdditionalProperties.Structural, v, oldV, remainingBudget) + err, remainingBudget = s.AdditionalProperties.validate(ctx, fldPath.Key(k), sts.AdditionalProperties.Structural, v, oldV, correlation.key(k), remainingBudget) errs = append(errs, err...) if remainingBudget < 0 { return errs, remainingBudget @@ -603,7 +691,7 @@ func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *s } var err field.ErrorList - err, remainingBudget = sub.Validate(ctx, fldPath.Child(k), &stsProp, v, oldV, remainingBudget) + err, remainingBudget = sub.validate(ctx, fldPath.Child(k), &stsProp, v, oldV, correlation.key(k), remainingBudget) errs = append(errs, err...) if remainingBudget < 0 { return errs, remainingBudget @@ -615,7 +703,7 @@ func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *s return errs, remainingBudget } -func (s *Validator) validateArray(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj []interface{}, costBudget int64) (errs field.ErrorList, remainingBudget int64) { +func (s *Validator) validateArray(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj []interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { remainingBudget = costBudget if remainingBudget < 0 { return errs, remainingBudget @@ -627,7 +715,7 @@ func (s *Validator) validateArray(ctx context.Context, fldPath *field.Path, sts correlatableOldItems := makeMapList(sts, oldObj) for i := range obj { var err field.ErrorList - err, remainingBudget = s.Items.Validate(ctx, fldPath.Index(i), sts.Items, obj[i], correlatableOldItems.Get(obj[i]), remainingBudget) + err, remainingBudget = s.Items.validate(ctx, fldPath.Index(i), sts.Items, obj[i], correlatableOldItems.Get(obj[i]), correlation.index(i), remainingBudget) errs = append(errs, err...) 
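Stepping back from the traversal internals for a moment: ratcheting is opt-in per call through the new variadic Option. A hypothetical caller, mirroring the wiring used by the test harness later in this change (the function name and field path are illustrative only), could look like the sketch below; the array traversal continues right after it.

package example

import (
    "context"

    structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
    "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel"
    "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model"
    "k8s.io/apimachinery/pkg/util/validation/field"
    celconfig "k8s.io/apiserver/pkg/apis/cel"
    "k8s.io/apiserver/pkg/cel/common"
)

// validateWithRatcheting shows the intended wiring: correlate old and new up
// front, then hand the correlation to Validate so that rule failures on
// unchanged values are downgraded to warnings.
func validateWithRatcheting(ctx context.Context, s *structuralschema.Structural, newObj, oldObj interface{}) field.ErrorList {
    v := cel.NewValidator(s, false, celconfig.PerCallLimit)
    if v == nil {
        // NewValidator returns nil when the schema carries no CEL rules.
        return nil
    }
    errs, _ := v.Validate(ctx, field.NewPath("root"), s, newObj, oldObj,
        celconfig.RuntimeCELCostBudget,
        cel.WithRatcheting(common.NewCorrelatedObject(newObj, oldObj, &model.Structural{Structural: s})),
    )
    return errs
}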
if remainingBudget < 0 { return errs, remainingBudget diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation_test.go index 899ef4e97e0bc..8d67ac15021fc 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation_test.go @@ -17,22 +17,30 @@ limitations under the License. package cel import ( + "bytes" "context" "flag" "fmt" "math" "strings" + "sync" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/validation/strfmt" + apiextensionsinternal "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" celconfig "k8s.io/apiserver/pkg/apis/cel" + "k8s.io/apiserver/pkg/cel/common" + "k8s.io/apiserver/pkg/warning" ) // TestValidationExpressions tests CEL integration with custom resource values and OpenAPIv3. @@ -3014,6 +3022,591 @@ func TestValidateFieldPath(t *testing.T) { } } +// FixTabsOrDie counts the number of tab characters preceding the first +// line in the given yaml object. It removes that many tabs from every +// line. It panics (it's a test function) if some line has fewer tabs +// than the first line. +// +// The purpose of this is to make it easier to read tests. +func FixTabsOrDie(in string) string { + lines := bytes.Split([]byte(in), []byte{'\n'}) + if len(lines[0]) == 0 && len(lines) > 1 { + lines = lines[1:] + } + // Create prefix made of tabs that we want to remove. + var prefix []byte + for _, c := range lines[0] { + if c != '\t' { + break + } + prefix = append(prefix, byte('\t')) + } + // Remove prefix from all lines, fail otherwise. + for i := range lines { + line := lines[i] + // It's OK for the last line to be blank (trailing \n) + if i == len(lines)-1 && len(line) <= len(prefix) && bytes.TrimSpace(line) == nil { + lines[i] = []byte{} + break + } + if !bytes.HasPrefix(line, prefix) { + minRange := i - 5 + maxRange := i + 5 + if minRange < 0 { + minRange = 0 + } + if maxRange > len(lines) { + maxRange = len(lines) + } + panic(fmt.Errorf("line %d doesn't start with expected number (%d) of tabs (%v-%v):\n%v", i, len(prefix), minRange, maxRange, string(bytes.Join(lines[minRange:maxRange], []byte{'\n'})))) + } + lines[i] = line[len(prefix):] + } + + joined := string(bytes.Join(lines, []byte{'\n'})) + + // Convert remaining tabs to spaces since yaml doesn't like tabs + // (assuming 2 space alignment) + return strings.ReplaceAll(joined, "\t", " ") +} + +// Creates a *schema.Structural by decoding the given YAML.
Panics on error +func mustSchema(source string) *schema.Structural { + source = FixTabsOrDie(source) + d := yaml.NewYAMLOrJSONDecoder(strings.NewReader(source), 4096) + props := &apiextensions.JSONSchemaProps{} + if err := d.Decode(props); err != nil { + panic(err) + } + convertedProps := &apiextensionsinternal.JSONSchemaProps{} + if err := apiextensions.Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(props, convertedProps, nil); err != nil { + panic(err) + } + + res, err := schema.NewStructural(convertedProps) + if err != nil { + panic(err) + } + return res +} + +// Creates an *unstructured by decoding the given YAML. Panics on error +func mustUnstructured(source string) interface{} { + source = FixTabsOrDie(source) + d := yaml.NewYAMLOrJSONDecoder(strings.NewReader(source), 4096) + var res interface{} + if err := d.Decode(&res); err != nil { + panic(err) + } + return res +} + +type warningRecorder struct { + mu sync.Mutex + warnings []string +} + +// AddWarning adds a warning to recorder. +func (r *warningRecorder) AddWarning(agent, text string) { + r.mu.Lock() + defer r.mu.Unlock() + r.warnings = append(r.warnings, text) +} + +func (r *warningRecorder) Warnings() []string { + r.mu.Lock() + defer r.mu.Unlock() + + warnings := make([]string, len(r.warnings)) + copy(warnings, r.warnings) + return warnings +} + +func TestRatcheting(t *testing.T) { + cases := []struct { + name string + schema *schema.Structural + oldObj interface{} + newObj interface{} + + // Errors that should occur when evaluating this operation with + // ratcheting feature enabled + errors []string + + // Errors that should occur when evaluating this operation with + // ratcheting feature disabled + // These errors should be raised as warnings when ratcheting is enabled + warnings []string + + runtimeCostBudget int64 + }{ + { + name: "normal CEL expression", + schema: mustSchema(` + type: object + properties: + foo: + type: string + x-kubernetes-validations: + - rule: self == "bar" + message: "gotta be baz" + `), + oldObj: mustUnstructured(` + foo: baz + `), + newObj: mustUnstructured(` + foo: baz + `), + warnings: []string{ + `root.foo: Invalid value: "string": gotta be baz`, + }, + }, + { + name: "normal CEL expression thats a descendent of an atomic array whose parent is totally unchanged", + schema: mustSchema(` + type: array + x-kubernetes-list-type: atomic + items: + type: object + properties: + bar: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be baz" + `), + // CEL error comes from uncorrelatable portion of the schema, + // but it should be ratcheted anyway because it is the descendent + // of an unchanged correlatable node + oldObj: mustUnstructured(` + - bar: bar + `), + newObj: mustUnstructured(` + - bar: bar + `), + warnings: []string{ + `root[0].bar: Invalid value: "string": gotta be baz`, + }, + }, + { + name: "normal CEL expression thats a descendent of a set whose parent is totally unchanged", + schema: mustSchema(` + type: array + x-kubernetes-list-type: set + items: + type: number + x-kubernetes-validations: + - rule: int(self) % 2 == 1 + message: "gotta be odd" + `), + // CEL error comes from uncorrelatable portion of the schema, + // but it should be ratcheted anyway because it is the descendent + // of an unchanged correlatable node + oldObj: mustUnstructured(` + - 1 + - 2 + `), + newObj: mustUnstructured(` + - 1 + - 2 + `), + warnings: []string{ + `root[1]: Invalid value: "number": gotta be odd`, + }, + }, + { + name: "normal CEL expression thats a 
descendent of a set and one of its siblings has changed", + schema: mustSchema(` + type: object + properties: + stringField: + type: string + setArray: + type: array + x-kubernetes-list-type: set + items: + type: number + x-kubernetes-validations: + - rule: int(self) % 2 == 1 + message: "gotta be odd" + `), + oldObj: mustUnstructured(` + stringField: foo + setArray: + - 1 + - 3 + - 2 + `), + newObj: mustUnstructured(` + stringField: changed but ratcheted + setArray: + - 1 + - 3 + - 2 + `), + warnings: []string{ + `root.setArray[2]: Invalid value: "number": gotta be odd`, + }, + }, + { + name: "descendent of a map list whose parent is unchanged", + schema: mustSchema(` + type: array + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: ["key"] + items: + type: object + properties: + key: + type: string + value: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be baz" + `), + oldObj: mustUnstructured(` + - key: foo + value: notbaz + - key: bar + value: notbaz + `), + newObj: mustUnstructured(` + - key: foo + value: notbaz + - key: bar + value: notbaz + - key: baz + value: baz + `), + warnings: []string{ + `root[0].value: Invalid value: "string": gotta be baz`, + `root[1].value: Invalid value: "string": gotta be baz`, + }, + }, + { + name: "descendent of a map list whose siblings have changed", + schema: mustSchema(` + type: array + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: ["key"] + items: + type: object + properties: + key: + type: string + value: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be baz" + `), + oldObj: mustUnstructured(` + - key: foo + value: notbaz + - key: bar + value: notbaz + `), + newObj: mustUnstructured(` + - key: foo + value: baz + - key: bar + value: notbaz + `), + warnings: []string{ + `root[1].value: Invalid value: "string": gotta be baz`, + }, + }, + { + name: "descendent of a map whose parent is totally unchanged", + schema: mustSchema(` + type: object + properties: + stringField: + type: string + mapField: + type: object + properties: + foo: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be baz" + mapField: + type: object + properties: + bar: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be nested baz" + `), + oldObj: mustUnstructured(` + stringField: foo + mapField: + foo: notbaz + mapField: + bar: notbaz + `), + newObj: mustUnstructured(` + stringField: foo + mapField: + foo: notbaz + mapField: + bar: notbaz + `), + warnings: []string{ + `root.mapField.foo: Invalid value: "string": gotta be baz`, + `root.mapField.mapField.bar: Invalid value: "string": gotta be nested baz`, + }, + }, + { + name: "descendent of a map whose siblings have changed", + schema: mustSchema(` + type: object + properties: + stringField: + type: string + mapField: + type: object + properties: + foo: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be baz" + mapField: + type: object + properties: + bar: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be baz" + otherBar: + type: string + x-kubernetes-validations: + - rule: self == "otherBaz" + message: "gotta be otherBaz" + `), + oldObj: mustUnstructured(` + stringField: foo + mapField: + foo: baz + mapField: + bar: notbaz + otherBar: nototherBaz + `), + newObj: mustUnstructured(` + stringField: foo + mapField: + foo: notbaz + mapField: + bar: notbaz + otherBar: otherBaz + `), + errors: []string{ + 
// Didn't get ratcheted because we changed its value from baz to notbaz + `root.mapField.foo: Invalid value: "string": gotta be baz`, + }, + warnings: []string{ + // Ratcheted because its value remained the same, even though it is invalid + `root.mapField.mapField.bar: Invalid value: "string": gotta be baz`, + }, + }, + { + name: "normal CEL expression thats a descendent of an atomic array whose siblings has changed", + schema: mustSchema(` + type: object + properties: + stringField: + type: string + atomicArray: + type: array + x-kubernetes-list-type: atomic + items: + type: object + properties: + bar: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be baz" + `), + oldObj: mustUnstructured(` + stringField: foo + atomicArray: + - bar: bar + `), + newObj: mustUnstructured(` + stringField: changed but ratcheted + atomicArray: + - bar: bar + `), + warnings: []string{ + `root.atomicArray[0].bar: Invalid value: "string": gotta be baz`, + }, + }, + { + name: "we can't ratchet a normal CEL expression from an uncorrelatable part of the schema whose parent nodes has changed", + schema: mustSchema(` + type: array + x-kubernetes-list-type: atomic + items: + type: object + properties: + bar: + type: string + x-kubernetes-validations: + - rule: self == "baz" + message: "gotta be baz" + `), + // CEL error comes from uncorrelatable portion of the schema, + // but it should be ratcheted anyway because it is the descendent + // or an unchanged correlatable node + oldObj: mustUnstructured(` + - bar: bar + `), + newObj: mustUnstructured(` + - bar: bar + - bar: baz + `), + errors: []string{ + `root[0].bar: Invalid value: "string": gotta be baz`, + }, + }, + { + name: "transition rules never ratchet for correlatable schemas", + schema: mustSchema(` + type: object + properties: + foo: + type: string + x-kubernetes-validations: + - rule: oldSelf != "bar" && self == "baz" + message: gotta be baz + `), + oldObj: mustUnstructured(` + foo: bar + `), + newObj: mustUnstructured(` + foo: bar + `), + errors: []string{ + `root.foo: Invalid value: "string": gotta be baz`, + }, + }, + { + name: "changing field path does not change ratcheting logic", + schema: mustSchema(` + type: object + x-kubernetes-validations: + - rule: self.foo == "baz" + message: gotta be baz + fieldPath: ".foo" + properties: + bar: + type: string + foo: + type: string + `), + oldObj: mustUnstructured(` + foo: bar + `), + // Fieldpath is on unchanged field `foo`, but since rule is on the + // changed parent object we still get an error + newObj: mustUnstructured(` + foo: bar + bar: invalid + `), + errors: []string{ + `root.foo: Invalid value: "object": gotta be baz`, + }, + }, + { + name: "cost budget errors are not ratcheted", + schema: mustSchema(` + type: string + minLength: 5 + x-kubernetes-validations: + - rule: self == "baz" + message: gotta be baz + `), + oldObj: "unchanged", + newObj: "unchanged", + runtimeCostBudget: 1, + errors: []string{ + `validation failed due to running out of cost budget, no further validation rules will be run`, + }, + }, + { + name: "compile errors are not ratcheted", + schema: mustSchema(` + type: string + x-kubernetes-validations: + - rule: asdausidyhASDNJm + message: gotta be baz + `), + oldObj: "unchanged", + newObj: "unchanged", + errors: []string{ + `rule compile error: compilation failed: ERROR: :1:1: undeclared reference to 'asdausidyhASDNJm'`, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + validator := NewValidator(c.schema, false, 
celconfig.PerCallLimit) + require.NotNil(t, validator) + recorder := &warningRecorder{} + ctx := warning.WithWarningRecorder(context.TODO(), recorder) + budget := c.runtimeCostBudget + if budget == 0 { + budget = celconfig.RuntimeCELCostBudget + } + errs, _ := validator.Validate( + ctx, + field.NewPath("root"), + c.schema, + c.newObj, + c.oldObj, + budget, + WithRatcheting(common.NewCorrelatedObject(c.newObj, c.oldObj, &model.Structural{Structural: c.schema})), + ) + + require.Len(t, errs, len(c.errors), "must have expected number of errors") + require.Len(t, recorder.Warnings(), len(c.warnings), "must have expected number of warnings") + + // Check that the expected errors were raised + for _, expectedErr := range c.errors { + found := false + for _, err := range errs { + if strings.Contains(err.Error(), expectedErr) { + found = true + break + } + } + + assert.True(t, found, "expected error %q not found", expectedErr) + } + + // Check that the ratcheting disabled errors were raised as warnings + for _, expectedWarning := range c.warnings { + found := false + for _, warning := range recorder.Warnings() { + if warning == expectedWarning { + found = true + break + } + } + assert.True(t, found, "expected warning %q not found", expectedWarning) + } + + }) + } +} + func genString(n int, c rune) string { b := strings.Builder{} for i := 0; i < n; i++ { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/validation.go index cd7c6e07558ba..02945b14f4ca1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/validation.go @@ -21,9 +21,6 @@ import ( "fmt" "reflect" - "k8s.io/kube-openapi/pkg/validation/strfmt" - kubeopenapivalidate "k8s.io/kube-openapi/pkg/validation/validate" - structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel" schemaobjectmeta "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta" @@ -74,7 +71,7 @@ func validate(ctx context.Context, pth *field.Path, s *structuralschema.Structur isResourceRoot := s == rootSchema if s.Default.Object != nil { - validator := kubeopenapivalidate.NewSchemaValidator(s.ToKubeOpenAPI(), nil, "", strfmt.Default) + validator := apiservervalidation.NewSchemaValidatorFromOpenAPI(s.ToKubeOpenAPI()) if insideMeta { obj, _, err := f(runtime.DeepCopyJSONValue(s.Default.Object)) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting.go index 6565d83eee53c..3cc653e7466c9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting.go @@ -54,13 +54,28 @@ func NewRatchetingSchemaValidator(schema *spec.Schema, rootSchema interface{}, r } } -func (r *RatchetingSchemaValidator) Validate(new interface{}) *validate.Result { +func (r *RatchetingSchemaValidator) Validate(new interface{}, options ...ValidationOption) *validate.Result { sv := validate.NewSchemaValidator(r.schema, r.root, r.path, r.knownFormats, r.options...) 
return sv.Validate(new) } -func (r *RatchetingSchemaValidator) ValidateUpdate(new, old interface{}) *validate.Result { - return newRatchetingValueValidator(new, old, r.schemaArgs).Validate() +func (r *RatchetingSchemaValidator) ValidateUpdate(new, old interface{}, options ...ValidationOption) *validate.Result { + opts := NewValidationOptions(options...) + + if !opts.Ratcheting { + sv := validate.NewSchemaValidator(r.schema, r.root, r.path, r.knownFormats, r.options...) + return sv.Validate(new) + } + + correlation := opts.CorrelatedObject + if correlation == nil { + correlation = common.NewCorrelatedObject(new, old, &celopenapi.Schema{Schema: r.schema}) + } + + return newRatchetingValueValidator( + correlation, + r.schemaArgs, + ).Validate(new) } // ratchetingValueValidator represents an invocation of SchemaValidator.ValidateUpdate @@ -80,40 +95,13 @@ type ratchetingValueValidator struct { // schemaArgs provides the arguments to use in the temporary SchemaValidator // that is created during a call to Validate. schemaArgs - - // Currently correlated old value during traversal of the schema/object - oldValue interface{} - - // Value being validated - value interface{} - - // Scratch space below, may change during validation - - // Cached comparison result of DeepEqual of `value` and `thunk.oldValue` - comparisonResult *bool - - // Cached map representation of a map-type list, or nil if not map-type list - mapList common.MapList - - // Children spawned by a call to `Validate` on this object - // key is either a string or an index, depending upon whether `value` is - // a map or a list, respectively. - // - // The list of children may be incomplete depending upon if the internal - // logic of kube-openapi's SchemaValidator short-circuited before - // reaching all of the children. - // - // It should be expected to have an entry for either all of the children, or - // none of them. - children map[interface{}]*ratchetingValueValidator + correlation *common.CorrelatedObject } -func newRatchetingValueValidator(newValue, oldValue interface{}, args schemaArgs) *ratchetingValueValidator { +func newRatchetingValueValidator(correlation *common.CorrelatedObject, args schemaArgs) *ratchetingValueValidator { return &ratchetingValueValidator{ - oldValue: oldValue, - value: newValue, - schemaArgs: args, - children: map[interface{}]*ratchetingValueValidator{}, + schemaArgs: args, + correlation: correlation, } } @@ -121,12 +109,8 @@ func newRatchetingValueValidator(newValue, oldValue interface{}, args schemaArgs // that injects a ratchetingValueValidator to be used for all subkeys and subindices func (r *ratchetingValueValidator) getValidateOption() validate.Option { return func(svo *validate.SchemaValidatorOptions) { - svo.NewValidatorForField = func(field string, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts ...validate.Option) validate.ValueValidator { - return r.SubPropertyValidator(field, schema, rootSchema, root, formats, opts...) - } - svo.NewValidatorForIndex = func(index int, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts ...validate.Option) validate.ValueValidator { - return r.SubIndexValidator(index, schema, rootSchema, root, formats, opts...) 
- } + svo.NewValidatorForField = r.SubPropertyValidator + svo.NewValidatorForIndex = r.SubIndexValidator } } @@ -149,21 +133,21 @@ func (r *ratchetingValueValidator) getValidateOption() validate.Option { // // This call has a side-effect of populating it's `children` variable with // the explored nodes of the object tree. -func (r *ratchetingValueValidator) Validate() *validate.Result { +func (r *ratchetingValueValidator) Validate(new interface{}) *validate.Result { opts := append([]validate.Option{ r.getValidateOption(), }, r.options...) s := validate.NewSchemaValidator(r.schema, r.root, r.path, r.knownFormats, opts...) - res := s.Validate(r.value) + res := s.Validate(r.correlation.Value) if res.IsValid() { return res } // Current ratcheting rule is to ratchet errors if DeepEqual(old, new) is true. - if r.CachedDeepEqual() { + if r.correlation.CachedDeepEqual() { newRes := &validate.Result{} newRes.MergeAsWarnings(res) return newRes @@ -180,30 +164,18 @@ func (r *ratchetingValueValidator) Validate() *validate.Result { // // If the old value cannot be correlated, then default validation is used. func (r *ratchetingValueValidator) SubPropertyValidator(field string, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...validate.Option) validate.ValueValidator { - // Find correlated old value - asMap, ok := r.oldValue.(map[string]interface{}) - if !ok { - return validate.NewSchemaValidator(schema, rootSchema, root, formats, options...) - } - - oldValueForField, ok := asMap[field] - if !ok { + childNode := r.correlation.Key(field) + if childNode == nil { return validate.NewSchemaValidator(schema, rootSchema, root, formats, options...) } - return inlineValidator(func(new interface{}) *validate.Result { - childNode := newRatchetingValueValidator(new, oldValueForField, schemaArgs{ - schema: schema, - root: rootSchema, - path: root, - knownFormats: formats, - options: options, - }) - - r.children[field] = childNode - return childNode.Validate() + return newRatchetingValueValidator(childNode, schemaArgs{ + schema: schema, + root: rootSchema, + path: root, + knownFormats: formats, + options: options, }) - } // SubIndexValidator overrides the standard validator constructor for sub-indicies by @@ -214,199 +186,27 @@ func (r *ratchetingValueValidator) SubPropertyValidator(field string, schema *sp // // If the old value cannot be correlated, then default validation is used. func (r *ratchetingValueValidator) SubIndexValidator(index int, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...validate.Option) validate.ValueValidator { - oldValueForIndex := r.correlateOldValueForChildAtNewIndex(index) - if oldValueForIndex == nil { - // If correlation fails, default to non-ratcheting logic + childNode := r.correlation.Index(index) + if childNode == nil { return validate.NewSchemaValidator(schema, rootSchema, root, formats, options...) 
} - return inlineValidator(func(new interface{}) *validate.Result { - childNode := newRatchetingValueValidator(new, oldValueForIndex, schemaArgs{ - schema: schema, - root: rootSchema, - path: root, - knownFormats: formats, - options: options, - }) - - r.children[index] = childNode - return childNode.Validate() + return newRatchetingValueValidator(childNode, schemaArgs{ + schema: schema, + root: rootSchema, + path: root, + knownFormats: formats, + options: options, }) } -// If oldValue is not a list, returns nil -// If oldValue is a list takes mapType into account and attempts to find the -// old value with the same index or key, depending upon the mapType. -// -// If listType is map, creates a map representation of the list using the designated -// map-keys and caches it for future calls. -func (r *ratchetingValueValidator) correlateOldValueForChildAtNewIndex(index int) any { - oldAsList, ok := r.oldValue.([]interface{}) - if !ok { - return nil - } - - asList, ok := r.value.([]interface{}) - if !ok { - return nil - } else if len(asList) <= index { - // Cannot correlate out of bounds index - return nil - } - - listType, _ := r.schema.Extensions.GetString("x-kubernetes-list-type") - switch listType { - case "map": - // Look up keys for this index in current object - currentElement := asList[index] - - oldList := r.mapList - if oldList == nil { - oldList = celopenapi.MakeMapList(r.schema, oldAsList) - r.mapList = oldList - } - return oldList.Get(currentElement) - - case "set": - // Are sets correlatable? Only if the old value equals the current value. - // We might be able to support this, but do not currently see a lot - // of value - // (would allow you to add/remove items from sets with ratcheting but not change them) - return nil - case "atomic": - // Atomic lists are not correlatable by item - // Ratcheting is not available on a per-index basis - return nil - default: - // Correlate by-index by default. - // - // Cannot correlate an out-of-bounds index - if len(oldAsList) <= index { - return nil - } - - return oldAsList[index] - } -} - -// CachedDeepEqual is equivalent to reflect.DeepEqual, but caches the -// results in the tree of ratchetInvocationScratch objects on the way: -// -// For objects and arrays, this function will make a best effort to make -// use of past DeepEqual checks performed by this Node's children, if available. -// -// If a lazy computation could not be found for all children possibly due -// to validation logic short circuiting and skipping the children, then -// this function simply defers to reflect.DeepEqual. -func (r *ratchetingValueValidator) CachedDeepEqual() (res bool) { - if r.comparisonResult != nil { - return *r.comparisonResult - } - - defer func() { - r.comparisonResult = &res - }() - - if r.value == nil && r.oldValue == nil { - return true - } else if r.value == nil || r.oldValue == nil { - return false - } - - oldAsArray, oldIsArray := r.oldValue.([]interface{}) - newAsArray, newIsArray := r.value.([]interface{}) - - if oldIsArray != newIsArray { - return false - } else if oldIsArray { - if len(oldAsArray) != len(newAsArray) { - return false - } else if len(r.children) != len(oldAsArray) { - // kube-openapi validator is written to always visit all - // children of a slice, so this case is only possible if - // one of the children could not be correlated. In that case, - // we know the objects are not equal. - // - return false - } - - // Correctly considers map-type lists due to fact that index here - // is only used for numbering. 
The correlation is stored in the - // childInvocation itself - // - // NOTE: This does not consider sets, since we don't correlate them. - for i := range newAsArray { - // Query for child - child, ok := r.children[i] - if !ok { - // This should not happen - return false - } else if !child.CachedDeepEqual() { - // If one child is not equal the entire object is not equal - return false - } - } - - return true - } - - oldAsMap, oldIsMap := r.oldValue.(map[string]interface{}) - newAsMap, newIsMap := r.value.(map[string]interface{}) - - if oldIsMap != newIsMap { - return false - } else if oldIsMap { - if len(oldAsMap) != len(newAsMap) { - return false - } else if len(oldAsMap) == 0 && len(newAsMap) == 0 { - // Both empty - return true - } else if len(r.children) != len(oldAsMap) { - // If we are missing a key it is because the old value could not - // be correlated to the new, so the objects are not equal. - // - return false - } - - for k := range oldAsMap { - // Check to see if this child was explored during validation - child, ok := r.children[k] - if !ok { - // Child from old missing in new due to key change - // Objects are not equal. - return false - } else if !child.CachedDeepEqual() { - // If one child is not equal the entire object is not equal - return false - } - } - - return true - } - - return reflect.DeepEqual(r.oldValue, r.value) -} - -// A validator which just calls a validate function, and advertises that it -// validates anything -// -// In the future kube-openapi's ValueValidator interface can be simplified -// to be closer to `currentValidator.Options.NewValidator(value, ...).Validate()` -// so that a tree of "validation nodes" can be more formally encoded in the API. -// In that case this class would not be necessary. -type inlineValidator func(new interface{}) *validate.Result - -var _ validate.ValueValidator = inlineValidator(nil) - -func (f inlineValidator) Validate(new interface{}) *validate.Result { - return f(new) -} +var _ validate.ValueValidator = (&ratchetingValueValidator{}) -func (f inlineValidator) SetPath(path string) { +func (r ratchetingValueValidator) SetPath(path string) { // Do nothing // Unused by kube-openapi } -func (f inlineValidator) Applies(source interface{}, valueKind reflect.Kind) bool { +func (r ratchetingValueValidator) Applies(source interface{}, valueKind reflect.Kind) bool { return true } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting_test.go index 078bb548cdcb4..c136617218c23 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting_test.go @@ -58,8 +58,8 @@ var largeIntSchema *spec.Schema = &spec.Schema{ func TestScalarRatcheting(t *testing.T) { validator := validation.NewRatchetingSchemaValidator(mediumIntSchema, nil, "", strfmt.Default) - require.True(t, validator.ValidateUpdate(1, 1).IsValid()) - require.False(t, validator.ValidateUpdate(1, 2).IsValid()) + require.True(t, validator.ValidateUpdate(1, 1, validation.WithRatcheting(nil)).IsValid()) + require.False(t, validator.ValidateUpdate(1, 2, validation.WithRatcheting(nil)).IsValid()) } var objectSchema *spec.Schema = &spec.Schema{ @@ -90,18 +90,18 @@ func TestObjectScalarFieldsRatcheting(t *testing.T) { "small": 500, }, map[string]interface{}{ "small": 500, - }).IsValid()) + }, validation.WithRatcheting(nil)).IsValid()) 
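The WithRatcheting(nil) pattern used in these assertions is worth calling out: when no CorrelatedObject is supplied, ValidateUpdate builds one itself from new/old, and omitting the option altogether falls back to plain validation of the new value. A small sketch of a caller relying on that default (the helper name is illustrative); the remaining assertions continue below.

package example

import (
    "k8s.io/apiextensions-apiserver/pkg/apiserver/validation"
    "k8s.io/kube-openapi/pkg/validation/spec"
    "k8s.io/kube-openapi/pkg/validation/strfmt"
)

// ratchetedUpdateIsValid relies on the nil-correlation default: ValidateUpdate
// builds its own CorrelatedObject from new/old when none is supplied.
func ratchetedUpdateIsValid(s *spec.Schema, newObj, oldObj interface{}) bool {
    v := validation.NewRatchetingSchemaValidator(s, nil, "", strfmt.Default)
    return v.ValidateUpdate(newObj, oldObj, validation.WithRatcheting(nil)).IsValid()
}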
assert.True(t, validator.ValidateUpdate(map[string]interface{}{ "small": 501, }, map[string]interface{}{ "small": 501, "medium": 500, - }).IsValid()) + }, validation.WithRatcheting(nil)).IsValid()) assert.False(t, validator.ValidateUpdate(map[string]interface{}{ "small": 500, }, map[string]interface{}{ "small": 501, - }).IsValid()) + }, validation.WithRatcheting(nil)).IsValid()) } // Shows schemas with object fields which themselves are ratcheted can be ratcheted @@ -113,7 +113,7 @@ func TestObjectObjectFieldsRatcheting(t *testing.T) { }}, map[string]interface{}{ "nested": map[string]interface{}{ "small": 500, - }}).IsValid()) + }}, validation.WithRatcheting(nil)).IsValid()) assert.True(t, validator.ValidateUpdate(map[string]interface{}{ "nested": map[string]interface{}{ "small": 501, @@ -121,14 +121,14 @@ func TestObjectObjectFieldsRatcheting(t *testing.T) { "nested": map[string]interface{}{ "small": 501, "medium": 500, - }}).IsValid()) + }}, validation.WithRatcheting(nil)).IsValid()) assert.False(t, validator.ValidateUpdate(map[string]interface{}{ "nested": map[string]interface{}{ "small": 500, }}, map[string]interface{}{ "nested": map[string]interface{}{ "small": 501, - }}).IsValid()) + }}, validation.WithRatcheting(nil)).IsValid()) } func ptr[T any](v T) *T { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go index e0042356ac0c4..7304018fb343b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go @@ -24,6 +24,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apiextensions-apiserver/pkg/features" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/cel/common" utilfeature "k8s.io/apiserver/pkg/util/feature" openapierrors "k8s.io/kube-openapi/pkg/validation/errors" "k8s.io/kube-openapi/pkg/validation/spec" @@ -33,22 +34,60 @@ import ( type SchemaValidator interface { SchemaCreateValidator - ValidateUpdate(new, old interface{}) *validate.Result + ValidateUpdate(new, old interface{}, options ...ValidationOption) *validate.Result } type SchemaCreateValidator interface { - Validate(value interface{}) *validate.Result + Validate(value interface{}, options ...ValidationOption) *validate.Result +} + +type ValidationOptions struct { + // Whether errors from unchanged portions of the schema should be ratcheted + // This field is ignored for Validate + Ratcheting bool + + // Correlation between old and new arguments. + // If set, this is expected to be the correlation between the `new` and + // `old` arguments to ValidateUpdate, and values for `new` and `old` will + // be taken from the correlation. + // + // This field is ignored for Validate + // + // Used for ratcheting, but left as a separate field since it may be used + // for other purposes in the future. 
+ CorrelatedObject *common.CorrelatedObject +} + +type ValidationOption func(*ValidationOptions) + +func NewValidationOptions(opts ...ValidationOption) ValidationOptions { + options := ValidationOptions{} + for _, opt := range opts { + opt(&options) + } + return options +} + +func WithRatcheting(correlation *common.CorrelatedObject) ValidationOption { + return func(options *ValidationOptions) { + options.Ratcheting = true + options.CorrelatedObject = correlation + } } // basicSchemaValidator wraps a kube-openapi SchemaCreateValidator to // support ValidateUpdate. It implements ValidateUpdate by simply validating -// the new value via kube-openapi, ignoring the old value. +// the new value via kube-openapi, ignoring the old value type basicSchemaValidator struct { *validate.SchemaValidator } -func (s basicSchemaValidator) ValidateUpdate(new, old interface{}) *validate.Result { - return s.Validate(new) +func (s basicSchemaValidator) Validate(new interface{}, options ...ValidationOption) *validate.Result { + return s.SchemaValidator.Validate(new) +} + +func (s basicSchemaValidator) ValidateUpdate(new, old interface{}, options ...ValidationOption) *validate.Result { + return s.Validate(new, options...) } // NewSchemaValidator creates an openapi schema validator for the given CRD validation. @@ -67,11 +106,15 @@ func NewSchemaValidator(customResourceValidation *apiextensions.JSONSchemaProps) return nil, nil, err } } + return NewSchemaValidatorFromOpenAPI(openapiSchema), openapiSchema, nil +} +func NewSchemaValidatorFromOpenAPI(openapiSchema *spec.Schema) SchemaValidator { if utilfeature.DefaultFeatureGate.Enabled(features.CRDValidationRatcheting) { - return NewRatchetingSchemaValidator(openapiSchema, nil, "", strfmt.Default), openapiSchema, nil + return NewRatchetingSchemaValidator(openapiSchema, nil, "", strfmt.Default) } - return basicSchemaValidator{validate.NewSchemaValidator(openapiSchema, nil, "", strfmt.Default)}, openapiSchema, nil + return basicSchemaValidator{validate.NewSchemaValidator(openapiSchema, nil, "", strfmt.Default)} + } // ValidateCustomResourceUpdate validates the transition of Custom Resource from @@ -80,7 +123,7 @@ func NewSchemaValidator(customResourceValidation *apiextensions.JSONSchemaProps) // // If feature `CRDValidationRatcheting` is disabled, this behaves identically to // ValidateCustomResource(customResource). -func ValidateCustomResourceUpdate(fldPath *field.Path, customResource, old interface{}, validator SchemaValidator) field.ErrorList { +func ValidateCustomResourceUpdate(fldPath *field.Path, customResource, old interface{}, validator SchemaValidator, options ...ValidationOption) field.ErrorList { // Additional feature gate check for sanity if !utilfeature.DefaultFeatureGate.Enabled(features.CRDValidationRatcheting) { return ValidateCustomResource(nil, customResource, validator) @@ -88,7 +131,7 @@ func ValidateCustomResourceUpdate(fldPath *field.Path, customResource, old inter return nil } - result := validator.ValidateUpdate(customResource, old) + result := validator.ValidateUpdate(customResource, old, options...) if result.IsValid() { return nil } @@ -98,12 +141,12 @@ func ValidateCustomResourceUpdate(fldPath *field.Path, customResource, old inter // ValidateCustomResource validates the Custom Resource against the schema in the CustomResourceDefinition. // CustomResource is a JSON data structure. 
-func ValidateCustomResource(fldPath *field.Path, customResource interface{}, validator SchemaCreateValidator) field.ErrorList { +func ValidateCustomResource(fldPath *field.Path, customResource interface{}, validator SchemaCreateValidator, options ...ValidationOption) field.ErrorList { if validator == nil { return nil } - result := validator.Validate(customResource) + result := validator.Validate(customResource, options...) if result.IsValid() { return nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/builder.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/builder.go index bdfd1d25dfd2a..676c8ad36b9db 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/builder.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/builder/builder.go @@ -195,7 +195,7 @@ func BuildOpenAPIV3(crd *apiextensionsv1.CustomResourceDefinition, version strin return nil, err } - return builder3.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices([]*restful.WebService{b.ws}), b.getOpenAPIConfig(false)) + return builder3.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices([]*restful.WebService{b.ws}), b.getOpenAPIV3Config()) } // BuildOpenAPIV2 builds OpenAPI v2 for the given crd in the given version @@ -205,7 +205,7 @@ func BuildOpenAPIV2(crd *apiextensionsv1.CustomResourceDefinition, version strin return nil, err } - return openapibuilder.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices([]*restful.WebService{b.ws}), b.getOpenAPIConfig(true)) + return openapibuilder.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices([]*restful.WebService{b.ws}), b.getOpenAPIConfig()) } // Implements CanonicalTypeNamer @@ -311,7 +311,7 @@ func (b *builder) buildRoute(root, path, httpMethod, actionVerb, operationVerb s Path(root+path). To(func(req *restful.Request, res *restful.Response) {}). Doc(b.descriptionFor(path, operationVerb)). - Param(b.ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(b.ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation(operationVerb+namespaced+b.kind+strings.Title(subresource(path))). 
Metadata(endpoints.ROUTE_META_GVK, metav1.GroupVersionKind{ Group: b.group, @@ -508,7 +508,7 @@ func (b *builder) buildListSchema(v2 bool) *spec.Schema { } // getOpenAPIConfig builds config which wires up generated definitions for kube-openapi to consume -func (b *builder) getOpenAPIConfig(v2 bool) *common.Config { +func (b *builder) getOpenAPIConfig() *common.Config { return &common.Config{ ProtocolList: []string{"https"}, Info: &spec.Info{ @@ -543,6 +543,40 @@ func (b *builder) getOpenAPIConfig(v2 bool) *common.Config { } } +func (b *builder) getOpenAPIV3Config() *common.OpenAPIV3Config { + return &common.OpenAPIV3Config{ + Info: &spec.Info{ + InfoProps: spec.InfoProps{ + Title: "Kubernetes CRD Swagger", + Version: "v0.1.0", + }, + }, + CommonResponses: map[int]*spec3.Response{ + 401: { + ResponseProps: spec3.ResponseProps{ + Description: "Unauthorized", + }, + }, + }, + GetOperationIDAndTags: openapi.GetOperationIDAndTags, + GetDefinitionName: func(name string) (string, spec.Extensions) { + buildDefinitions.Do(generateBuildDefinitionsFunc) + return namer.GetDefinitionName(name) + }, + GetDefinitions: func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { + def := utilopenapi.GetOpenAPIDefinitionsWithoutDisabledFeatures(generatedopenapi.GetOpenAPIDefinitions)(ref) + def[fmt.Sprintf("%s/%s.%s", b.group, b.version, b.kind)] = common.OpenAPIDefinition{ + Schema: *b.schema, + Dependencies: []string{objectMetaType}, + } + def[fmt.Sprintf("%s/%s.%s", b.group, b.version, b.listKind)] = common.OpenAPIDefinition{ + Schema: *b.listSchema, + } + return def + }, + } +} + func newBuilder(crd *apiextensionsv1.CustomResourceDefinition, version string, schema *structuralschema.Structural, opts Options) *builder { b := &builder{ schema: &spec.Schema{ diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/controller.go index ee9c0afb0fafa..b2ffe6b3db877 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/openapi/controller.go @@ -66,24 +66,23 @@ type Controller struct { // changed. crdCache is a cached.Replaceable and updates are thread // safe. Thus, no lock is needed to protect this struct. type specCache struct { - crdCache cached.Replaceable[*apiextensionsv1.CustomResourceDefinition] - mergedVersionSpec cached.Data[*spec.Swagger] + crdCache cached.LastSuccess[*apiextensionsv1.CustomResourceDefinition] + mergedVersionSpec cached.Value[*spec.Swagger] } func (s *specCache) update(crd *apiextensionsv1.CustomResourceDefinition) { - s.crdCache.Replace(cached.NewResultOK(crd, generateCRDHash(crd))) + s.crdCache.Store(cached.Static(crd, generateCRDHash(crd))) } func createSpecCache(crd *apiextensionsv1.CustomResourceDefinition) *specCache { s := specCache{} s.update(crd) - s.mergedVersionSpec = cached.NewTransformer[*apiextensionsv1.CustomResourceDefinition](func(result cached.Result[*apiextensionsv1.CustomResourceDefinition]) cached.Result[*spec.Swagger] { - if result.Err != nil { + s.mergedVersionSpec = cached.Transform[*apiextensionsv1.CustomResourceDefinition](func(crd *apiextensionsv1.CustomResourceDefinition, etag string, err error) (*spec.Swagger, string, error) { + if err != nil { // This should never happen, but return the err if it does. 
- return cached.NewResultErr[*spec.Swagger](result.Err) + return nil, "", err } - crd := result.Data mergeSpec := &spec.Swagger{} for _, v := range crd.Spec.Versions { if !v.Served { @@ -93,15 +92,15 @@ func createSpecCache(crd *apiextensionsv1.CustomResourceDefinition) *specCache { // Defaults must be pruned here for CRDs to cleanly merge with the static // spec that already has defaults pruned if err != nil { - return cached.NewResultErr[*spec.Swagger](err) + return nil, "", err } s.Definitions = handler.PruneDefaults(s.Definitions) mergeSpec, err = builder.MergeSpecs(mergeSpec, s) if err != nil { - return cached.NewResultErr[*spec.Swagger](err) + return nil, "", err } } - return cached.NewResultOK(mergeSpec, generateCRDHash(crd)) + return mergeSpec, generateCRDHash(crd), nil }, &s.crdCache) return &s } @@ -234,27 +233,27 @@ func (c *Controller) sync(name string) error { // updateSpecLocked updates the cached spec graph. func (c *Controller) updateSpecLocked() { - specList := make([]cached.Data[*spec.Swagger], 0, len(c.specsByName)) + specList := make([]cached.Value[*spec.Swagger], 0, len(c.specsByName)) for crd := range c.specsByName { specList = append(specList, c.specsByName[crd].mergedVersionSpec) } - cache := cached.NewListMerger(func(results []cached.Result[*spec.Swagger]) cached.Result[*spec.Swagger] { + cache := cached.MergeList(func(results []cached.Result[*spec.Swagger]) (*spec.Swagger, string, error) { localCRDSpec := make([]*spec.Swagger, 0, len(results)) for k := range results { if results[k].Err == nil { - localCRDSpec = append(localCRDSpec, results[k].Data) + localCRDSpec = append(localCRDSpec, results[k].Value) } } mergedSpec, err := builder.MergeSpecs(c.staticSpec, localCRDSpec...) if err != nil { - return cached.NewResultErr[*spec.Swagger](fmt.Errorf("failed to merge specs: %v", err)) + return nil, "", fmt.Errorf("failed to merge specs: %v", err) } // A UUID is returned for the etag because we will only // create a new merger when a CRD has changed. A hash based // etag is more expensive because the CRDs are not // premarshalled. - return cached.NewResultOK(mergedSpec, uuid.New().String()) + return mergedSpec, uuid.New().String(), nil }, specList) c.openAPIService.UpdateSpecLazy(cache) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go index d6378df7ac302..f4992e619fb31 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go @@ -230,7 +230,6 @@ func schema_k8sio_api_autoscaling_v1_ContainerResourceMetricStatus(ref common.Re "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -359,7 +358,6 @@ func schema_k8sio_api_autoscaling_v1_ExternalMetricStatus(ref common.ReferenceCa "currentValue": { SchemaProps: spec.SchemaProps{ Description: "currentValue is the current value of the metric (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -454,7 +452,6 @@ func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerCondition(ref common "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -764,7 +761,6 @@ func schema_k8sio_api_autoscaling_v1_ObjectMetricSource(ref common.ReferenceCall "targetValue": { SchemaProps: spec.SchemaProps{ Description: "targetValue is the target value of the metric (as a quantity).", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -814,7 +810,6 @@ func schema_k8sio_api_autoscaling_v1_ObjectMetricStatus(ref common.ReferenceCall "currentValue": { SchemaProps: spec.SchemaProps{ Description: "currentValue is the current value of the metric (as a quantity).", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -857,7 +852,6 @@ func schema_k8sio_api_autoscaling_v1_PodsMetricSource(ref common.ReferenceCallba "targetAverageValue": { SchemaProps: spec.SchemaProps{ Description: "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -894,7 +888,6 @@ func schema_k8sio_api_autoscaling_v1_PodsMetricStatus(ref common.ReferenceCallba "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -975,7 +968,6 @@ func schema_k8sio_api_autoscaling_v1_ResourceMetricStatus(ref common.ReferenceCa "currentAverageValue": { SchemaProps: spec.SchemaProps{ Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, @@ -1117,8 +1109,7 @@ func schema_pkg_apis_apiextensions_v1_ConversionRequest(ref common.ReferenceCall Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -1155,8 +1146,7 @@ func schema_pkg_apis_apiextensions_v1_ConversionResponse(ref common.ReferenceCal Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -1385,7 +1375,6 @@ func schema_pkg_apis_apiextensions_v1_CustomResourceDefinitionCondition(ref comm "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2003,8 +1992,7 @@ func schema_pkg_apis_apiextensions_v1_JSONSchemaProps(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), }, }, }, @@ -2125,8 +2113,7 @@ func schema_pkg_apis_apiextensions_v1_JSONSchemaProps(ref common.ReferenceCallba Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSONSchemaPropsOrStringArray"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSONSchemaPropsOrStringArray"), }, }, }, @@ -2482,8 +2469,7 @@ func schema_pkg_apis_apiextensions_v1beta1_ConversionRequest(ref common.Referenc Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -2520,8 +2506,7 @@ func schema_pkg_apis_apiextensions_v1beta1_ConversionResponse(ref common.Referen Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -2765,7 +2750,6 @@ func schema_pkg_apis_apiextensions_v1beta1_CustomResourceDefinitionCondition(ref "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -3416,8 +3400,7 @@ func schema_pkg_apis_apiextensions_v1beta1_JSONSchemaProps(ref common.ReferenceC Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.JSON"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.JSON"), }, }, }, @@ -3538,8 +3521,7 @@ func 
schema_pkg_apis_apiextensions_v1beta1_JSONSchemaProps(ref common.ReferenceC Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.JSONSchemaPropsOrStringArray"), + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.JSONSchemaPropsOrStringArray"), }, }, }, @@ -4260,7 +4242,6 @@ func schema_pkg_apis_meta_v1_Condition(ref common.ReferenceCallback) common.Open "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -4816,8 +4797,7 @@ func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDe Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -5103,7 +5083,6 @@ func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.Ope "creationTimestamp": { SchemaProps: spec.SchemaProps{ Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -5885,7 +5864,6 @@ func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenA "object": { SchemaProps: spec.SchemaProps{ Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. 
The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -6084,7 +6062,6 @@ func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.Ope "object": { SchemaProps: spec.SchemaProps{ Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy.go index 8d3796ac9afcb..ceaf56629b08f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy.go @@ -18,6 +18,7 @@ package customresource import ( "context" + "fmt" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -84,36 +85,31 @@ func (a statusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.O // ValidateUpdate is the default update validation for an end user updating status. func (a statusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { - var errs field.ErrorList - errs = append(errs, a.customResourceStrategy.validator.ValidateStatusUpdate(ctx, obj, old, a.scale)...) - uNew, ok := obj.(*unstructured.Unstructured) if !ok { - return errs + return field.ErrorList{field.Invalid(field.NewPath(""), obj, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", obj))} } uOld, ok := old.(*unstructured.Unstructured) - var oldObject map[string]interface{} if !ok { - oldObject = nil - } else { - oldObject = uOld.Object + return field.ErrorList{field.Invalid(field.NewPath(""), old, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", old))} } - v := obj.GetObjectKind().GroupVersionKind().Version + var errs field.ErrorList + errs = append(errs, a.customResourceStrategy.validator.ValidateStatusUpdate(ctx, uNew, uOld, a.scale)...) // ratcheting validation of x-kubernetes-list-type value map and set - if newErrs := structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], uNew.Object); len(newErrs) > 0 { - if oldErrs := structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], oldObject); len(oldErrs) == 0 { + if newErrs := structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchema, uNew.Object); len(newErrs) > 0 { + if oldErrs := structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchema, uOld.Object); len(oldErrs) == 0 { errs = append(errs, newErrs...) 
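The status update path above only surfaces x-kubernetes-list-type errors that are new relative to the stored object. Restated outside the hunk as a standalone helper for readability (the helper name is mine; ValidateListSetsAndMaps and the types are the ones used above):

    package customresource

    import (
        structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
        structurallisttype "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/listtype"
        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    // ratchetedListErrors reports list-type (map/set) violations in newObj only
    // when oldObj did not already have violations, mirroring the update path above.
    func ratchetedListErrors(s *structuralschema.Structural, newObj, oldObj map[string]interface{}) field.ErrorList {
        newErrs := structurallisttype.ValidateListSetsAndMaps(nil, s, newObj)
        if len(newErrs) == 0 {
            return nil
        }
        if oldErrs := structurallisttype.ValidateListSetsAndMaps(nil, s, oldObj); len(oldErrs) > 0 {
            // The stored object already violated the invariant; tolerate it on update.
            return nil
        }
        return newErrs
    }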
} } // validate x-kubernetes-validations rules - if celValidator, ok := a.customResourceStrategy.celValidators[v]; ok { + if celValidator := a.customResourceStrategy.celValidator; celValidator != nil { if has, err := hasBlockingErr(errs); has { errs = append(errs, err) } else { - err, _ := celValidator.Validate(ctx, nil, a.customResourceStrategy.structuralSchemas[v], uNew.Object, oldObject, celconfig.RuntimeCELCostBudget) + err, _ := celValidator.Validate(ctx, nil, a.customResourceStrategy.structuralSchema, uNew.Object, uOld.Object, celconfig.RuntimeCELCostBudget) errs = append(errs, err...) } } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy_test.go index a91e8f0587d8c..97538054286cd 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy_test.go @@ -198,9 +198,7 @@ func TestStatusStrategyValidateUpdate(t *testing.T) { } strategy.customResourceStrategy.validator.kind = kind ss, _ := structuralschema.NewStructural(crd.Spec.Versions[0].Schema.OpenAPIV3Schema) - strategy.structuralSchemas = map[string]*structuralschema.Structural{ - crd.Spec.Versions[0].Name: ss, - } + strategy.structuralSchema = ss ctx := context.TODO() diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/strategy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/strategy.go index 0fdac7635b913..3852125c23d24 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/strategy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/strategy.go @@ -18,13 +18,16 @@ package customresource import ( "context" + "fmt" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model" structurallisttype "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/listtype" schemaobjectmeta "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta" "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" + apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,6 +39,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" celconfig "k8s.io/apiserver/pkg/apis/cel" + "k8s.io/apiserver/pkg/cel/common" "k8s.io/apiserver/pkg/features" apiserverstorage "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" @@ -44,29 +48,25 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) -// customResourceStrategy implements behavior for CustomResources. 
+// customResourceStrategy implements behavior for CustomResources for a single +// version type customResourceStrategy struct { runtime.ObjectTyper names.NameGenerator - namespaceScoped bool - validator customResourceValidator - structuralSchemas map[string]*structuralschema.Structural - celValidators map[string]*cel.Validator - status *apiextensions.CustomResourceSubresourceStatus - scale *apiextensions.CustomResourceSubresourceScale - kind schema.GroupVersionKind + namespaceScoped bool + validator customResourceValidator + structuralSchema *structuralschema.Structural + celValidator *cel.Validator + status *apiextensions.CustomResourceSubresourceStatus + scale *apiextensions.CustomResourceSubresourceScale + kind schema.GroupVersionKind } -func NewStrategy(typer runtime.ObjectTyper, namespaceScoped bool, kind schema.GroupVersionKind, schemaValidator, statusSchemaValidator validation.SchemaValidator, structuralSchemas map[string]*structuralschema.Structural, status *apiextensions.CustomResourceSubresourceStatus, scale *apiextensions.CustomResourceSubresourceScale) customResourceStrategy { - celValidators := map[string]*cel.Validator{} +func NewStrategy(typer runtime.ObjectTyper, namespaceScoped bool, kind schema.GroupVersionKind, schemaValidator, statusSchemaValidator validation.SchemaValidator, structuralSchema *structuralschema.Structural, status *apiextensions.CustomResourceSubresourceStatus, scale *apiextensions.CustomResourceSubresourceScale) customResourceStrategy { + var celValidator *cel.Validator if utilfeature.DefaultFeatureGate.Enabled(features.CustomResourceValidationExpressions) { - for name, s := range structuralSchemas { - v := cel.NewValidator(s, true, celconfig.PerCallLimit) // CEL programs are compiled and cached here - if v != nil { - celValidators[name] = v - } - } + celValidator = cel.NewValidator(structuralSchema, true, celconfig.PerCallLimit) // CEL programs are compiled and cached here } return customResourceStrategy{ @@ -81,9 +81,9 @@ func NewStrategy(typer runtime.ObjectTyper, namespaceScoped bool, kind schema.Gr schemaValidator: schemaValidator, statusSchemaValidator: statusSchemaValidator, }, - structuralSchemas: structuralSchemas, - celValidators: celValidators, - kind: kind, + structuralSchema: structuralSchema, + celValidator: celValidator, + kind: kind, } } @@ -163,25 +163,27 @@ func copyNonMetadata(original map[string]interface{}) map[string]interface{} { // Validate validates a new CustomResource. func (a customResourceStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return field.ErrorList{field.Invalid(field.NewPath(""), u, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", obj))} + } + var errs field.ErrorList - errs = append(errs, a.validator.Validate(ctx, obj, a.scale)...) + errs = append(errs, a.validator.Validate(ctx, u, a.scale)...) // validate embedded resources - if u, ok := obj.(*unstructured.Unstructured); ok { - v := obj.GetObjectKind().GroupVersionKind().Version - errs = append(errs, schemaobjectmeta.Validate(nil, u.Object, a.structuralSchemas[v], false)...) - - // validate x-kubernetes-list-type "map" and "set" invariant - errs = append(errs, structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], u.Object)...) 
- - // validate x-kubernetes-validations rules - if celValidator, ok := a.celValidators[v]; ok { - if has, err := hasBlockingErr(errs); has { - errs = append(errs, err) - } else { - err, _ := celValidator.Validate(ctx, nil, a.structuralSchemas[v], u.Object, nil, celconfig.RuntimeCELCostBudget) - errs = append(errs, err...) - } + errs = append(errs, schemaobjectmeta.Validate(nil, u.Object, a.structuralSchema, false)...) + + // validate x-kubernetes-list-type "map" and "set" invariant + errs = append(errs, structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchema, u.Object)...) + + // validate x-kubernetes-validations rules + if celValidator := a.celValidator; celValidator != nil { + if has, err := hasBlockingErr(errs); has { + errs = append(errs, err) + } else { + err, _ := celValidator.Validate(ctx, nil, a.structuralSchema, u.Object, nil, celconfig.RuntimeCELCostBudget) + errs = append(errs, err...) } } @@ -234,33 +236,40 @@ func (customResourceStrategy) AllowUnconditionalUpdate() bool { // ValidateUpdate is the default update validation for an end user updating status. func (a customResourceStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { - var errs field.ErrorList - errs = append(errs, a.validator.ValidateUpdate(ctx, obj, old, a.scale)...) - uNew, ok := obj.(*unstructured.Unstructured) if !ok { - return errs + return field.ErrorList{field.Invalid(field.NewPath(""), obj, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", obj))} } uOld, ok := old.(*unstructured.Unstructured) if !ok { - return errs + return field.ErrorList{field.Invalid(field.NewPath(""), old, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", old))} } + var options []validation.ValidationOption + var celOptions []cel.Option + if utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CRDValidationRatcheting) { + correlatedObject := common.NewCorrelatedObject(uNew.Object, uOld.Object, &model.Structural{Structural: a.structuralSchema}) + options = append(options, validation.WithRatcheting(correlatedObject)) + celOptions = append(celOptions, cel.WithRatcheting(correlatedObject)) + } + + var errs field.ErrorList + errs = append(errs, a.validator.ValidateUpdate(ctx, uNew, uOld, a.scale, options...)...) + // Checks the embedded objects. We don't make a difference between update and create for those. - v := obj.GetObjectKind().GroupVersionKind().Version - errs = append(errs, schemaobjectmeta.Validate(nil, uNew.Object, a.structuralSchemas[v], false)...) + errs = append(errs, schemaobjectmeta.Validate(nil, uNew.Object, a.structuralSchema, false)...) // ratcheting validation of x-kubernetes-list-type value map and set - if oldErrs := structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], uOld.Object); len(oldErrs) == 0 { - errs = append(errs, structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchemas[v], uNew.Object)...) + if oldErrs := structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchema, uOld.Object); len(oldErrs) == 0 { + errs = append(errs, structurallisttype.ValidateListSetsAndMaps(nil, a.structuralSchema, uNew.Object)...) 
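Condensed from the hunk above, the end-to-end wiring on the CRD update path: one CorrelatedObject is built per update from the new object, the old object and the structural schema, then handed to the OpenAPI schema validator via WithRatcheting (the CEL validator receives the same correlation through cel.WithRatcheting, as shown above). A sketch with illustrative function and variable names:

    package customresource

    import (
        structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
        "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model"
        "k8s.io/apiextensions-apiserver/pkg/apiserver/validation"
        apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/util/validation/field"
        "k8s.io/apiserver/pkg/cel/common"
        utilfeature "k8s.io/apiserver/pkg/util/feature"
    )

    // validateCRUpdate sketches how one correlation between old and new is built
    // under the CRDValidationRatcheting gate and threaded into schema validation.
    func validateCRUpdate(newObj, oldObj *unstructured.Unstructured, s *structuralschema.Structural, v validation.SchemaValidator) field.ErrorList {
        var opts []validation.ValidationOption
        if utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CRDValidationRatcheting) {
            corr := common.NewCorrelatedObject(newObj.Object, oldObj.Object, &model.Structural{Structural: s})
            opts = append(opts, validation.WithRatcheting(corr))
        }
        // ValidateCustomResourceUpdate forwards the options to v.ValidateUpdate.
        return validation.ValidateCustomResourceUpdate(nil, newObj.UnstructuredContent(), oldObj.UnstructuredContent(), v, opts...)
    }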
} // validate x-kubernetes-validations rules - if celValidator, ok := a.celValidators[v]; ok { + if celValidator := a.celValidator; celValidator != nil { if has, err := hasBlockingErr(errs); has { errs = append(errs, err) } else { - err, _ := celValidator.Validate(ctx, nil, a.structuralSchemas[v], uNew.Object, uOld.Object, celconfig.RuntimeCELCostBudget) + err, _ := celValidator.Validate(ctx, nil, a.structuralSchema, uNew.Object, uOld.Object, celconfig.RuntimeCELCostBudget, celOptions...) errs = append(errs, err...) } } @@ -306,10 +315,10 @@ func (a customResourceStrategy) MatchCustomResourceDefinitionStorage(label label } } -// OpenAPIv3 type/maxLength/maxItems/MaxProperties/required/wrong type field validation failures are viewed as blocking err for CEL validation +// OpenAPIv3 type/maxLength/maxItems/MaxProperties/required/enum violation/wrong type field validation failures are viewed as blocking err for CEL validation func hasBlockingErr(errs field.ErrorList) (bool, *field.Error) { for _, err := range errs { - if err.Type == field.ErrorTypeRequired || err.Type == field.ErrorTypeTooLong || err.Type == field.ErrorTypeTooMany || err.Type == field.ErrorTypeTypeInvalid { + if err.Type == field.ErrorTypeNotSupported || err.Type == field.ErrorTypeRequired || err.Type == field.ErrorTypeTooLong || err.Type == field.ErrorTypeTooMany || err.Type == field.ErrorTypeTypeInvalid { return true, field.Invalid(nil, nil, "some validation rules were not checked because the object was invalid; correct the existing errors to complete validation") } } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go index 54be1db9752df..adcb5c22024d1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" apimachineryvalidation "k8s.io/apimachinery/pkg/util/validation" @@ -44,58 +43,32 @@ type customResourceValidator struct { statusSchemaValidator apiextensionsvalidation.SchemaValidator } -func (a customResourceValidator) Validate(ctx context.Context, obj runtime.Object, scale *apiextensions.CustomResourceSubresourceScale) field.ErrorList { - u, ok := obj.(*unstructured.Unstructured) - if !ok { - return field.ErrorList{field.Invalid(field.NewPath(""), u, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", u))} - } - accessor, err := meta.Accessor(obj) - if err != nil { - return field.ErrorList{field.Invalid(field.NewPath("metadata"), nil, err.Error())} - } - - if errs := a.ValidateTypeMeta(ctx, u); len(errs) > 0 { +func (a customResourceValidator) Validate(ctx context.Context, obj *unstructured.Unstructured, scale *apiextensions.CustomResourceSubresourceScale) field.ErrorList { + if errs := a.ValidateTypeMeta(ctx, obj); len(errs) > 0 { return errs } var allErrs field.ErrorList - allErrs = append(allErrs, validation.ValidateObjectMetaAccessor(accessor, a.namespaceScoped, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) - allErrs = append(allErrs, apiextensionsvalidation.ValidateCustomResource(nil, u.UnstructuredContent(), a.schemaValidator)...) 
- allErrs = append(allErrs, a.ValidateScaleSpec(ctx, u, scale)...) - allErrs = append(allErrs, a.ValidateScaleStatus(ctx, u, scale)...) + allErrs = append(allErrs, validation.ValidateObjectMetaAccessor(obj, a.namespaceScoped, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + allErrs = append(allErrs, apiextensionsvalidation.ValidateCustomResource(nil, obj.UnstructuredContent(), a.schemaValidator)...) + allErrs = append(allErrs, a.ValidateScaleSpec(ctx, obj, scale)...) + allErrs = append(allErrs, a.ValidateScaleStatus(ctx, obj, scale)...) return allErrs } -func (a customResourceValidator) ValidateUpdate(ctx context.Context, obj, old runtime.Object, scale *apiextensions.CustomResourceSubresourceScale) field.ErrorList { - u, ok := obj.(*unstructured.Unstructured) - if !ok { - return field.ErrorList{field.Invalid(field.NewPath(""), u, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", u))} - } - oldU, ok := old.(*unstructured.Unstructured) - if !ok { - return field.ErrorList{field.Invalid(field.NewPath(""), old, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", u))} - } - objAccessor, err := meta.Accessor(obj) - if err != nil { - return field.ErrorList{field.Invalid(field.NewPath("metadata"), nil, err.Error())} - } - oldAccessor, err := meta.Accessor(old) - if err != nil { - return field.ErrorList{field.Invalid(field.NewPath("metadata"), nil, err.Error())} - } - - if errs := a.ValidateTypeMeta(ctx, u); len(errs) > 0 { +func (a customResourceValidator) ValidateUpdate(ctx context.Context, obj, old *unstructured.Unstructured, scale *apiextensions.CustomResourceSubresourceScale, options ...apiextensionsvalidation.ValidationOption) field.ErrorList { + if errs := a.ValidateTypeMeta(ctx, obj); len(errs) > 0 { return errs } var allErrs field.ErrorList - allErrs = append(allErrs, validation.ValidateObjectMetaAccessorUpdate(objAccessor, oldAccessor, field.NewPath("metadata"))...) - allErrs = append(allErrs, apiextensionsvalidation.ValidateCustomResourceUpdate(nil, u.UnstructuredContent(), oldU.UnstructuredContent(), a.schemaValidator)...) - allErrs = append(allErrs, a.ValidateScaleSpec(ctx, u, scale)...) - allErrs = append(allErrs, a.ValidateScaleStatus(ctx, u, scale)...) + allErrs = append(allErrs, validation.ValidateObjectMetaAccessorUpdate(obj, old, field.NewPath("metadata"))...) + allErrs = append(allErrs, apiextensionsvalidation.ValidateCustomResourceUpdate(nil, obj.UnstructuredContent(), old.UnstructuredContent(), a.schemaValidator, options...)...) + allErrs = append(allErrs, a.ValidateScaleSpec(ctx, obj, scale)...) + allErrs = append(allErrs, a.ValidateScaleStatus(ctx, obj, scale)...) return allErrs } @@ -119,35 +92,18 @@ func validateKubeFinalizerName(stringValue string, fldPath *field.Path) []string return allWarnings } -func (a customResourceValidator) ValidateStatusUpdate(ctx context.Context, obj, old runtime.Object, scale *apiextensions.CustomResourceSubresourceScale) field.ErrorList { - u, ok := obj.(*unstructured.Unstructured) - if !ok { - return field.ErrorList{field.Invalid(field.NewPath(""), u, fmt.Sprintf("has type %T. Must be a pointer to an Unstructured type", u))} - } - oldU, ok := old.(*unstructured.Unstructured) - if !ok { - return field.ErrorList{field.Invalid(field.NewPath(""), old, fmt.Sprintf("has type %T. 
Must be a pointer to an Unstructured type", u))} - } - objAccessor, err := meta.Accessor(obj) - if err != nil { - return field.ErrorList{field.Invalid(field.NewPath("metadata"), nil, err.Error())} - } - oldAccessor, err := meta.Accessor(old) - if err != nil { - return field.ErrorList{field.Invalid(field.NewPath("metadata"), nil, err.Error())} - } - - if errs := a.ValidateTypeMeta(ctx, u); len(errs) > 0 { +func (a customResourceValidator) ValidateStatusUpdate(ctx context.Context, obj, old *unstructured.Unstructured, scale *apiextensions.CustomResourceSubresourceScale) field.ErrorList { + if errs := a.ValidateTypeMeta(ctx, obj); len(errs) > 0 { return errs } var allErrs field.ErrorList - allErrs = append(allErrs, validation.ValidateObjectMetaAccessorUpdate(objAccessor, oldAccessor, field.NewPath("metadata"))...) - if status, hasStatus := u.UnstructuredContent()["status"]; hasStatus { - allErrs = append(allErrs, apiextensionsvalidation.ValidateCustomResourceUpdate(nil, status, oldU.UnstructuredContent()["status"], a.statusSchemaValidator)...) + allErrs = append(allErrs, validation.ValidateObjectMetaAccessorUpdate(obj, old, field.NewPath("metadata"))...) + if status, hasStatus := obj.UnstructuredContent()["status"]; hasStatus { + allErrs = append(allErrs, apiextensionsvalidation.ValidateCustomResourceUpdate(nil, status, old.UnstructuredContent()["status"], a.statusSchemaValidator)...) } - allErrs = append(allErrs, a.ValidateScaleStatus(ctx, u, scale)...) + allErrs = append(allErrs, a.ValidateScaleStatus(ctx, obj, scale)...) return allErrs } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/ratcheting_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/ratcheting_test.go index cc88e1d8114a5..6d0b2c549442c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/ratcheting_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/ratcheting_test.go @@ -17,6 +17,7 @@ limitations under the License. package integration_test import ( + "bytes" "context" "encoding/json" "errors" @@ -36,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/dynamic" featuregatetesting "k8s.io/component-base/featuregate/testing" @@ -95,11 +97,50 @@ var fakeRESTMapper map[schema.GroupVersionResource]string = map[schema.GroupVers myCRDV1Beta1: "MyCoolCRD", } +// FixTabsOrDie counts the number of tab characters preceding the first +// line in the given yaml object. It removes that many tabs from every +// line. It panics (it's a test function) if some line has fewer tabs +// than the first line. +// +// The purpose of this is to make it easier to read tests. +func FixTabsOrDie(in string) string { + lines := bytes.Split([]byte(in), []byte{'\n'}) + if len(lines[0]) == 0 && len(lines) > 1 { + lines = lines[1:] + } + // Create prefix made of tabs that we want to remove. + var prefix []byte + for _, c := range lines[0] { + if c != '\t' { + break + } + prefix = append(prefix, byte('\t')) + } + // Remove prefix from all tabs, fail otherwise. 
+ for i := range lines { + line := lines[i] + // It's OK for the last line to be blank (trailing \n) + if i == len(lines)-1 && len(line) <= len(prefix) && bytes.TrimSpace(line) == nil { + lines[i] = []byte{} + break + } + if !bytes.HasPrefix(line, prefix) { + panic(fmt.Errorf("line %d doesn't start with expected number (%d) of tabs: %v", i, len(prefix), string(line))) + } + lines[i] = line[len(prefix):] + } + joined := string(bytes.Join(lines, []byte{'\n'})) + + // Convert the rest of the tabs to spaces since yaml doesn't like tabs + // (assuming 2 space alignment) + return strings.ReplaceAll(joined, "\t", " ") +} + type applyPatchOperation struct { description string gvr schema.GroupVersionResource name string - patch map[string]interface{} + patch interface{} } func (a applyPatchOperation) Do(ctx *ratchetingTestContext) error { @@ -109,25 +150,33 @@ func (a applyPatchOperation) Do(ctx *ratchetingTestContext) error { return fmt.Errorf("no mapping found for Gvr %v, add entry to fakeRESTMapper", a.gvr) } - a.patch["kind"] = kind - a.patch["apiVersion"] = a.gvr.GroupVersion().String() - - if meta, ok := a.patch["metadata"]; ok { - mObj := meta.(map[string]interface{}) - mObj["name"] = a.name - mObj["namespace"] = "default" - } else { - a.patch["metadata"] = map[string]interface{}{ - "name": a.name, - "namespace": "default", + patch := &unstructured.Unstructured{} + if obj, ok := a.patch.(map[string]interface{}); ok { + patch.Object = obj + } else if str, ok := a.patch.(string); ok { + str = FixTabsOrDie(str) + if err := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(str), len(str)).Decode(&patch.Object); err != nil { + return err } + } else { + return fmt.Errorf("invalid patch type: %T", a.patch) } - _, err := ctx.DynamicClient.Resource(a.gvr).Namespace("default").Apply(context.TODO(), a.name, &unstructured.Unstructured{ - Object: a.patch, - }, metav1.ApplyOptions{ - FieldManager: "manager", - }) + patch.SetKind(kind) + patch.SetAPIVersion(a.gvr.GroupVersion().String()) + patch.SetName(a.name) + patch.SetNamespace("default") + + _, err := ctx.DynamicClient. + Resource(a.gvr). + Namespace(patch.GetNamespace()).
+ Apply( + context.TODO(), + patch.GetName(), + patch, + metav1.ApplyOptions{ + FieldManager: "manager", + }) return err @@ -1175,7 +1224,7 @@ func TestRatchetingFunctionality(t *testing.T) { }, }, { - Name: "ArrayItems correlate by index", + Name: "ArrayItems do not correlate by index", Operations: []ratchetingTestOperation{ updateMyCRDV1Beta1Schema{&apiextensionsv1.JSONSchemaProps{ Type: "object", @@ -1246,9 +1295,9 @@ func TestRatchetingFunctionality(t *testing.T) { }, "otherField": "hello world", }}, - // (This test shows an array can be correlated by index with its old value) - applyPatchOperation{ - "add new, valid fields to elements of the array, ratcheting unchanged old fields within the array elements by correlating by index", + // (This test shows an array cannpt be correlated by index with its old value) + expectError{applyPatchOperation{ + "add new, valid fields to elements of the array, failing to ratchet unchanged old fields within the array elements by correlating by index due to atomic list", myCRDV1Beta1, myCRDInstanceName, map[string]interface{}{ "values": []interface{}{ map[string]interface{}{ @@ -1261,7 +1310,7 @@ func TestRatchetingFunctionality(t *testing.T) { "key2": "valid value", }, }, - }}, + }}}, expectError{ applyPatchOperation{ "reorder the array, preventing index correlation", @@ -1295,13 +1344,54 @@ func TestRatchetingFunctionality(t *testing.T) { }, { Name: "CEL_transition_rules_should_not_ratchet", + Operations: []ratchetingTestOperation{ + updateMyCRDV1Beta1Schema{&apiextensionsv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: ptr(true), + }}, + applyPatchOperation{ + "create instance with strings that do not start with k8s", + myCRDV1Beta1, myCRDInstanceName, + ` + myStringField: myStringValue + myOtherField: myOtherField + `, + }, + updateMyCRDV1Beta1Schema{&apiextensionsv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: ptr(true), + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "myStringField": { + Type: "string", + XValidations: apiextensionsv1.ValidationRules{ + { + Rule: "oldSelf != 'myStringValue' || self == 'validstring'", + }, + }, + }, + }, + }}, + expectError{applyPatchOperation{ + "try to change one field to valid value, but unchanged field fails to be ratcheted by transition rule", + myCRDV1Beta1, myCRDInstanceName, + ` + myOtherField: myNewOtherField + myStringField: myStringValue + `, + }}, + applyPatchOperation{ + "change both fields to valid values", + myCRDV1Beta1, myCRDInstanceName, + ` + myStringField: validstring + myOtherField: myNewOtherField + `, + }, + }, }, // Future Functionality, disabled tests { Name: "CEL Add Change Rule", - // Planned future test. 
CEL Rules are not yet ratcheted in alpha - // implementation of CRD Validation Ratcheting - Disabled: true, Operations: []ratchetingTestOperation{ updateMyCRDV1Beta1Schema{&apiextensionsv1.JSONSchemaProps{ Type: "object", diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod index e9e40543f6ecd..0646884a73881 100644 --- a/staging/src/k8s.io/apimachinery/go.mod +++ b/staging/src/k8s.io/apimachinery/go.mod @@ -2,7 +2,7 @@ module k8s.io/apimachinery -go 1.20 +go 1.21.3 require ( github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 @@ -16,14 +16,14 @@ require ( github.com/google/uuid v1.3.0 github.com/moby/spdystream v0.2.0 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f - github.com/onsi/ginkgo/v2 v2.9.4 + github.com/onsi/ginkgo/v2 v2.13.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 - golang.org/x/net v0.13.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.17.0 golang.org/x/time v0.3.0 gopkg.in/inf.v0 v0.9.1 k8s.io/klog/v2 v2.100.1 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd sigs.k8s.io/structured-merge-diff/v4 v4.3.0 @@ -43,12 +43,13 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/gomega v1.28.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/tools v0.8.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/tools v0.12.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum index a91db8e45655e..1ed7e6914c4b3 100644 --- a/staging/src/k8s.io/apimachinery/go.sum +++ b/staging/src/k8s.io/apimachinery/go.sum @@ -67,10 +67,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -89,47 +89,48 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -150,11 +151,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils 
v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions.go index 60c8209de02f2..cbdf2eeb831fc 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -22,14 +22,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true +// if the conditions are changed by this call. // conditions must be non-nil. // 1. if the condition of the specified type already exists (all fields of the existing condition are updated to // newCondition, LastTransitionTime is set to now if the new status differs from the old status) // 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { +func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) { if conditions == nil { - return + return false } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { @@ -37,7 +38,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond newCondition.LastTransitionTime = metav1.NewTime(time.Now()) } *conditions = append(*conditions, newCondition) - return + return true } if existingCondition.Status != newCondition.Status { @@ -47,18 +48,31 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond } else { existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } + changed = true } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - existingCondition.ObservedGeneration = newCondition.ObservedGeneration + if existingCondition.Reason != newCondition.Reason { + existingCondition.Reason = newCondition.Reason + changed = true + } + if existingCondition.Message != newCondition.Message { + existingCondition.Message = newCondition.Message + changed = true + } + if existingCondition.ObservedGeneration != newCondition.ObservedGeneration { + existingCondition.ObservedGeneration = newCondition.ObservedGeneration + changed = true + } + + return changed } -// RemoveStatusCondition removes the corresponding conditionType from conditions. +// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns +// true if it was present and got removed. // conditions must be non-nil. -func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) { +func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) { if conditions == nil || len(*conditions) == 0 { - return + return false } newConditions := make([]metav1.Condition, 0, len(*conditions)-1) for _, condition := range *conditions { @@ -67,7 +81,10 @@ func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) } } + removed = len(*conditions) != len(newConditions) *conditions = newConditions + + return removed } // FindStatusCondition finds the conditionType in conditions. 
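Note on the conditions.go change above: both helpers now report whether they actually mutated the slice, which lets controllers skip no-op status writes. A minimal sketch of a caller using the new return value (the syncReadyCondition helper and its field values are illustrative, not part of the package):

    import (
        "k8s.io/apimachinery/pkg/api/meta"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // syncReadyCondition is a hypothetical helper: it updates the Ready condition
    // and reports whether a status update is actually needed.
    func syncReadyCondition(conditions *[]metav1.Condition, ready bool, generation int64) bool {
        cond := metav1.Condition{
            Type:               "Ready",
            Status:             metav1.ConditionFalse,
            Reason:             "NotReady",
            Message:            "workload is not ready",
            ObservedGeneration: generation,
        }
        if ready {
            cond.Status = metav1.ConditionTrue
            cond.Reason = "Ready"
            cond.Message = "workload is ready"
        }
        // SetStatusCondition now returns true only when it added the condition or
        // changed status, reason, message, or observedGeneration.
        return meta.SetStatusCondition(conditions, cond)
    }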
diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions_test.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions_test.go index 72e5f680cac3b..248c885800bcf 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/conditions_test.go @@ -29,10 +29,11 @@ func TestSetStatusCondition(t *testing.T) { oneHourAfter := time.Now().Add(1 * time.Hour) tests := []struct { - name string - conditions []metav1.Condition - toAdd metav1.Condition - expected []metav1.Condition + name string + conditions []metav1.Condition + toAdd metav1.Condition + expectChanged bool + expected []metav1.Condition }{ { name: "should-add", @@ -40,7 +41,8 @@ func TestSetStatusCondition(t *testing.T) { {Type: "first"}, {Type: "third"}, }, - toAdd: metav1.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + toAdd: metav1.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + expectChanged: true, expected: []metav1.Condition{ {Type: "first"}, {Type: "third"}, @@ -54,7 +56,8 @@ func TestSetStatusCondition(t *testing.T) { {Type: "second", Status: metav1.ConditionFalse}, {Type: "third"}, }, - toAdd: metav1.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + toAdd: metav1.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + expectChanged: true, expected: []metav1.Condition{ {Type: "first"}, {Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, @@ -68,18 +71,36 @@ func TestSetStatusCondition(t *testing.T) { {Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}}, {Type: "third"}, }, - toAdd: metav1.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourAfter}, Reason: "reason", Message: "message", ObservedGeneration: 3}, + toAdd: metav1.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourAfter}, Reason: "reason", Message: "message", ObservedGeneration: 3}, + expectChanged: true, expected: []metav1.Condition{ {Type: "first"}, {Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message", ObservedGeneration: 3}, {Type: "third"}, }, }, + { + name: "nothing changes", + conditions: []metav1.Condition{{ + Type: "type", + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: oneHourBefore}, + }}, + toAdd: metav1.Condition{Type: "type", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}}, + expected: []metav1.Condition{{ + Type: "type", + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: oneHourBefore}, + }}, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - SetStatusCondition(&test.conditions, test.toAdd) + changed := SetStatusCondition(&test.conditions, test.toAdd) + if test.expectChanged != changed { + t.Errorf("expectChanged=%t != changed=%t", test.expectChanged, changed) + } if !reflect.DeepEqual(test.conditions, test.expected) { t.Error(test.conditions) } @@ -92,6 +113,7 @@ 
func TestRemoveStatusCondition(t *testing.T) { name string conditions []metav1.Condition conditionType string + expectRemoval bool expected []metav1.Condition }{ { @@ -102,6 +124,7 @@ func TestRemoveStatusCondition(t *testing.T) { {Type: "third"}, }, conditionType: "second", + expectRemoval: true, expected: []metav1.Condition{ {Type: "first"}, {Type: "third"}, @@ -131,7 +154,10 @@ func TestRemoveStatusCondition(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - RemoveStatusCondition(&test.conditions, test.conditionType) + removed := RemoveStatusCondition(&test.conditions, test.conditionType) + if test.expectRemoval != removed { + t.Errorf("expectRemoval=%t != removal=%t", test.expectRemoval, removed) + } if !reflect.DeepEqual(test.conditions, test.expected) { t.Error(test.conditions) } diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/amount.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/amount.go index a8866a43e10b0..2eebec667d3b7 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/amount.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -203,6 +203,44 @@ func (a *int64Amount) Sub(b int64Amount) bool { return a.Add(int64Amount{value: -b.value, scale: b.scale}) } +// Mul multiplies the provided b to the current amount, or +// returns false if overflow or underflow would result. +func (a *int64Amount) Mul(b int64) bool { + switch { + case a.value == 0: + return true + case b == 0: + a.value = 0 + a.scale = 0 + return true + case a.scale == 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + a.value = c + case a.scale > 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = positiveScaleInt64(c, a.scale); !ok { + return false + } + a.value = c + default: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = negativeScaleInt64(c, -a.scale); !ok { + return false + } + a.value = c + } + return true +} + // AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision // was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. 
func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/amount_test.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/amount_test.go index 8217cb1399c2f..a6c4054b9899f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/amount_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/amount_test.go @@ -88,6 +88,49 @@ func TestInt64AmountAdd(t *testing.T) { } } } + +func TestInt64AmountMul(t *testing.T) { + for _, test := range []struct { + a int64Amount + b int64 + c int64Amount + ok bool + }{ + {int64Amount{value: 100, scale: 1}, 1000, int64Amount{value: 100000, scale: 1}, true}, + {int64Amount{value: 100, scale: -1}, 1000, int64Amount{value: 100000, scale: -1}, true}, + {int64Amount{value: 1, scale: 100}, 10, int64Amount{value: 1, scale: 100}, false}, + {int64Amount{value: 1, scale: -100}, 10, int64Amount{value: 1, scale: -100}, false}, + {int64Amount{value: -5, scale: 2}, 500, int64Amount{value: -2500, scale: 2}, true}, + {int64Amount{value: -5, scale: -2}, 500, int64Amount{value: -2500, scale: -2}, true}, + {int64Amount{value: 0, scale: 1}, 0, int64Amount{value: 0, scale: 1}, true}, + + {int64Amount{value: mostPositive, scale: -1}, 10, int64Amount{value: mostPositive, scale: -1}, false}, + {int64Amount{value: mostPositive, scale: -1}, 0, int64Amount{value: 0, scale: 0}, true}, + {int64Amount{value: mostPositive, scale: 0}, 1, int64Amount{value: mostPositive, scale: 0}, true}, + {int64Amount{value: mostPositive / 10, scale: 1}, 10, int64Amount{value: mostPositive / 10, scale: 1}, false}, + {int64Amount{value: mostPositive, scale: 0}, -1, int64Amount{value: -mostPositive, scale: 0}, true}, + {int64Amount{value: mostNegative, scale: 0}, 1, int64Amount{value: mostNegative, scale: 0}, true}, + {int64Amount{value: mostNegative, scale: 1}, 0, int64Amount{value: 0, scale: 0}, true}, + {int64Amount{value: mostNegative, scale: 1}, 1, int64Amount{value: mostNegative, scale: 1}, false}, + } { + c := test.a + ok := c.Mul(test.b) + if ok && !test.ok { + t.Errorf("unextected success: %v", c) + } else if !ok && test.ok { + t.Errorf("unexpeted failure: %v", c) + } else if ok { + if c != test.c { + t.Errorf("%v: unexpected result: %d", test, c) + } + } else { + if c != test.a { + t.Errorf("%v: overflow multiplication mutated source: %d", test, c) + } + } + } +} + func TestInt64AsCanonicalString(t *testing.T) { for _, test := range []struct { value int64 diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go index b47d554b3c57f..69f1bc336d36b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -592,6 +592,16 @@ func (q *Quantity) Sub(y Quantity) { q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) } +// Mul multiplies the provided y to the current value. +// It will return false if the result is inexact. Otherwise, it will return true. +func (q *Quantity) Mul(y int64) bool { + q.s = "" + if q.d.Dec == nil && q.i.Mul(y) { + return true + } + return q.ToDec().d.Dec.Mul(q.d.Dec, inf.NewDec(y, inf.Scale(0))).UnscaledBig().IsInt64() +} + // Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the // quantity is greater than y. 
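Usage note for the new Quantity.Mul shown above: it scales the quantity in place by an integer and, per its doc comment, returns false if the result is inexact (the slow path falls back to the inf.Dec form). A small sketch (the totalMemory helper is hypothetical):

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    // totalMemory is a hypothetical helper that multiplies a per-replica quantity
    // by a replica count, e.g. totalMemory("500Mi", 3).
    func totalMemory(perReplica string, replicas int64) {
        q := resource.MustParse(perReplica)
        exact := q.Mul(replicas) // q now holds perReplica * replicas
        fmt.Printf("total=%s exact=%t\n", q.String(), exact)
    }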
func (q *Quantity) Cmp(y Quantity) int { diff --git a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity_test.go b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity_test.go index 320477358c8ce..646caee7b78e3 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity_test.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "math" + "math/big" "math/rand" "os" "strings" @@ -32,15 +33,29 @@ import ( inf "gopkg.in/inf.v0" ) +var ( + bigMostPositive = big.NewInt(mostPositive) + bigMostNegative = big.NewInt(mostNegative) +) + func dec(i int64, exponent int) infDecAmount { // See the below test-- scale is the negative of an exponent. return infDecAmount{inf.NewDec(i, inf.Scale(-exponent))} } +func bigDec(i *big.Int, exponent int) infDecAmount { + // See the below test-- scale is the negative of an exponent. + return infDecAmount{inf.NewDecBig(i, inf.Scale(-exponent))} +} + func decQuantity(i int64, exponent int, format Format) Quantity { return Quantity{d: dec(i, exponent), Format: format} } +func bigDecQuantity(i *big.Int, exponent int, format Format) Quantity { + return Quantity{d: bigDec(i, exponent), Format: format} +} + func intQuantity(i int64, exponent Scale, format Format) Quantity { return Quantity{i: int64Amount{value: i, scale: exponent}, Format: format} } @@ -67,6 +82,38 @@ func TestDec(t *testing.T) { } } +func TestBigDec(t *testing.T) { + table := []struct { + got infDecAmount + expect string + }{ + {bigDec(big.NewInt(1), 0), "1"}, + {bigDec(big.NewInt(1), 1), "10"}, + {bigDec(big.NewInt(5), 2), "500"}, + {bigDec(big.NewInt(8), 3), "8000"}, + {bigDec(big.NewInt(2), 0), "2"}, + {bigDec(big.NewInt(1), -1), "0.1"}, + {bigDec(big.NewInt(3), -2), "0.03"}, + {bigDec(big.NewInt(4), -3), "0.004"}, + {bigDec(big.NewInt(0).Add(bigMostPositive, big.NewInt(1)), 0), "9223372036854775808"}, + {bigDec(big.NewInt(0).Add(bigMostPositive, big.NewInt(1)), 1), "92233720368547758080"}, + {bigDec(big.NewInt(0).Add(bigMostPositive, big.NewInt(1)), 2), "922337203685477580800"}, + {bigDec(big.NewInt(0).Add(bigMostPositive, big.NewInt(1)), -1), "922337203685477580.8"}, + {bigDec(big.NewInt(0).Add(bigMostPositive, big.NewInt(1)), -2), "92233720368547758.08"}, + {bigDec(big.NewInt(0).Sub(bigMostNegative, big.NewInt(1)), 0), "-9223372036854775809"}, + {bigDec(big.NewInt(0).Sub(bigMostNegative, big.NewInt(1)), 1), "-92233720368547758090"}, + {bigDec(big.NewInt(0).Sub(bigMostNegative, big.NewInt(1)), 2), "-922337203685477580900"}, + {bigDec(big.NewInt(0).Sub(bigMostNegative, big.NewInt(1)), -1), "-922337203685477580.9"}, + {bigDec(big.NewInt(0).Sub(bigMostNegative, big.NewInt(1)), -2), "-92233720368547758.09"}, + } + + for _, item := range table { + if e, a := item.expect, item.got.Dec.String(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + } +} + // TestQuantityParseZero ensures that when a 0 quantity is passed, its string value is 0 func TestQuantityParseZero(t *testing.T) { zero := MustParse("0") @@ -1137,6 +1184,58 @@ func TestAdd(t *testing.T) { } } +func TestMul(t *testing.T) { + tests := []struct { + a Quantity + b int64 + expected Quantity + ok bool + }{ + {decQuantity(10, 0, DecimalSI), 10, decQuantity(100, 0, DecimalSI), true}, + {decQuantity(10, 0, DecimalSI), 1, decQuantity(10, 0, DecimalSI), true}, + {decQuantity(10, 0, BinarySI), 1, decQuantity(10, 0, BinarySI), true}, + {Quantity{Format: DecimalSI}, 50, decQuantity(0, 0, DecimalSI), true}, + {decQuantity(50, 0, DecimalSI), 0, decQuantity(0, 
0, DecimalSI), true}, + {Quantity{Format: DecimalSI}, 0, decQuantity(0, 0, DecimalSI), true}, + + {decQuantity(10, 0, DecimalSI), -10, decQuantity(-100, 0, DecimalSI), true}, + {decQuantity(-10, 0, DecimalSI), 1, decQuantity(-10, 0, DecimalSI), true}, + {decQuantity(10, 0, BinarySI), -1, decQuantity(-10, 0, BinarySI), true}, + {decQuantity(-50, 0, DecimalSI), 0, decQuantity(0, 0, DecimalSI), true}, + {decQuantity(-50, 0, DecimalSI), -50, decQuantity(2500, 0, DecimalSI), true}, + {Quantity{Format: DecimalSI}, -50, decQuantity(0, 0, DecimalSI), true}, + {decQuantity(mostPositive, 0, DecimalSI), 0, decQuantity(0, 1, DecimalSI), true}, + {decQuantity(mostPositive, 0, DecimalSI), 1, decQuantity(mostPositive, 0, DecimalSI), true}, + {decQuantity(mostPositive, 0, DecimalSI), -1, decQuantity(-mostPositive, 0, DecimalSI), true}, + {decQuantity(mostPositive/2, 0, DecimalSI), 2, decQuantity((mostPositive/2)*2, 0, DecimalSI), true}, + {decQuantity(mostPositive/-2, 0, DecimalSI), -2, decQuantity((mostPositive/2)*2, 0, DecimalSI), true}, + {decQuantity(mostPositive, 0, DecimalSI), 2, + bigDecQuantity(big.NewInt(0).Mul(bigMostPositive, big.NewInt(2)), 0, DecimalSI), false}, + {decQuantity(mostPositive, 0, DecimalSI), 10, decQuantity(mostPositive, 1, DecimalSI), false}, + {decQuantity(mostPositive, 0, DecimalSI), -10, decQuantity(-mostPositive, 1, DecimalSI), false}, + {decQuantity(mostNegative, 0, DecimalSI), 0, decQuantity(0, 1, DecimalSI), true}, + {decQuantity(mostNegative, 0, DecimalSI), 1, decQuantity(mostNegative, 0, DecimalSI), true}, + {decQuantity(mostNegative, 0, DecimalSI), -1, + bigDecQuantity(big.NewInt(0).Add(bigMostPositive, big.NewInt(1)), 0, DecimalSI), false}, + {decQuantity(mostNegative/2, 0, DecimalSI), 2, decQuantity(mostNegative, 0, DecimalSI), true}, + {decQuantity(mostNegative/-2, 0, DecimalSI), -2, decQuantity(mostNegative, 0, DecimalSI), true}, + {decQuantity(mostNegative, 0, DecimalSI), 2, + bigDecQuantity(big.NewInt(0).Mul(bigMostNegative, big.NewInt(2)), 0, DecimalSI), false}, + {decQuantity(mostNegative, 0, DecimalSI), 10, decQuantity(mostNegative, 1, DecimalSI), false}, + {decQuantity(mostNegative, 0, DecimalSI), -10, + bigDecQuantity(big.NewInt(0).Add(bigMostPositive, big.NewInt(1)), 1, DecimalSI), false}, + } + + for i, test := range tests { + if ok := test.a.Mul(test.b); test.ok != ok { + t.Errorf("[%d] Expected ok: %t, got ok: %t", i, test.ok, ok) + } + if test.a.Cmp(test.expected) != 0 { + t.Errorf("[%d] Expected %q, got %q", i, test.expected.AsDec().String(), test.a.AsDec().String()) + } + } +} + func TestAddSubRoundTrip(t *testing.T) { for k := -10; k <= 10; k++ { q := Quantity{Format: DecimalSI} diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index 32f075782a9ab..a32fce5a0c147 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -17,6 +17,7 @@ limitations under the License. package httpstream import ( + "errors" "fmt" "io" "net/http" @@ -95,6 +96,26 @@ type Stream interface { Identifier() uint32 } +// UpgradeFailureError encapsulates the cause for why the streaming +// upgrade request failed. Implements error interface. 
+type UpgradeFailureError struct { + Cause error +} + +func (u *UpgradeFailureError) Error() string { + return fmt.Sprintf("unable to upgrade streaming request: %s", u.Cause) +} + +// IsUpgradeFailure returns true if the passed error is (or wrapped error contains) +// the UpgradeFailureError. +func IsUpgradeFailure(err error) bool { + if err == nil { + return false + } + var upgradeErr *UpgradeFailureError + return errors.As(err, &upgradeErr) +} + // IsUpgradeRequest returns true if the given request is a connection upgrade request func IsUpgradeRequest(req *http.Request) bool { for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/httpstream_test.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/httpstream_test.go index e988bce2b3142..11fb928634eed 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/httpstream_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/httpstream_test.go @@ -17,6 +17,8 @@ limitations under the License. package httpstream import ( + "errors" + "fmt" "net/http" "reflect" "testing" @@ -129,3 +131,40 @@ func TestHandshake(t *testing.T) { } } } + +func TestIsUpgradeFailureError(t *testing.T) { + testCases := map[string]struct { + err error + expected bool + }{ + "nil error should return false": { + err: nil, + expected: false, + }, + "Non-upgrade error should return false": { + err: fmt.Errorf("this is not an upgrade error"), + expected: false, + }, + "UpgradeFailure error should return true": { + err: &UpgradeFailureError{}, + expected: true, + }, + "Wrapped Non-UpgradeFailure error should return false": { + err: fmt.Errorf("%s: %w", "first error", errors.New("Non-upgrade error")), + expected: false, + }, + "Wrapped UpgradeFailure error should return true": { + err: fmt.Errorf("%s: %w", "first error", &UpgradeFailureError{}), + expected: true, + }, + } + + for name, test := range testCases { + t.Run(name, func(t *testing.T) { + actual := IsUpgradeFailure(test.err) + if test.expected != actual { + t.Errorf("expected upgrade failure %t, got %t", test.expected, actual) + } + }) + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index 7fe52ee568edc..c78326fa3b5db 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/httpstream" utilnet "k8s.io/apimachinery/pkg/util/net" + apiproxy "k8s.io/apimachinery/pkg/util/proxy" "k8s.io/apimachinery/third_party/forked/golang/netutil" ) @@ -68,6 +69,10 @@ type SpdyRoundTripper struct { // pingPeriod is a period for sending Ping frames over established // connections. pingPeriod time.Duration + + // upgradeTransport is an optional substitute for dialing if present. This field is + // mutually exclusive with the "tlsConfig", "Dialer", and "proxier". + upgradeTransport http.RoundTripper } var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} @@ -76,43 +81,61 @@ var _ utilnet.Dialer = &SpdyRoundTripper{} // NewRoundTripper creates a new SpdyRoundTripper that will use the specified // tlsConfig. 
-func NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper { +func NewRoundTripper(tlsConfig *tls.Config) (*SpdyRoundTripper, error) { return NewRoundTripperWithConfig(RoundTripperConfig{ - TLS: tlsConfig, + TLS: tlsConfig, + UpgradeTransport: nil, }) } // NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the // specified tlsConfig and proxy func. -func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper { +func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) (*SpdyRoundTripper, error) { return NewRoundTripperWithConfig(RoundTripperConfig{ - TLS: tlsConfig, - Proxier: proxier, + TLS: tlsConfig, + Proxier: proxier, + UpgradeTransport: nil, }) } // NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified -// configuration. -func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper { +// configuration. Returns an error if the SpdyRoundTripper is misconfigured. +func NewRoundTripperWithConfig(cfg RoundTripperConfig) (*SpdyRoundTripper, error) { + // Process UpgradeTransport, which is mutually exclusive to TLSConfig and Proxier. + if cfg.UpgradeTransport != nil { + if cfg.TLS != nil || cfg.Proxier != nil { + return nil, fmt.Errorf("SpdyRoundTripper: UpgradeTransport is mutually exclusive to TLSConfig or Proxier") + } + tlsConfig, err := utilnet.TLSClientConfig(cfg.UpgradeTransport) + if err != nil { + return nil, fmt.Errorf("SpdyRoundTripper: Unable to retrieve TLSConfig from UpgradeTransport: %v", err) + } + cfg.TLS = tlsConfig + } if cfg.Proxier == nil { cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) } return &SpdyRoundTripper{ - tlsConfig: cfg.TLS, - proxier: cfg.Proxier, - pingPeriod: cfg.PingPeriod, - } + tlsConfig: cfg.TLS, + proxier: cfg.Proxier, + pingPeriod: cfg.PingPeriod, + upgradeTransport: cfg.UpgradeTransport, + }, nil } // RoundTripperConfig is a set of options for an SpdyRoundTripper. type RoundTripperConfig struct { - // TLS configuration used by the round tripper. + // TLS configuration used by the round tripper if UpgradeTransport not present. TLS *tls.Config // Proxier is a proxy function invoked on each request. Optional. Proxier func(*http.Request) (*url.URL, error) // PingPeriod is a period for sending SPDY Pings on the connection. // Optional. PingPeriod time.Duration + // UpgradeTransport is a subtitute transport used for dialing. If set, + // this field will be used instead of "TLS" and "Proxier" for connection creation. + // Optional. + UpgradeTransport http.RoundTripper } // TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during @@ -123,7 +146,13 @@ func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config { // Dial implements k8s.io/apimachinery/pkg/util/net.Dialer. 
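To make the constructor change above concrete: the SpdyRoundTripper constructors now return an error so that misconfiguration, such as setting UpgradeTransport together with TLS or Proxier, is surfaced to the caller. A minimal sketch of the updated call pattern (the newUpgradeRoundTripper helper is hypothetical):

    import (
        "crypto/tls"

        "k8s.io/apimachinery/pkg/util/httpstream/spdy"
    )

    func newUpgradeRoundTripper() (*spdy.SpdyRoundTripper, error) {
        // RoundTripperConfig may carry either TLS/Proxier or UpgradeTransport,
        // but not both; the constructor now rejects the invalid combination.
        return spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{
            TLS: &tls.Config{MinVersion: tls.VersionTLS12},
        })
    }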
func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { - conn, err := s.dial(req) + var conn net.Conn + var err error + if s.upgradeTransport != nil { + conn, err = apiproxy.DialURL(req.Context(), req.URL, s.upgradeTransport) + } else { + conn, err = s.dial(req) + } if err != nil { return nil, err } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go index b2c2b88513a42..de88f4e6071e8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go @@ -25,7 +25,9 @@ import ( "net/http" "net/http/httptest" "net/url" + "reflect" "strconv" + "strings" "testing" "github.com/armon/go-socks5" @@ -324,7 +326,10 @@ func TestRoundTripAndNewConnection(t *testing.T) { t.Fatalf("error creating request: %s", err) } - spdyTransport := NewRoundTripper(testCase.clientTLS) + spdyTransport, err := NewRoundTripper(testCase.clientTLS) + if err != nil { + t.Fatalf("error creating SpdyRoundTripper: %v", err) + } var proxierCalled bool var proxyCalledWithHost string @@ -428,6 +433,74 @@ func TestRoundTripAndNewConnection(t *testing.T) { } } +// Tests SpdyRoundTripper constructors +func TestRoundTripConstuctor(t *testing.T) { + testCases := map[string]struct { + tlsConfig *tls.Config + proxier func(req *http.Request) (*url.URL, error) + upgradeTransport http.RoundTripper + expectedTLSConfig *tls.Config + errMsg string + }{ + "Basic TLSConfig; no error": { + tlsConfig: &tls.Config{InsecureSkipVerify: true}, + expectedTLSConfig: &tls.Config{InsecureSkipVerify: true}, + upgradeTransport: nil, + }, + "Basic TLSConfig and Proxier: no error": { + tlsConfig: &tls.Config{InsecureSkipVerify: true}, + proxier: func(req *http.Request) (*url.URL, error) { return nil, nil }, + expectedTLSConfig: &tls.Config{InsecureSkipVerify: true}, + upgradeTransport: nil, + }, + "TLSConfig with UpgradeTransport: error": { + tlsConfig: &tls.Config{InsecureSkipVerify: true}, + upgradeTransport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, + expectedTLSConfig: &tls.Config{InsecureSkipVerify: true}, + errMsg: "SpdyRoundTripper: UpgradeTransport is mutually exclusive to TLSConfig or Proxier", + }, + "Proxier with UpgradeTransport: error": { + proxier: func(req *http.Request) (*url.URL, error) { return nil, nil }, + upgradeTransport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, + expectedTLSConfig: &tls.Config{InsecureSkipVerify: true}, + errMsg: "SpdyRoundTripper: UpgradeTransport is mutually exclusive to TLSConfig or Proxier", + }, + "Only UpgradeTransport: no error": { + upgradeTransport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, + expectedTLSConfig: &tls.Config{InsecureSkipVerify: true}, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + spdyRoundTripper, err := NewRoundTripperWithConfig( + RoundTripperConfig{ + TLS: testCase.tlsConfig, + Proxier: testCase.proxier, + UpgradeTransport: testCase.upgradeTransport, + }, + ) + if testCase.errMsg != "" { + if err == nil { + t.Fatalf("expected error but received none") + } + if !strings.Contains(err.Error(), testCase.errMsg) { + t.Fatalf("expected error message (%s), got (%s)", err.Error(), testCase.errMsg) + } + } + if testCase.errMsg == "" { + if err != nil { + t.Fatalf("unexpected error received: %v", err) + } + actualTLSConfig := 
spdyRoundTripper.TLSClientConfig() + if !reflect.DeepEqual(testCase.expectedTLSConfig, actualTLSConfig) { + t.Errorf("expected TLSConfig (%v), got (%v)", + testCase.expectedTLSConfig, actualTLSConfig) + } + } + }) + } +} + type Interceptor struct { Authorization socks5.AuthContext proxyCalledWithHost *string @@ -544,7 +617,10 @@ func TestRoundTripSocks5AndNewConnection(t *testing.T) { t.Fatalf("error creating request: %s", err) } - spdyTransport := NewRoundTripper(testCase.clientTLS) + spdyTransport, err := NewRoundTripper(testCase.clientTLS) + if err != nil { + t.Fatalf("error creating SpdyRoundTripper: %v", err) + } var proxierCalled bool var proxyCalledWithHost string @@ -704,7 +780,10 @@ func TestRoundTripPassesContextToDialer(t *testing.T) { cancel() req, err := http.NewRequestWithContext(ctx, "GET", u, nil) require.NoError(t, err) - spdyTransport := NewRoundTripper(&tls.Config{}) + spdyTransport, err := NewRoundTripper(&tls.Config{}) + if err != nil { + t.Fatalf("error creating SpdyRoundTripper: %v", err) + } _, err = spdyTransport.Dial(req) assert.EqualError(t, err, "dial tcp 127.0.0.1:1233: operation was canceled") }) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go index d153070cedf9e..7cfdd06321708 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go @@ -32,6 +32,8 @@ import ( "k8s.io/klog/v2" ) +const WebSocketProtocolHeader = "Sec-Websocket-Protocol" + // The Websocket subprotocol "channel.k8s.io" prepends each binary message with a byte indicating // the channel number (zero indexed) the message was sent on. Messages in both directions should // prefix their messages with this channel byte. When used for remote execution, the channel numbers @@ -87,6 +89,23 @@ func IsWebSocketRequest(req *http.Request) bool { return httpstream.IsUpgradeRequest(req) } +// IsWebSocketRequestWithStreamCloseProtocol returns true if the request contains headers +// identifying that it is requesting a websocket upgrade with a remotecommand protocol +// version that supports the "CLOSE" signal; false otherwise. +func IsWebSocketRequestWithStreamCloseProtocol(req *http.Request) bool { + if !IsWebSocketRequest(req) { + return false + } + requestedProtocols := strings.TrimSpace(req.Header.Get(WebSocketProtocolHeader)) + for _, requestedProtocol := range strings.Split(requestedProtocols, ",") { + if protocolSupportsStreamClose(strings.TrimSpace(requestedProtocol)) { + return true + } + } + + return false +} + // IgnoreReceives reads from a WebSocket until it is closed, then returns. If timeout is set, the // read and write deadlines are pushed every time a new message is received. func IgnoreReceives(ws *websocket.Conn, timeout time.Duration) { @@ -168,15 +187,46 @@ func (conn *Conn) SetIdleTimeout(duration time.Duration) { conn.timeout = duration } +// SetWriteDeadline sets a timeout on writing to the websocket connection. The +// passed "duration" identifies how far into the future the write must complete +// by before the timeout fires. +func (conn *Conn) SetWriteDeadline(duration time.Duration) { + conn.ws.SetWriteDeadline(time.Now().Add(duration)) //nolint:errcheck +} + // Open the connection and create channels for reading and writing. It returns // the selected subprotocol, a slice of channels and an error. 
func (conn *Conn) Open(w http.ResponseWriter, req *http.Request) (string, []io.ReadWriteCloser, error) { + // serveHTTPComplete is channel that is closed/selected when "websocket#ServeHTTP" finishes. + serveHTTPComplete := make(chan struct{}) + // Ensure panic in spawned goroutine is propagated into the parent goroutine. + panicChan := make(chan any, 1) go func() { - defer runtime.HandleCrash() - defer conn.Close() + // If websocket server returns, propagate panic if necessary. Otherwise, + // signal HTTPServe finished by closing "serveHTTPComplete". + defer func() { + if p := recover(); p != nil { + panicChan <- p + } else { + close(serveHTTPComplete) + } + }() websocket.Server{Handshake: conn.handshake, Handler: conn.handle}.ServeHTTP(w, req) }() - <-conn.ready + + // In normal circumstances, "websocket.Server#ServeHTTP" calls "initialize" which closes + // "conn.ready" and then blocks until serving is complete. + select { + case <-conn.ready: + klog.V(8).Infof("websocket server initialized--serving") + case <-serveHTTPComplete: + // websocket server returned before completing initialization; cleanup and return error. + conn.closeNonThreadSafe() //nolint:errcheck + return "", nil, fmt.Errorf("websocket server finished before becoming ready") + case p := <-panicChan: + panic(p) + } + rwc := make([]io.ReadWriteCloser, len(conn.channels)) for i := range conn.channels { rwc[i] = conn.channels[i] @@ -225,14 +275,23 @@ func (conn *Conn) resetTimeout() { } } -// Close is only valid after Open has been called -func (conn *Conn) Close() error { - <-conn.ready +// closeNonThreadSafe cleans up by closing streams and the websocket +// connection *without* waiting for the "ready" channel. +func (conn *Conn) closeNonThreadSafe() error { for _, s := range conn.channels { s.Close() } - conn.ws.Close() - return nil + var err error + if conn.ws != nil { + err = conn.ws.Close() + } + return err +} + +// Close is only valid after Open has been called +func (conn *Conn) Close() error { + <-conn.ready + return conn.closeNonThreadSafe() } // protocolSupportsStreamClose returns true if the passed protocol @@ -244,8 +303,8 @@ func protocolSupportsStreamClose(protocol string) bool { // handle implements a websocket handler. 
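A short sketch of the new request check added in conn.go above, mirroring the cases in the accompanying test (the offersStreamClose function and URL are hypothetical):

    import (
        "net/http"

        "k8s.io/apimachinery/pkg/util/httpstream/wsstream"
    )

    func offersStreamClose() bool {
        req, _ := http.NewRequest("GET", "https://example.com/exec", nil)
        req.Header.Set("Connection", "upgrade")
        req.Header.Set("Upgrade", "websocket")
        req.Header.Set(wsstream.WebSocketProtocolHeader, "v4.channel.k8s.io, v5.channel.k8s.io")
        // true: one of the offered subprotocols (v5.channel.k8s.io) supports the CLOSE signal
        return wsstream.IsWebSocketRequestWithStreamCloseProtocol(req)
    }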
func (conn *Conn) handle(ws *websocket.Conn) { - defer conn.Close() conn.initialize(ws) + defer conn.Close() supportsStreamClose := protocolSupportsStreamClose(conn.selectedProtocol) for { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn_test.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn_test.go index 8d9f5d5d41734..e4a88a1a8cdb4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn_test.go @@ -25,6 +25,8 @@ import ( "sync" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/net/websocket" ) @@ -271,3 +273,146 @@ func TestVersionedConn(t *testing.T) { }() } } + +func TestIsWebSocketRequestWithStreamCloseProtocol(t *testing.T) { + tests := map[string]struct { + headers map[string]string + expected bool + }{ + "No headers returns false": { + headers: map[string]string{}, + expected: false, + }, + "Only connection upgrade header is false": { + headers: map[string]string{ + "Connection": "upgrade", + }, + expected: false, + }, + "Only websocket upgrade header is false": { + headers: map[string]string{ + "Upgrade": "websocket", + }, + expected: false, + }, + "Only websocket and connection upgrade headers is false": { + headers: map[string]string{ + "Connection": "upgrade", + "Upgrade": "websocket", + }, + expected: false, + }, + "Missing connection/upgrade header is false": { + headers: map[string]string{ + "Upgrade": "websocket", + WebSocketProtocolHeader: "v5.channel.k8s.io", + }, + expected: false, + }, + "Websocket connection upgrade headers with v5 protocol is true": { + headers: map[string]string{ + "Connection": "upgrade", + "Upgrade": "websocket", + WebSocketProtocolHeader: "v5.channel.k8s.io", + }, + expected: true, + }, + "Websocket connection upgrade headers with wrong case v5 protocol is false": { + headers: map[string]string{ + "Connection": "upgrade", + "Upgrade": "websocket", + WebSocketProtocolHeader: "v5.CHANNEL.k8s.io", // header value is case-sensitive + }, + expected: false, + }, + "Websocket connection upgrade headers with v4 protocol is false": { + headers: map[string]string{ + "Connection": "upgrade", + "Upgrade": "websocket", + WebSocketProtocolHeader: "v4.channel.k8s.io", + }, + expected: false, + }, + "Websocket connection upgrade headers with multiple protocols but missing v5 is false": { + headers: map[string]string{ + "Connection": "upgrade", + "Upgrade": "websocket", + WebSocketProtocolHeader: "v4.channel.k8s.io,v3.channel.k8s.io,v2.channel.k8s.io", + }, + expected: false, + }, + "Websocket connection upgrade headers with multiple protocols including v5 and spaces is true": { + headers: map[string]string{ + "Connection": "upgrade", + "Upgrade": "websocket", + WebSocketProtocolHeader: "v5.channel.k8s.io, v4.channel.k8s.io", + }, + expected: true, + }, + "Websocket connection upgrade headers with multiple protocols out of order including v5 and spaces is true": { + headers: map[string]string{ + "Connection": "upgrade", + "Upgrade": "websocket", + WebSocketProtocolHeader: "v4.channel.k8s.io, v5.channel.k8s.io, v3.channel.k8s.io", + }, + expected: true, + }, + + "Websocket connection upgrade headers key is case-insensitive": { + headers: map[string]string{ + "Connection": "upgrade", + "Upgrade": "websocket", + "sec-websocket-protocol": "v4.channel.k8s.io, v5.channel.k8s.io, v3.channel.k8s.io", + }, + expected: true, + }, + } + + for name, test := range tests { + 
req, err := http.NewRequest("GET", "http://www.example.com/", nil) + require.NoError(t, err) + for key, value := range test.headers { + req.Header.Add(key, value) + } + actual := IsWebSocketRequestWithStreamCloseProtocol(req) + assert.Equal(t, test.expected, actual, "%s: expected (%t), got (%t)", name, test.expected, actual) + } +} + +func TestProtocolSupportsStreamClose(t *testing.T) { + tests := map[string]struct { + protocol string + expected bool + }{ + "empty protocol returns false": { + protocol: "", + expected: false, + }, + "not binary protocol returns false": { + protocol: "base64.channel.k8s.io", + expected: false, + }, + "V1 protocol returns false": { + protocol: "channel.k8s.io", + expected: false, + }, + "V4 protocol returns false": { + protocol: "v4.channel.k8s.io", + expected: false, + }, + "V5 protocol returns true": { + protocol: "v5.channel.k8s.io", + expected: true, + }, + "V5 protocol wrong case returns false": { + protocol: "V5.channel.K8S.io", + expected: false, + }, + } + + for name, test := range tests { + actual := protocolSupportsStreamClose(test.protocol) + assert.Equal(t, test.expected, actual, + "%s: expected (%t), got (%t)", name, test.expected, actual) + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/port_range_test.go b/staging/src/k8s.io/apimachinery/pkg/util/net/port_range_test.go index b4cbe82459c48..94a1b7f881985 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/port_range_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/port_range_test.go @@ -56,10 +56,10 @@ func TestPortRange(t *testing.T) { pr := &PortRange{} var f flag.Value = pr err := f.Set(tc.input) - if err != nil && tc.success == true { + if err != nil && tc.success { t.Errorf("expected success, got %q", err) continue - } else if err == nil && tc.success == false { + } else if err == nil && !tc.success { t.Errorf("expected failure %#v", testCases[i]) continue } else if tc.success { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go index 4ceb2e06eaba7..e5196d1ee831a 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go @@ -29,12 +29,12 @@ import ( "k8s.io/klog/v2" ) -// dialURL will dial the specified URL using the underlying dialer held by the passed +// DialURL will dial the specified URL using the underlying dialer held by the passed // RoundTripper. The primary use of this method is to support proxying upgradable connections. // For this reason this method will prefer to negotiate http/1.1 if the URL scheme is https. 
// If you wish to ensure ALPN negotiates http2 then set NextProto=[]string{"http2"} in the // TLSConfig of the http.Transport -func dialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (net.Conn, error) { +func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (net.Conn, error) { dialAddr := netutil.CanonicalAddr(url) dialer, err := utilnet.DialerFor(transport) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go index 32e951e61cae7..488e878b72360 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial_test.go @@ -143,7 +143,7 @@ func TestDialURL(t *testing.T) { u, _ := url.Parse(ts.URL) _, p, _ := net.SplitHostPort(u.Host) u.Host = net.JoinHostPort("127.0.0.1", p) - conn, err := dialURL(context.Background(), u, transport) + conn, err := DialURL(context.Background(), u, transport) // Make sure dialing doesn't mutate the transport's TLSConfig if !reflect.DeepEqual(tc.TLSConfig, tlsConfigCopy) { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go index ac2ada5472c3d..76acdfb4aca47 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -492,7 +492,7 @@ func getResponse(r io.Reader) (*http.Response, []byte, error) { // dial dials the backend at req.URL and writes req to it. func dial(req *http.Request, transport http.RoundTripper) (net.Conn, error) { - conn, err := dialURL(req.Context(), req.URL, transport) + conn, err := DialURL(req.Context(), req.URL, transport) if err != nil { return nil, fmt.Errorf("error dialing backend: %v", err) } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime.go index d738725caf091..3674914f70189 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. func (r *rudimentaryErrorBackoff) OnError(error) { + now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } + d := now.Sub(r.lastErrorTime) r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) } // GetCaller returns the caller of the function that calls it. 
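The runtime.go change above follows a general pattern: compute the wait while holding the lock, release the lock, then sleep, so only the erroring goroutine blocks instead of every caller queueing on the mutex. The same idea in isolation (the throttler type is illustrative, not part of the package):

    import (
        "sync"
        "time"
    )

    type throttler struct {
        mu        sync.Mutex
        last      time.Time
        minPeriod time.Duration
    }

    func (t *throttler) wait() {
        now := time.Now()
        t.mu.Lock()
        d := now.Sub(t.last)
        t.last = time.Now()
        t.mu.Unlock()
        // Sleep outside the critical section; a zero or negative duration
        // makes time.Sleep return immediately.
        time.Sleep(t.minPeriod - d)
    }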
diff --git a/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime_test.go b/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime_test.go index 2368a513b9143..c886b6826ffc6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime_test.go @@ -24,7 +24,9 @@ import ( "os" "regexp" "strings" + "sync" "testing" + "time" ) func TestHandleCrash(t *testing.T) { @@ -156,3 +158,27 @@ func captureStderr(f func()) (string, error) { return <-resultCh, nil } + +func Test_rudimentaryErrorBackoff_OnError_ParallelSleep(t *testing.T) { + r := &rudimentaryErrorBackoff{ + minPeriod: time.Second, + } + + start := make(chan struct{}) + var wg sync.WaitGroup + for i := 0; i < 30; i++ { + wg.Add(1) + go func() { + <-start + r.OnError(nil) // input error is ignored + wg.Done() + }() + } + st := time.Now() + close(start) + wg.Wait() + + if since := time.Since(st); since > 5*time.Second { + t.Errorf("OnError slept for too long: %s", since) + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/util/version/version.go b/staging/src/k8s.io/apimachinery/pkg/util/version/version.go index 4c6195695336e..2292ba13765f4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/version/version.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/version/version.go @@ -18,6 +18,7 @@ package version import ( "bytes" + "errors" "fmt" "regexp" "strconv" @@ -85,6 +86,47 @@ func parse(str string, semver bool) (*Version, error) { return v, nil } +// HighestSupportedVersion returns the highest supported version +// This function assumes that the highest supported version must be v1.x. +func HighestSupportedVersion(versions []string) (*Version, error) { + if len(versions) == 0 { + return nil, errors.New("empty array for supported versions") + } + + var ( + highestSupportedVersion *Version + theErr error + ) + + for i := len(versions) - 1; i >= 0; i-- { + currentHighestVer, err := ParseGeneric(versions[i]) + if err != nil { + theErr = err + continue + } + + if currentHighestVer.Major() > 1 { + continue + } + + if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { + highestSupportedVersion = currentHighestVer + } + } + + if highestSupportedVersion == nil { + return nil, fmt.Errorf( + "could not find a highest supported version from versions (%v) reported: %+v", + versions, theErr) + } + + if highestSupportedVersion.Major() != 1 { + return nil, fmt.Errorf("highest supported version reported is %v, must be v1.x", highestSupportedVersion) + } + + return highestSupportedVersion, nil +} + // ParseGeneric parses a "generic" version string. 
The version string must consist of two // or more dot-separated numeric fields (the first of which can't have leading zeroes), // followed by arbitrary uninterpreted data (which need not be separated from the final diff --git a/staging/src/k8s.io/apimachinery/pkg/util/version/version_test.go b/staging/src/k8s.io/apimachinery/pkg/util/version/version_test.go index aa0675f7f121e..4d36bf3c12120 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/version/version_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/version/version_test.go @@ -346,3 +346,109 @@ func TestComponents(t *testing.T) { } } } + +func TestHighestSupportedVersion(t *testing.T) { + testCases := []struct { + versions []string + expectedHighestSupportedVersion string + shouldFail bool + }{ + { + versions: []string{"v1.0.0"}, + expectedHighestSupportedVersion: "1.0.0", + shouldFail: false, + }, + { + versions: []string{"0.3.0"}, + shouldFail: true, + }, + { + versions: []string{"0.2.0"}, + shouldFail: true, + }, + { + versions: []string{"1.0.0"}, + expectedHighestSupportedVersion: "1.0.0", + shouldFail: false, + }, + { + versions: []string{"v0.3.0"}, + shouldFail: true, + }, + { + versions: []string{"v0.2.0"}, + shouldFail: true, + }, + { + versions: []string{"0.2.0", "v0.3.0"}, + shouldFail: true, + }, + { + versions: []string{"0.2.0", "v1.0.0"}, + expectedHighestSupportedVersion: "1.0.0", + shouldFail: false, + }, + { + versions: []string{"0.2.0", "v1.2.3"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{"v1.2.3", "v0.3.0"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{"v1.2.3", "v0.3.0", "2.0.1"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{"v1.2.3", "4.9.12", "v0.3.0", "2.0.1"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{"4.9.12", "2.0.1"}, + expectedHighestSupportedVersion: "", + shouldFail: true, + }, + { + versions: []string{"v1.2.3", "boo", "v0.3.0", "2.0.1"}, + expectedHighestSupportedVersion: "1.2.3", + shouldFail: false, + }, + { + versions: []string{}, + expectedHighestSupportedVersion: "", + shouldFail: true, + }, + { + versions: []string{"var", "boo", "foo"}, + expectedHighestSupportedVersion: "", + shouldFail: true, + }, + } + + for _, tc := range testCases { + // Arrange & Act + actual, err := HighestSupportedVersion(tc.versions) + + // Assert + if tc.shouldFail && err == nil { + t.Fatalf("expecting highestSupportedVersion to fail, but got nil error for testcase: %#v", tc) + } + if !tc.shouldFail && err != nil { + t.Fatalf("unexpected error during ValidatePlugin for testcase: %#v\r\n err:%v", tc, err) + } + if tc.expectedHighestSupportedVersion != "" { + result, err := actual.Compare(tc.expectedHighestSupportedVersion) + if err != nil { + t.Fatalf("comparison failed with %v for testcase %#v", err, tc) + } + if result != 0 { + t.Fatalf("expectedHighestSupportedVersion %v, but got %v for tc: %#v", tc.expectedHighestSupportedVersion, actual, tc) + } + } + } +} diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index 41bf209413c1f..d8982bd74c5d3 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -2,14 +2,14 @@ module k8s.io/apiserver -go 1.20 +go 1.21.3 require ( github.com/coreos/go-oidc v2.2.1+incompatible github.com/coreos/go-systemd/v22 v22.5.0 - github.com/emicklei/go-restful/v3 v3.9.0 + github.com/emicklei/go-restful/v3 v3.11.0 
github.com/evanphx/json-patch v4.12.0+incompatible - github.com/fsnotify/fsnotify v1.6.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/gogo/protobuf v1.3.2 github.com/google/cel-go v0.17.6 github.com/google/gnostic-models v0.6.8 @@ -18,26 +18,27 @@ require ( github.com/google/uuid v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 go.etcd.io/etcd/api/v3 v3.5.9 go.etcd.io/etcd/client/pkg/v3 v3.5.9 go.etcd.io/etcd/client/v3 v3.5.9 go.etcd.io/etcd/server/v3 v3.5.9 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 - go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/trace v1.10.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 + go.opentelemetry.io/otel v1.19.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 + go.opentelemetry.io/otel/sdk v1.19.0 + go.opentelemetry.io/otel/trace v1.19.0 go.uber.org/zap v1.19.0 - golang.org/x/crypto v0.11.0 - golang.org/x/net v0.13.0 - golang.org/x/sync v0.2.0 - golang.org/x/sys v0.10.0 + golang.org/x/crypto v0.14.0 + golang.org/x/net v0.17.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.13.0 golang.org/x/time v0.3.0 - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 - google.golang.org/grpc v1.54.0 + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e + google.golang.org/grpc v1.58.2 google.golang.org/protobuf v1.31.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/square/go-jose.v2 v2.6.0 @@ -47,9 +48,9 @@ require ( k8s.io/component-base v0.0.0 k8s.io/klog/v2 v2.100.1 k8s.io/kms v0.0.0 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd sigs.k8s.io/structured-merge-diff/v4 v4.3.0 sigs.k8s.io/yaml v1.3.0 @@ -79,7 +80,7 @@ require ( github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect @@ -87,9 +88,9 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect 
github.com/pquerna/cachecontrol v0.1.0 // indirect @@ -107,19 +108,18 @@ require ( go.etcd.io/etcd/client/v2 v2.305.9 // indirect go.etcd.io/etcd/pkg/v3 v3.5.9 // indirect go.etcd.io/etcd/raft/v3 v3.5.9 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum index 60ab36bdc4111..65a69560ff355 100644 --- a/staging/src/k8s.io/apiserver/go.sum +++ b/staging/src/k8s.io/apiserver/go.sum @@ -1,168 +1,135 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod 
h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/appengine 
v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod 
h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids 
v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= 
-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod 
h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= 
+cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/armon/go-socks5 
v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -176,23 +143,13 @@ github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqy github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= @@ -209,27 +166,22 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 
-github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -257,40 +209,18 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod 
h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.17.6 h1:QDvHTIJunIsbgN8yVukx0HGnsqVLSY6xGqo+17IjIyM= @@ -298,36 +228,18 @@ github.com/google/cel-go v0.17.6/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulN github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= 
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -338,11 +250,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -354,8 +263,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -373,6 +280,7 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -385,10 +293,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -408,7 +316,6 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -418,7 +325,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -438,16 +344,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod 
h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= @@ -466,32 +370,24 @@ go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BC go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod 
h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= @@ -507,42 +403,17 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -552,254 +423,97 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 
v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= @@ -816,24 +530,16 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go index 25ee108ea95cb..b7b589d273acb 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go @@ -141,6 +141,7 @@ type CompilationResult struct { Program cel.Program Error *apiservercel.Error ExpressionAccessor ExpressionAccessor + OutputType *cel.Type } // Compiler provides a CEL expression compiler configured with the desired admission related CEL variables and @@ -214,6 +215,7 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op return 
CompilationResult{ Program: prog, ExpressionAccessor: expressionAccessor, + OutputType: ast.OutputType(), } } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go index 38b80a304aad1..646c640fcaeac 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go @@ -23,6 +23,7 @@ import ( "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" v1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" @@ -69,8 +70,8 @@ func (c *CompositedCompiler) CompileAndStoreVariables(variables []NamedExpressio } func (c *CompositedCompiler) CompileAndStoreVariable(variable NamedExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) CompilationResult { - c.CompositionEnv.AddField(variable.GetName()) result := c.Compiler.CompileCELExpression(variable, options, mode) + c.CompositionEnv.AddField(variable.GetName(), result.OutputType) c.CompositionEnv.CompiledVariables[variable.GetName()] = result return result } @@ -90,8 +91,8 @@ type CompositionEnv struct { CompiledVariables map[string]CompilationResult } -func (c *CompositionEnv) AddField(name string) { - c.MapType.Fields[name] = apiservercel.NewDeclField(name, apiservercel.DynType, true, nil, nil) +func (c *CompositionEnv) AddField(name string, celType *cel.Type) { + c.MapType.Fields[name] = apiservercel.NewDeclField(name, convertCelTypeToDeclType(celType), true, nil, nil) } func NewCompositionEnv(typeName string, baseEnvSet *environment.EnvSet) (*CompositionEnv, error) { @@ -196,3 +197,48 @@ func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val { } return v } + +// convertCelTypeToDeclType converts a cel.Type to DeclType, for the use of +// the TypeProvider and the cost estimator. +// List and map types are created on-demand with their parameters converted recursively. 
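+// For example (not exhaustive):
+//   cel.IntType                              -> apiservercel.IntType
+//   cel.ListType(cel.StringType)             -> apiservercel.NewListType(StringType, -1)
+//   cel.MapType(cel.StringType, cel.DynType) -> apiservercel.NewMapType(StringType, DynType, -1)
+// A nil, unrecognized, or unexpectedly parameterized type falls back to DynType.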
+func convertCelTypeToDeclType(celType *cel.Type) *apiservercel.DeclType { + if celType == nil { + return apiservercel.DynType + } + switch celType { + case cel.AnyType: + return apiservercel.AnyType + case cel.BoolType: + return apiservercel.BoolType + case cel.BytesType: + return apiservercel.BytesType + case cel.DoubleType: + return apiservercel.DoubleType + case cel.DurationType: + return apiservercel.DurationType + case cel.IntType: + return apiservercel.IntType + case cel.NullType: + return apiservercel.NullType + case cel.StringType: + return apiservercel.StringType + case cel.TimestampType: + return apiservercel.TimestampType + case cel.UintType: + return apiservercel.UintType + default: + if celType.HasTrait(traits.ContainerType) && celType.HasTrait(traits.IndexerType) { + parameters := celType.Parameters() + switch len(parameters) { + case 1: + elemType := convertCelTypeToDeclType(parameters[0]) + return apiservercel.NewListType(elemType, -1) + case 2: + keyType := convertCelTypeToDeclType(parameters[0]) + valueType := convertCelTypeToDeclType(parameters[1]) + return apiservercel.NewMapType(keyType, valueType, -1) + } + } + return apiservercel.DynType + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/composition_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/composition_test.go index ebbb51bbbb71f..aedc7c969f021 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/composition_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/composition_test.go @@ -70,7 +70,7 @@ func TestCompositedPolicies(t *testing.T) { expectedResult: true, }, { - name: "delayed compile error", + name: "early compile error", variables: []NamedExpressionAccessor{ &testVariable{ name: "name", @@ -80,20 +80,20 @@ func TestCompositedPolicies(t *testing.T) { attributes: endpointCreateAttributes(), expression: "variables.name == 'endpoints1'", expectErr: true, - expectedErrorMessage: `composited variable "name" fails to compile:`, + expectedErrorMessage: `found no matching overload for '_==_' applied to '(int, string)'`, }, { name: "delayed eval error", variables: []NamedExpressionAccessor{ &testVariable{ - name: "name", - expression: "object.spec.subsets[114514].addresses.size()", // array index out of bound + name: "count", + expression: "object.subsets[114514].addresses.size()", // array index out of bound }, }, attributes: endpointCreateAttributes(), - expression: "variables.name == 'endpoints1'", + expression: "variables.count == 810", expectErr: true, - expectedErrorMessage: `composited variable "name" fails to evaluate:`, + expectedErrorMessage: `composited variable "count" fails to evaluate: index out of bounds: 114514`, }, { name: "out of budget during lazy evaluation", @@ -123,6 +123,68 @@ func TestCompositedPolicies(t *testing.T) { expectedResult: true, runtimeCostBudget: 10, // enough for one lazy evaluation but not two, should pass }, + { + name: "single boolean variable in expression", + variables: []NamedExpressionAccessor{ + &testVariable{ + name: "fortuneTelling", + expression: "true", + }, + }, + attributes: endpointCreateAttributes(), + expression: "variables.fortuneTelling", + expectedResult: true, + }, + { + name: "variable of a list", + variables: []NamedExpressionAccessor{ + &testVariable{ + name: "list", + expression: "[1, 2, 3, 4]", + }, + }, + attributes: endpointCreateAttributes(), + expression: "variables.list.sum() == 10", + expectedResult: true, + }, + { + name: "variable of a map", + variables: 
[]NamedExpressionAccessor{ + &testVariable{ + name: "dict", + expression: `{"foo": "bar"}`, + }, + }, + attributes: endpointCreateAttributes(), + expression: "variables.dict['foo'].contains('bar')", + expectedResult: true, + }, + { + name: "variable of a list but confused as a map", + variables: []NamedExpressionAccessor{ + &testVariable{ + name: "list", + expression: "[1, 2, 3, 4]", + }, + }, + attributes: endpointCreateAttributes(), + expression: "variables.list['invalid'] == 'invalid'", + expectErr: true, + expectedErrorMessage: "found no matching overload for '_[_]' applied to '(list(int), string)'", + }, + { + name: "list of strings, but element is confused as an integer", + variables: []NamedExpressionAccessor{ + &testVariable{ + name: "list", + expression: "['1', '2', '3', '4']", + }, + }, + attributes: endpointCreateAttributes(), + expression: "variables.list[0] == 1", + expectErr: true, + expectedErrorMessage: "found no matching overload for '_==_' applied to '(string, int)'", + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/load/load.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/load/load.go new file mode 100644 index 0000000000000..575b7c49f0ec5 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/load/load.go @@ -0,0 +1,82 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package load + +import ( + "fmt" + "io" + "os" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + api "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/apis/apiserver/install" + externalapi "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" +) + +var ( + scheme = runtime.NewScheme() + codecs = serializer.NewCodecFactory(scheme, serializer.EnableStrict) +) + +func init() { + install.Install(scheme) +} + +func LoadFromFile(file string) (*api.AuthorizationConfiguration, error) { + data, err := os.ReadFile(file) + if err != nil { + return nil, err + } + return LoadFromData(data) +} + +func LoadFromReader(reader io.Reader) (*api.AuthorizationConfiguration, error) { + if reader == nil { + // no reader specified, use default config + return LoadFromData(nil) + } + + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + return LoadFromData(data) +} + +func LoadFromData(data []byte) (*api.AuthorizationConfiguration, error) { + if len(data) == 0 { + // no config provided, return default + externalConfig := &externalapi.AuthorizationConfiguration{} + scheme.Default(externalConfig) + internalConfig := &api.AuthorizationConfiguration{} + if err := scheme.Convert(externalConfig, internalConfig, nil); err != nil { + return nil, err + } + return internalConfig, nil + } + + decodedObj, err := runtime.Decode(codecs.UniversalDecoder(), data) + if err != nil { + return nil, err + } + configuration, ok := decodedObj.(*api.AuthorizationConfiguration) + if !ok { + return nil, fmt.Errorf("expected AuthorizationConfiguration, got %T", decodedObj) + } + return configuration, nil +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/load/load_test.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/load/load_test.go new file mode 100644 index 0000000000000..9459c848d7628 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/load/load_test.go @@ -0,0 +1,290 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package load + +import ( + "bytes" + "os" + "reflect" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/apiserver/pkg/apis/apiserver" +) + +var defaultConfig = &api.AuthorizationConfiguration{} + +func writeTempFile(t *testing.T, content string) string { + t.Helper() + file, err := os.CreateTemp("", "config") + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := os.Remove(file.Name()); err != nil { + t.Fatal(err) + } + }) + if err := os.WriteFile(file.Name(), []byte(content), 0600); err != nil { + t.Fatal(err) + } + return file.Name() +} + +func TestLoadFromFile(t *testing.T) { + // no file + { + _, err := LoadFromFile("") + if err == nil { + t.Fatalf("expected err: %v", err) + } + } + + // empty file + { + config, err := LoadFromFile(writeTempFile(t, ``)) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if !reflect.DeepEqual(config, defaultConfig) { + t.Fatalf("unexpected config:\n%s", cmp.Diff(defaultConfig, config)) + } + } + + // valid file + { + input := `{ + "apiVersion":"apiserver.config.k8s.io/v1alpha1", + "kind":"AuthorizationConfiguration", + "authorizers":[{"type":"Webhook"}]}` + expect := &api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{{Type: "Webhook"}}, + } + + config, err := LoadFromFile(writeTempFile(t, input)) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if !reflect.DeepEqual(config, expect) { + t.Fatalf("unexpected config:\n%s", cmp.Diff(expect, config)) + } + } + + // missing file + { + _, err := LoadFromFile(`bogus-missing-file`) + if err == nil { + t.Fatalf("expected err, got none") + } + if !strings.Contains(err.Error(), "bogus-missing-file") { + t.Fatalf("expected missing file error, got %v", err) + } + } + + // invalid content file + { + input := `{ + "apiVersion":"apiserver.config.k8s.io/v99", + "kind":"AuthorizationConfiguration", + "authorizers":{"type":"Webhook"}}` + + _, err := LoadFromFile(writeTempFile(t, input)) + if err == nil { + t.Fatalf("expected err, got none") + } + if !strings.Contains(err.Error(), "apiserver.config.k8s.io/v99") { + t.Fatalf("expected apiVersion error, got %v", err) + } + } +} + +func TestLoadFromReader(t *testing.T) { + // no reader + { + config, err := LoadFromReader(nil) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if !reflect.DeepEqual(config, defaultConfig) { + t.Fatalf("unexpected config:\n%s", cmp.Diff(defaultConfig, config)) + } + } + + // empty reader + { + config, err := LoadFromReader(&bytes.Buffer{}) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if !reflect.DeepEqual(config, defaultConfig) { + t.Fatalf("unexpected config:\n%s", cmp.Diff(defaultConfig, config)) + } + } + + // valid reader + { + input := `{ + "apiVersion":"apiserver.config.k8s.io/v1alpha1", + "kind":"AuthorizationConfiguration", + "authorizers":[{"type":"Webhook"}]}` + expect := &api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{{Type: "Webhook"}}, + } + + config, err := LoadFromReader(bytes.NewBufferString(input)) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + if !reflect.DeepEqual(config, expect) { + t.Fatalf("unexpected config:\n%s", cmp.Diff(expect, config)) + } + } + + // invalid reader + { + input := `{ + "apiVersion":"apiserver.config.k8s.io/v99", + "kind":"AuthorizationConfiguration", + "authorizers":[{"type":"Webhook"}]}` + + _, err := LoadFromReader(bytes.NewBufferString(input)) + if err == nil { + 
t.Fatalf("expected err, got none") + } + if !strings.Contains(err.Error(), "apiserver.config.k8s.io/v99") { + t.Fatalf("expected apiVersion error, got %v", err) + } + } +} + +func TestLoadFromData(t *testing.T) { + testcases := []struct { + name string + data []byte + expectErr string + expectConfig *api.AuthorizationConfiguration + }{ + { + name: "nil", + data: nil, + expectConfig: defaultConfig, + }, + { + name: "nil", + data: []byte{}, + expectConfig: defaultConfig, + }, + { + name: "v1alpha1 - json", + data: []byte(`{ +"apiVersion":"apiserver.config.k8s.io/v1alpha1", +"kind":"AuthorizationConfiguration", +"authorizers":[{"type":"Webhook"}]}`), + expectConfig: &api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{{Type: "Webhook"}}, + }, + }, + { + name: "v1alpha1 - defaults", + data: []byte(`{ +"apiVersion":"apiserver.config.k8s.io/v1alpha1", +"kind":"AuthorizationConfiguration", +"authorizers":[{"type":"Webhook","name":"default","webhook":{}}]}`), + expectConfig: &api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{{ + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + }, + }}, + }, + }, + { + name: "v1alpha1 - yaml", + data: []byte(` +apiVersion: apiserver.config.k8s.io/v1alpha1 +kind: AuthorizationConfiguration +authorizers: +- type: Webhook +`), + expectConfig: &api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{{Type: "Webhook"}}, + }, + }, + { + name: "missing apiVersion", + data: []byte(`{"kind":"AuthorizationConfiguration"}`), + expectErr: `'apiVersion' is missing`, + }, + { + name: "missing kind", + data: []byte(`{"apiVersion":"apiserver.config.k8s.io/v1alpha1"}`), + expectErr: `'Kind' is missing`, + }, + { + name: "unknown group", + data: []byte(`{"apiVersion":"apps/v1alpha1","kind":"AuthorizationConfiguration"}`), + expectErr: `apps/v1alpha1`, + }, + { + name: "unknown version", + data: []byte(`{"apiVersion":"apiserver.config.k8s.io/v99","kind":"AuthorizationConfiguration"}`), + expectErr: `apiserver.config.k8s.io/v99`, + }, + { + name: "unknown kind", + data: []byte(`{"apiVersion":"apiserver.config.k8s.io/v1alpha1","kind":"SomeConfiguration"}`), + expectErr: `SomeConfiguration`, + }, + { + name: "unknown field", + data: []byte(`{ +"apiVersion":"apiserver.config.k8s.io/v1alpha1", +"kind":"AuthorizationConfiguration", +"authorzers":[{"type":"Webhook"}]}`), + expectErr: `unknown field "authorzers"`, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + config, err := LoadFromData(tc.data) + if err != nil { + if len(tc.expectErr) == 0 { + t.Fatalf("unexpected error: %v", err) + } + if !strings.Contains(err.Error(), tc.expectErr) { + t.Fatalf("unexpected error: %v", err) + } + return + } + if len(tc.expectErr) > 0 { + t.Fatalf("expected err, got none") + } + + if !reflect.DeepEqual(config, tc.expectConfig) { + t.Fatalf("unexpected config:\n%s", cmp.Diff(tc.expectConfig, config)) + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/register.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/register.go index 7a5f6e5854fd4..d42852d93e64f 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/register.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/register.go @@ -44,6 +44,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, 
&AdmissionConfiguration{}, &AuthenticationConfiguration{}, + &AuthorizationConfiguration{}, &EgressSelectorConfiguration{}, &TracingConfiguration{}, ) diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go index c5ec9b4fe3043..1b2b4fd5e49f5 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -198,3 +198,121 @@ type PrefixedClaimOrExpression struct { Claim string Prefix *string } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. + Type AuthorizerType + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + Webhook *WebhookConfiguration +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. + // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. 
+ FailurePolicy string + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. + Type string + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string +} diff --git a/test/integration/clustercidr/main_test.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go similarity index 55% rename from test/integration/clustercidr/main_test.go rename to staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go index cf920a270324f..a9af01fe76cda 100644 --- a/test/integration/clustercidr/main_test.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package clustercidr +package v1alpha1 import ( - "testing" + "time" - "k8s.io/kubernetes/test/integration/framework" + "k8s.io/apimachinery/pkg/runtime" ) -func TestMain(m *testing.M) { - framework.EtcdMain(m.Run) +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) { + if obj.AuthorizedTTL.Duration == 0 { + obj.AuthorizedTTL.Duration = 5 * time.Minute + } + if obj.UnauthorizedTTL.Duration == 0 { + obj.UnauthorizedTTL.Duration = 30 * time.Second + } } diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go index dc5d3be24bbbb..7d68ac0c62ee5 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go @@ -43,7 +43,7 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) + localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) } // Adds the list of known types to the given scheme. @@ -54,6 +54,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { ) scheme.AddKnownTypes(ConfigSchemeGroupVersion, &AuthenticationConfiguration{}, + &AuthorizationConfiguration{}, &TracingConfiguration{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index 0dd36c9bb5ee4..22ac6eee3400b 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -268,3 +268,122 @@ type PrefixedClaimOrExpression struct { // +required Prefix *string `json:"prefix"` } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. 
+ Type string `json:"type"` + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string `json:"name"` + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + // Must not be defined when Type!=Webhook + Webhook *WebhookConfiguration `json:"webhook,omitempty"` +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration `json:"authorizedTTL"` + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"` + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration `json:"timeout"` + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. + // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"` + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"` + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. + FailurePolicy string `json:"failurePolicy"` + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"` + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition `json:"matchConditions"` +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. 
+ Type string `json:"type"` + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string `json:"kubeConfigFile"` +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string `json:"expression"` +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index 0fd3ae69ccec8..a7a09ad0eedf3 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -66,6 +66,26 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ClaimMappings)(nil), (*apiserver.ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(a.(*ClaimMappings), b.(*apiserver.ClaimMappings), scope) }); err != nil { @@ -191,6 +211,36 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), b.(*apiserver.WebhookMatchCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) }); err != nil { @@ -263,6 +313,50 @@ func Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationCon return autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in, out, s) } +func autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { + out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) + return nil +} + +// Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { + out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) + return nil +} + +// Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration is an autogenerated conversion function. 
+func Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { + out.Type = apiserver.AuthorizerType(in.Type) + out.Name = in.Name + out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook)) + return nil +} + +// Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { + out.Type = string(in.Type) + out.Name = in.Name + out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook)) + return nil +} + +// Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration is an autogenerated conversion function. +func Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in, out, s) +} + func autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error { if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil { return err @@ -583,3 +677,83 @@ func autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.U func Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error { return autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in, out, s) } + +func autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion + out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion + out.FailurePolicy = in.FailurePolicy + if err := Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { + return err + } + out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) + return nil +} + +// Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function. 
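+// (Note: the unsafe.Pointer slice casts in the autoConvert functions above are how
+// conversion-gen copies Authorizers and MatchConditions without per-element work; it
+// only emits them when the internal and v1alpha1 structs share an identical memory layout.)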
+func Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s) +} + +func autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion + out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion + out.FailurePolicy = in.FailurePolicy + if err := Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { + return err + } + out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) + return nil +} + +// Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration is an autogenerated conversion function. +func Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { + out.Type = in.Type + out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) + return nil +} + +// Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function. +func Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s) +} + +func autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { + out.Type = in.Type + out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) + return nil +} + +// Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo is an autogenerated conversion function. +func Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { + return autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in, out, s) +} + +func autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { + out.Expression = in.Expression + return nil +} + +// Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function. 
+func Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s) +} + +func autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { + out.Expression = in.Expression + return nil +} + +// Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition is an autogenerated conversion function. +func Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { + return autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in, out, s) +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index 328c5ddbbc5bf..5121d05e7d3c6 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -110,6 +110,59 @@ func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Authorizers != nil { + in, out := &in.Authorizers, &out.Authorizers + *out = make([]AuthorizerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. +func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { + if in == nil { + return nil + } + out := new(AuthorizationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. +func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { + if in == nil { + return nil + } + out := new(AuthorizerConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) { *out = *in @@ -383,3 +436,65 @@ func (in *UDSTransport) DeepCopy() *UDSTransport { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
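+// Note for readers: the TTL and Timeout durations are plain value copies, while
+// ConnectionInfo and MatchConditions need the explicit handling below because they
+// carry a string pointer and a slice respectively.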
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) { + *out = *in + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo) + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]WebhookMatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration. +func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration { + if in == nil { + return nil + } + out := new(WebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) { + *out = *in + if in.KubeConfigFile != nil { + in, out := &in.KubeConfigFile, &out.KubeConfigFile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo. +func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo { + if in == nil { + return nil + } + out := new(WebhookConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition. +func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { + if in == nil { + return nil + } + out := new(WebhookMatchCondition) + in.DeepCopyInto(out) + return out +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go index 5070cb91b90f1..fc76be0fb8a2d 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go @@ -29,5 +29,15 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. 
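+// For AuthorizationConfiguration this means SetObjectDefaults_AuthorizationConfiguration
+// (below) walks every authorizer and applies SetDefaults_WebhookConfiguration, filling in
+// authorizedTTL=5m and unauthorizedTTL=30s when they are left unset.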
func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) }) return nil } + +func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) { + for i := range in.Authorizers { + a := &in.Authorizers[i] + if a.Webhook != nil { + SetDefaults_WebhookConfiguration(a.Webhook) + } + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go index 90a5c8eb75325..59d15fdc07db9 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go @@ -18,8 +18,16 @@ package validation import ( "fmt" + utilvalidation "k8s.io/apimachinery/pkg/util/validation" "net/url" - + "os" + "path/filepath" + "strings" + "time" + + v1 "k8s.io/api/authorization/v1" + "k8s.io/api/authorization/v1beta1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" api "k8s.io/apiserver/pkg/apis/apiserver" @@ -202,3 +210,148 @@ func validateClaimMappings(m api.ClaimMappings, fldPath *field.Path) field.Error return allErrs } + +// ValidateAuthorizationConfiguration validates a given AuthorizationConfiguration. +func ValidateAuthorizationConfiguration(fldPath *field.Path, c *api.AuthorizationConfiguration, knownTypes sets.String, repeatableTypes sets.String) field.ErrorList { + allErrs := field.ErrorList{} + + if len(c.Authorizers) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("authorizers"), "at least one authorization mode must be defined")) + } + + seenAuthorizerTypes := sets.NewString() + seenAuthorizerNames := sets.NewString() + for i, a := range c.Authorizers { + fldPath := fldPath.Child("authorizers").Index(i) + aType := string(a.Type) + if aType == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("type"), "")) + continue + } + if !knownTypes.Has(aType) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), aType, knownTypes.List())) + continue + } + if seenAuthorizerTypes.Has(aType) && !repeatableTypes.Has(aType) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("type"), aType)) + continue + } + seenAuthorizerTypes.Insert(aType) + + if len(a.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if seenAuthorizerNames.Has(a.Name) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), a.Name)) + } else if errs := utilvalidation.IsDNS1123Subdomain(a.Name); len(errs) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), a.Name, fmt.Sprintf("authorizer name is invalid: %s", strings.Join(errs, ", ")))) + } + seenAuthorizerNames.Insert(a.Name) + + switch a.Type { + case api.TypeWebhook: + if a.Webhook == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("webhook"), "required when type=Webhook")) + continue + } + allErrs = append(allErrs, ValidateWebhookConfiguration(fldPath, a.Webhook)...) 
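+ // Authorizer types other than Webhook (for example Node or RBAC in servers that
+ // support them) must not carry a webhook stanza; the default branch below rejects that.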
+ default: + if a.Webhook != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("webhook"), "non-null", "may only be specified when type=Webhook")) + } + } + } + + return allErrs +} + +func ValidateWebhookConfiguration(fldPath *field.Path, c *api.WebhookConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + + if c.Timeout.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("timeout"), "")) + } else if c.Timeout.Duration > 30*time.Second || c.Timeout.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("timeout"), c.Timeout.Duration.String(), "must be > 0s and <= 30s")) + } + + if c.AuthorizedTTL.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("authorizedTTL"), "")) + } else if c.AuthorizedTTL.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("authorizedTTL"), c.AuthorizedTTL.Duration.String(), "must be > 0s")) + } + + if c.UnauthorizedTTL.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("unauthorizedTTL"), "")) + } else if c.UnauthorizedTTL.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("unauthorizedTTL"), c.UnauthorizedTTL.Duration.String(), "must be > 0s")) + } + + switch c.SubjectAccessReviewVersion { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("subjectAccessReviewVersion"), "")) + case "v1": + _ = &v1.SubjectAccessReview{} + case "v1beta1": + _ = &v1beta1.SubjectAccessReview{} + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("subjectAccessReviewVersion"), c.SubjectAccessReviewVersion, []string{"v1", "v1beta1"})) + } + + switch c.MatchConditionSubjectAccessReviewVersion { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("matchConditionSubjectAccessReviewVersion"), "")) + case "v1": + _ = &v1.SubjectAccessReview{} + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("matchConditionSubjectAccessReviewVersion"), c.MatchConditionSubjectAccessReviewVersion, []string{"v1"})) + } + + switch c.FailurePolicy { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("failurePolicy"), "")) + case api.FailurePolicyNoOpinion, api.FailurePolicyDeny: + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("failurePolicy"), c.FailurePolicy, []string{"NoOpinion", "Deny"})) + } + + switch c.ConnectionInfo.Type { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("connectionInfo", "type"), "")) + case api.AuthorizationWebhookConnectionInfoTypeInCluster: + if c.ConnectionInfo.KubeConfigFile != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "can only be set when type=KubeConfigFile")) + } + case api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile: + if c.ConnectionInfo.KubeConfigFile == nil || *c.ConnectionInfo.KubeConfigFile == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("connectionInfo", "kubeConfigFile"), "")) + } else if !filepath.IsAbs(*c.ConnectionInfo.KubeConfigFile) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "must be an absolute path")) + } else if info, err := os.Stat(*c.ConnectionInfo.KubeConfigFile); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, fmt.Sprintf("error loading file: %v", err))) + } else if !info.Mode().IsRegular() { + 
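+ // Only regular files are accepted here; os.Stat resolves symlinks before this
+ // check, so a symlink that ultimately points at a regular kubeconfig still passes.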
allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "must be a regular file")) + } + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("connectionInfo", "type"), c.ConnectionInfo, []string{api.AuthorizationWebhookConnectionInfoTypeInCluster, api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile})) + } + + // TODO: Remove this check and ensure that correct validations below for MatchConditions are added + // for i, condition := range c.MatchConditions { + // fldPath := fldPath.Child("matchConditions").Index(i).Child("expression") + // if len(strings.TrimSpace(condition.Expression)) == 0 { + // allErrs = append(allErrs, field.Required(fldPath, "")) + // } else { + // allErrs = append(allErrs, ValidateWebhookMatchCondition(fldPath, sampleSAR, condition.Expression)...) + // } + // } + if len(c.MatchConditions) != 0 { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("matchConditions"), c.MatchConditions, []string{})) + } + + return allErrs +} + +func ValidateWebhookMatchCondition(fldPath *field.Path, sampleSAR runtime.Object, expression string) field.ErrorList { + allErrs := field.ErrorList{} + // TODO: typecheck CEL expression + return allErrs +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/validation/validation_test.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/validation/validation_test.go index 7931a458e5fab..d2e5f28c9f080 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/validation/validation_test.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/validation/validation_test.go @@ -21,11 +21,15 @@ import ( "crypto/elliptic" "crypto/rand" "encoding/pem" + "os" "testing" + "time" "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" api "k8s.io/apiserver/pkg/apis/apiserver" certutil "k8s.io/client-go/util/cert" @@ -412,3 +416,770 @@ func errString(errs errors.Aggregate) string { } return "" } + +type ( + test struct { + name string + configuration api.AuthorizationConfiguration + expectedErrList field.ErrorList + knownTypes sets.String + repeatableTypes sets.String + } +) + +func TestValidateAuthorizationConfiguration(t *testing.T) { + badKubeConfigFile := "../some/relative/path/kubeconfig" + + tempKubeConfigFile, err := os.CreateTemp("/tmp", "kubeconfig") + if err != nil { + t.Fatalf("failed to set up temp file: %v", err) + } + tempKubeConfigFilePath := tempKubeConfigFile.Name() + defer os.Remove(tempKubeConfigFilePath) + + tests := []test{ + { + name: "atleast one authorizer should be defined", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{}, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("authorizers"), "at least one authorization mode must be defined")}, + knownTypes: sets.NewString(), + repeatableTypes: sets.NewString(), + }, + { + name: "type and name are required if an authorizer is defined", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + {}, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("type"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "authorizer names should be of non-zero length", + configuration: api.AuthorizationConfiguration{ + Authorizers: 
[]api.AuthorizerConfiguration{ + { + Type: "Foo", + Name: "", + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("name"), "")}, + knownTypes: sets.NewString(string("Foo")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "authorizer names should be unique", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Foo", + Name: "foo", + }, + { + Type: "Bar", + Name: "foo", + }, + }, + }, + expectedErrList: field.ErrorList{field.Duplicate(field.NewPath("name"), "foo")}, + knownTypes: sets.NewString(string("Foo"), string("Bar")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "authorizer names should be DNS1123 labels", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Foo", + Name: "myauthorizer", + }, + }, + }, + expectedErrList: field.ErrorList{}, + knownTypes: sets.NewString(string("Foo")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "authorizer names should be DNS1123 subdomains", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Foo", + Name: "foo.example.domain", + }, + }, + }, + expectedErrList: field.ErrorList{}, + knownTypes: sets.NewString(string("Foo")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "authorizer names should not be invalid DNS1123 labels or subdomains", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Foo", + Name: "FOO.example.domain", + }, + }, + }, + expectedErrList: field.ErrorList{field.Invalid(field.NewPath("name"), "FOO.example.domain", "")}, + knownTypes: sets.NewString(string("Foo")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "bare minimum configuration with Webhook", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "bare minimum configuration with multiple webhooks", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + { + Type: "Webhook", + Name: "second-webhook", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + 
SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "configuration with unknown types", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Foo", + }, + }, + }, + expectedErrList: field.ErrorList{field.NotSupported(field.NewPath("type"), "Foo", []string{"..."})}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "configuration with not repeatable types", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Foo", + Name: "foo-1", + }, + { + Type: "Foo", + Name: "foo-2", + }, + }, + }, + expectedErrList: field.ErrorList{field.Duplicate(field.NewPath("type"), "Foo")}, + knownTypes: sets.NewString(string("Foo")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "when type=Webhook, webhook needs to be defined", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("webhook"), "required when type=Webhook")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "when type!=Webhook, webhooks needs to be nil", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Foo", + Name: "foo", + Webhook: &api.WebhookConfiguration{}, + }, + }, + }, + expectedErrList: field.ErrorList{field.Invalid(field.NewPath("webhook"), "non-null", "may only be specified when type=Webhook")}, + knownTypes: sets.NewString(string("Foo")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "timeout should be specified", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + FailurePolicy: "NoOpinion", + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("timeout"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + // + { + name: "timeout shouldn't be zero", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + FailurePolicy: "NoOpinion", + Timeout: metav1.Duration{Duration: 0 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("timeout"), "")}, + knownTypes: sets.NewString(string("Webhook")), + 
repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "timeout shouldn't be negative", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + FailurePolicy: "NoOpinion", + Timeout: metav1.Duration{Duration: -30 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Invalid(field.NewPath("timeout"), time.Duration(-30*time.Second).String(), "must be > 0s and <= 30s")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "timeout shouldn't be greater than 30seconds", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + FailurePolicy: "NoOpinion", + Timeout: metav1.Duration{Duration: 60 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Invalid(field.NewPath("timeout"), time.Duration(60*time.Second).String(), "must be > 0s and <= 30s")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "authorizedTTL should be defined ", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + FailurePolicy: "NoOpinion", + Timeout: metav1.Duration{Duration: 5 * time.Second}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("authorizedTTL"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "authorizedTTL shouldn't be negative", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + FailurePolicy: "NoOpinion", + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: -30 * time.Second}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Invalid(field.NewPath("authorizedTTL"), time.Duration(-30*time.Second).String(), "must be > 0s")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "unauthorizedTTL should be defined ", + configuration: api.AuthorizationConfiguration{ + Authorizers: 
[]api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + FailurePolicy: "NoOpinion", + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("unauthorizedTTL"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "unauthorizedTTL shouldn't be negative", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + FailurePolicy: "NoOpinion", + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: -30 * time.Second}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Invalid(field.NewPath("unauthorizedTTL"), time.Duration(-30*time.Second).String(), "must be > 0s")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "SAR should be defined", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + MatchConditionSubjectAccessReviewVersion: "v1", + FailurePolicy: "NoOpinion", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("subjectAccessReviewVersion"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "SAR should be one of v1 and v1beta1", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v2beta1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.NotSupported(field.NewPath("subjectAccessReviewVersion"), "v2beta1", []string{"v1", "v1beta1"})}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "MatchConditionSAR should be defined", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * 
time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("matchConditionSubjectAccessReviewVersion"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "MatchConditionSAR must not be anything other than v1", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1beta1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.NotSupported(field.NewPath("matchConditionSubjectAccessReviewVersion"), "v1beta1", []string{"v1"})}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "failurePolicy should be defined", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("failurePolicy"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "failurePolicy should be one of \"NoOpinion\" or \"Deny\"", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "AlwaysAllow", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.NotSupported(field.NewPath("failurePolicy"), "AlwaysAllow", []string{"NoOpinion", "Deny"})}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "connectionInfo should be defined", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + }, + }, + }, + }, + expectedErrList: 
field.ErrorList{field.Required(field.NewPath("connectionInfo"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "connectionInfo should be one of InClusterConfig or KubeConfigFile", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "ExternalClusterConfig", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{ + field.NotSupported(field.NewPath("connectionInfo"), api.WebhookConnectionInfo{Type: "ExternalClusterConfig"}, []string{"InClusterConfig", "KubeConfigFile"}), + }, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "if connectionInfo=InClusterConfig, then kubeConfigFile should be nil", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "InClusterConfig", + KubeConfigFile: new(string), + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{ + field.Invalid(field.NewPath("connectionInfo", "kubeConfigFile"), "", "can only be set when type=KubeConfigFile"), + }, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "if connectionInfo=KubeConfigFile, then KubeConfigFile should be defined", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "KubeConfigFile", + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Required(field.NewPath("kubeConfigFile"), "")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "if connectionInfo=KubeConfigFile, then KubeConfigFile should be defined, must be an absolute path, should exist, shouldn't be a symlink", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + 
MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "KubeConfigFile", + KubeConfigFile: &badKubeConfigFile, + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{field.Invalid(field.NewPath("kubeConfigFile"), badKubeConfigFile, "must be an absolute path")}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + { + name: "if connectionInfo=KubeConfigFile, an existent file needs to be passed", + configuration: api.AuthorizationConfiguration{ + Authorizers: []api.AuthorizerConfiguration{ + { + Type: "Webhook", + Name: "default", + Webhook: &api.WebhookConfiguration{ + Timeout: metav1.Duration{Duration: 5 * time.Second}, + AuthorizedTTL: metav1.Duration{Duration: 5 * time.Minute}, + UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second}, + FailurePolicy: "NoOpinion", + SubjectAccessReviewVersion: "v1", + MatchConditionSubjectAccessReviewVersion: "v1", + ConnectionInfo: api.WebhookConnectionInfo{ + Type: "KubeConfigFile", + KubeConfigFile: &tempKubeConfigFilePath, + }, + }, + }, + }, + }, + expectedErrList: field.ErrorList{}, + knownTypes: sets.NewString(string("Webhook")), + repeatableTypes: sets.NewString(string("Webhook")), + }, + + // TODO: When the CEL expression validator is implemented, add a few test cases to typecheck the expression + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + errList := ValidateAuthorizationConfiguration(nil, &test.configuration, test.knownTypes, test.repeatableTypes) + if len(errList) != len(test.expectedErrList) { + t.Errorf("expected %d errs, got %d, errors %v", len(test.expectedErrList), len(errList), errList) + } + + for i, expected := range test.expectedErrList { + if expected.Type.String() != errList[i].Type.String() { + t.Errorf("expected err type %s, got %s", + expected.Type.String(), + errList[i].Type.String()) + } + if expected.BadValue != errList[i].BadValue { + t.Errorf("expected bad value '%s', got '%s'", + expected.BadValue, + errList[i].BadValue) + } + } + }) + + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index e1320e6316335..87b41f7ef6ba3 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -110,6 +110,59 @@ func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Authorizers != nil { + in, out := &in.Authorizers, &out.Authorizers + *out = make([]AuthorizerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. +func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { + if in == nil { + return nil + } + out := new(AuthorizationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. +func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { + if in == nil { + return nil + } + out := new(AuthorizerConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) { *out = *in @@ -383,3 +436,65 @@ func (in *UDSTransport) DeepCopy() *UDSTransport { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) { + *out = *in + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo) + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]WebhookMatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration. +func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration { + if in == nil { + return nil + } + out := new(WebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) { + *out = *in + if in.KubeConfigFile != nil { + in, out := &in.KubeConfigFile, &out.KubeConfigFile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo. +func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo { + if in == nil { + return nil + } + out := new(WebhookConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition. +func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { + if in == nil { + return nil + } + out := new(WebhookMatchCondition) + in.DeepCopyInto(out) + return out +} diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go index f369b2229b98d..17a398ed8a40d 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/types.go @@ -235,10 +235,10 @@ type PolicyRule struct { Namespaces []string // NonResourceURLs is a set of URL paths that should be audited. - // *s are allowed, but only as the full, final step in the path. 
+ // `*`s are allowed, but only as the full, final step in the path. // Examples: - // "/metrics" - Log requests for apiserver metrics - // "/healthz*" - Log all health checks + // `/metrics` - Log requests for apiserver metrics + // `/healthz*` - Log all health checks // +optional NonResourceURLs []string @@ -269,11 +269,11 @@ type GroupResources struct { // Resources is a list of resources this rule applies to. // // For example: - // 'pods' matches pods. - // 'pods/log' matches the log subresource of pods. - // '*' matches all resources and their subresources. - // 'pods/*' matches all subresources of pods. - // '*/scale' matches all scale subresources. + // - `pods` matches pods. + // - `pods/log` matches the log subresource of pods. + // - `*` matches all resources and their subresources. + // - `pods/*` matches all subresources of pods. + // - `*/scale` matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index 8cdb12cdf967e..13c41e54ce618 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -129,11 +129,11 @@ message GroupResources { // Resources is a list of resources this rule applies to. // // For example: - // 'pods' matches pods. - // 'pods/log' matches the log subresource of pods. - // '*' matches all resources and their subresources. - // 'pods/*' matches all subresources of pods. - // '*/scale' matches all scale subresources. + // - `pods` matches pods. + // - `pods/log` matches the log subresource of pods. + // - `*` matches all resources and their subresources. + // - `pods/*` matches all subresources of pods. + // - `*/scale` matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. @@ -248,10 +248,10 @@ message PolicyRule { repeated string namespaces = 6; // NonResourceURLs is a set of URL paths that should be audited. - // *s are allowed, but only as the full, final step in the path. + // `*`s are allowed, but only as the full, final step in the path. // Examples: - // "/metrics" - Log requests for apiserver metrics - // "/healthz*" - Log all health checks + // - `/metrics` - Log requests for apiserver metrics + // - `/healthz*` - Log all health checks // +optional repeated string nonResourceURLs = 7; diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go index 27f4729eaafcf..151c56c689b16 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go @@ -229,10 +229,10 @@ type PolicyRule struct { Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,rep,name=namespaces"` // NonResourceURLs is a set of URL paths that should be audited. - // *s are allowed, but only as the full, final step in the path. + // `*`s are allowed, but only as the full, final step in the path. 
// Examples: - // "/metrics" - Log requests for apiserver metrics - // "/healthz*" - Log all health checks + // - `/metrics` - Log requests for apiserver metrics + // - `/healthz*` - Log all health checks // +optional NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"` @@ -263,11 +263,11 @@ type GroupResources struct { // Resources is a list of resources this rule applies to. // // For example: - // 'pods' matches pods. - // 'pods/log' matches the log subresource of pods. - // '*' matches all resources and their subresources. - // 'pods/*' matches all subresources of pods. - // '*/scale' matches all scale subresources. + // - `pods` matches pods. + // - `pods/log` matches the log subresource of pods. + // - `*` matches all resources and their subresources. + // - `pods/*` matches all subresources of pods. + // - `*/scale` matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. diff --git a/staging/src/k8s.io/apiserver/pkg/cel/common/adaptor.go b/staging/src/k8s.io/apiserver/pkg/cel/common/adaptor.go index c28d6ce510afa..dd94e282f4719 100644 --- a/staging/src/k8s.io/apiserver/pkg/cel/common/adaptor.go +++ b/staging/src/k8s.io/apiserver/pkg/cel/common/adaptor.go @@ -56,12 +56,27 @@ type Schema interface { // Validations contains OpenAPI validation that the CEL library uses. type Validations interface { + Pattern() string + Minimum() *float64 + IsExclusiveMinimum() bool + Maximum() *float64 + IsExclusiveMaximum() bool + MultipleOf() *float64 + MinItems() *int64 MaxItems() *int64 + MinLength() *int64 MaxLength() *int64 + MinProperties() *int64 MaxProperties() *int64 Required() []string Enum() []any Nullable() bool + UniqueItems() bool + + AllOf() []Schema + OneOf() []Schema + AnyOf() []Schema + Not() Schema } // KubeExtensions contains Kubernetes-specific extensions to the OpenAPI schema. @@ -71,6 +86,16 @@ type KubeExtensions interface { IsXPreserveUnknownFields() bool XListType() string XListMapKeys() []string + XMapType() string + XValidations() []ValidationRule +} + +// ValidationRule represents a single x-kubernetes-validations rule. +type ValidationRule interface { + Rule() string + Message() string + MessageExpression() string + FieldPath() string } // SchemaOrBool contains either a schema or a boolean indicating if the object diff --git a/staging/src/k8s.io/apiserver/pkg/cel/common/equality.go b/staging/src/k8s.io/apiserver/pkg/cel/common/equality.go new file mode 100644 index 0000000000000..a271cae795ecf --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/cel/common/equality.go @@ -0,0 +1,296 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "reflect" +) + +// CorrelatedObject represents a node in a tree of objects that are being +// validated. It is used to keep track of the old value of an object during +// traversal of the new value. 
It is also used to cache the results of +// DeepEqual comparisons between the old and new values of objects. +// +// All receiver functions support being called on `nil` to support ergonomic +// recursive descent. The nil `CorrelatedObject` represents an uncorrelatable +// node in the tree. +// +// CorrelatedObject is not thread-safe. It is the responsibility of the caller +// to handle concurrency, if any. +type CorrelatedObject struct { + // Currently correlated old value during traversal of the schema/object + OldValue interface{} + + // Value being validated + Value interface{} + + // Schema used for validation of this value. The schema is also used + // to determine how to correlate the old object. + Schema Schema + + // Scratch space below, may change during validation + + // Cached comparison result of DeepEqual of `value` and `thunk.oldValue` + comparisonResult *bool + + // Cached map representation of a map-type list, or nil if not map-type list + mapList MapList + + // Children spawned by a call to `Validate` on this object + // key is either a string or an index, depending upon whether `value` is + // a map or a list, respectively. + // + // The list of children may be incomplete depending upon whether the internal + // logic of kube-openapi's SchemaValidator short-circuited before + // reaching all of the children. + // + // It should be expected to have an entry for either all of the children, or + // none of them. + children map[interface{}]*CorrelatedObject +} + +func NewCorrelatedObject(new, old interface{}, schema Schema) *CorrelatedObject { + return &CorrelatedObject{ + OldValue: old, + Value: new, + Schema: schema, + } +} + +// If OldValue or Value is not a list, or the index is out of bounds of the +// Value list, returns nil. +// If oldValue is a list, this considers the x-list-type to decide how to +// correlate old values: +// +// If listType is map, creates a map representation of the list using the designated +// map-keys, caches it for future calls, and returns the map value, or nil if +// the correlated key is not in the old map. +// +// Otherwise, if the list type is not correlatable, this function returns nil. +func (r *CorrelatedObject) correlateOldValueForChildAtNewIndex(index int) interface{} { + oldAsList, ok := r.OldValue.([]interface{}) + if !ok { + return nil + } + + asList, ok := r.Value.([]interface{}) + if !ok { + return nil + } else if len(asList) <= index { + // Cannot correlate out of bounds index + return nil + } + + listType := r.Schema.XListType() + switch listType { + case "map": + // Look up keys for this index in current object + currentElement := asList[index] + + oldList := r.mapList + if oldList == nil { + oldList = MakeMapList(r.Schema, oldAsList) + r.mapList = oldList + } + return oldList.Get(currentElement) + + case "set": + // Are sets correlatable? Only if the old value equals the current value. + // We might be able to support this, but do not currently see a lot + // of value + // (would allow you to add/remove items from sets with ratcheting but not change them) + return nil + case "": + fallthrough + case "atomic": + // Atomic lists are the default and are not correlatable by item + // Ratcheting is not available on a per-index basis + return nil + default: + // Unrecognized list type. Assume non-correlatable.
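+ // No per-item old value can be returned, same as the atomic case above.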
+ return nil + } +} + +// CachedDeepEqual is equivalent to reflect.DeepEqual, but caches the +// results in the tree of ratchetInvocationScratch objects on the way: +// +// For objects and arrays, this function will make a best effort to make +// use of past DeepEqual checks performed by this Node's children, if available. +// +// If a lazy computation could not be found for all children possibly due +// to validation logic short circuiting and skipping the children, then +// this function simply defers to reflect.DeepEqual. +func (r *CorrelatedObject) CachedDeepEqual() (res bool) { + if r == nil { + // Uncorrelatable node is not considered equal to its old value + return false + } else if r.comparisonResult != nil { + return *r.comparisonResult + } + + defer func() { + r.comparisonResult = &res + }() + + if r.Value == nil && r.OldValue == nil { + return true + } else if r.Value == nil || r.OldValue == nil { + return false + } + + oldAsArray, oldIsArray := r.OldValue.([]interface{}) + newAsArray, newIsArray := r.Value.([]interface{}) + + oldAsMap, oldIsMap := r.OldValue.(map[string]interface{}) + newAsMap, newIsMap := r.Value.(map[string]interface{}) + + // If old and new are not the same type, they are not equal + if (oldIsArray != newIsArray) || oldIsMap != newIsMap { + return false + } + + // Objects are known to be same type of (map, slice, or primitive) + switch { + case oldIsArray: + // Both arrays case. oldIsArray == newIsArray + if len(oldAsArray) != len(newAsArray) { + return false + } + + for i := range newAsArray { + child := r.Index(i) + if child == nil { + if r.mapList == nil { + // Treat non-correlatable array as a unit with reflect.DeepEqual + return reflect.DeepEqual(oldAsArray, newAsArray) + } + + // If array is correlatable, but old not found. Just short circuit + // comparison + return false + + } else if !child.CachedDeepEqual() { + // If one child is not equal the entire object is not equal + return false + } + } + + return true + case oldIsMap: + // Both maps case. oldIsMap == newIsMap + if len(oldAsMap) != len(newAsMap) { + return false + } + + for k := range newAsMap { + child := r.Key(k) + if child == nil { + // Un-correlatable child due to key change. + // Objects are not equal. + return false + } else if !child.CachedDeepEqual() { + // If one child is not equal the entire object is not equal + return false + } + } + + return true + + default: + // Primitive: use reflect.DeepEqual + return reflect.DeepEqual(r.OldValue, r.Value) + } +} + +// Key returns the child of the receiver with the given name. +// Returns nil if the given name is does not exist in the new object, or its +// value is not correlatable to an old value. +// If receiver is nil or if the new value is not an object/map, returns nil. 
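+//
+// A minimal usage sketch (hypothetical caller names, not part of this change):
+// a ratcheting validator walks the new object with Key/Index and consults
+// CachedDeepEqual to decide whether a node is unchanged from the old object:
+//
+//	root := NewCorrelatedObject(newObj, oldObj, schema)
+//	if spec := root.Key("spec"); spec != nil && spec.CachedDeepEqual() {
+//		// "spec" correlates to an identical old value, so pre-existing
+//		// validation errors beneath it may be tolerated (ratcheted).
+//	}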
+func (r *CorrelatedObject) Key(field string) *CorrelatedObject { + if r == nil || r.Schema == nil { + return nil + } else if existing, exists := r.children[field]; exists { + return existing + } + + // Find correlated old value + oldAsMap, okOld := r.OldValue.(map[string]interface{}) + newAsMap, okNew := r.Value.(map[string]interface{}) + if !okOld || !okNew { + return nil + } + + oldValueForField, okOld := oldAsMap[field] + newValueForField, okNew := newAsMap[field] + if !okOld || !okNew { + return nil + } + + var propertySchema Schema + if prop, exists := r.Schema.Properties()[field]; exists { + propertySchema = prop + } else if addP := r.Schema.AdditionalProperties(); addP != nil && addP.Schema() != nil { + propertySchema = addP.Schema() + } else { + return nil + } + + if r.children == nil { + r.children = make(map[interface{}]*CorrelatedObject, len(newAsMap)) + } + + res := NewCorrelatedObject(newValueForField, oldValueForField, propertySchema) + r.children[field] = res + return res +} + +// Index returns the child of the receiver at the given index. +// Returns nil if the given index is out of bounds, or its value is not +// correlatable to an old value. +// If receiver is nil or if the new value is not an array, returns nil. +func (r *CorrelatedObject) Index(i int) *CorrelatedObject { + if r == nil || r.Schema == nil { + return nil + } else if existing, exists := r.children[i]; exists { + return existing + } + + asList, ok := r.Value.([]interface{}) + if !ok || len(asList) <= i { + return nil + } + + oldValueForIndex := r.correlateOldValueForChildAtNewIndex(i) + if oldValueForIndex == nil { + return nil + } + var itemSchema Schema + if i := r.Schema.Items(); i != nil { + itemSchema = i + } else { + return nil + } + + if r.children == nil { + r.children = make(map[interface{}]*CorrelatedObject, len(asList)) + } + + res := NewCorrelatedObject(asList[i], oldValueForIndex, itemSchema) + r.children[i] = res + return res +} diff --git a/staging/src/k8s.io/apiserver/pkg/cel/common/equality_test.go b/staging/src/k8s.io/apiserver/pkg/cel/common/equality_test.go new file mode 100644 index 0000000000000..50c5146ae451f --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/cel/common/equality_test.go @@ -0,0 +1,756 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common_test + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apiserver/pkg/cel/common" + "k8s.io/apiserver/pkg/cel/openapi" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +type TestCase struct { + Name string + + // Expected old value after traversal. If nil, then the traversal should fail. + OldValue interface{} + + // Expected value after traversal. If nil, then the traversal should fail. + NewValue interface{} + + // Whether OldValue and NewValue are considered to be equal. + // Defaults to reflect.DeepEqual comparison of the two. 
Can be overridden to + // true here if the two values are not DeepEqual, but are considered equal + // for instance due to map-list reordering. + ExpectEqual bool + + // Schema to provide to the correlated object + Schema common.Schema + + // Array of field names and indexes to traverse to get to the value + KeyPath []interface{} + + // Root object to traverse from + RootObject interface{} + RootOldObject interface{} +} + +func (c TestCase) Run() error { + // Create the correlated object + correlatedObject := common.NewCorrelatedObject(c.RootObject, c.RootOldObject, c.Schema) + + // Traverse the correlated object + var err error + for _, key := range c.KeyPath { + if correlatedObject == nil { + break + } + + switch k := key.(type) { + case string: + correlatedObject = correlatedObject.Key(k) + case int: + correlatedObject = correlatedObject.Index(k) + default: + return errors.New("key must be a string or int") + } + if err != nil { + return err + } + } + + if correlatedObject == nil { + if c.OldValue != nil || c.NewValue != nil { + return fmt.Errorf("expected non-nil value, got nil") + } + } else { + // Check that the correlated object has the expected values + if !reflect.DeepEqual(correlatedObject.Value, c.NewValue) { + return fmt.Errorf("expected value %v, got %v", c.NewValue, correlatedObject.Value) + } + if !reflect.DeepEqual(correlatedObject.OldValue, c.OldValue) { + return fmt.Errorf("expected old value %v, got %v", c.OldValue, correlatedObject.OldValue) + } + + // Check that the correlated object is considered equal to the expected value + if (c.ExpectEqual || reflect.DeepEqual(correlatedObject.Value, correlatedObject.OldValue)) != correlatedObject.CachedDeepEqual() { + return fmt.Errorf("expected equal, got not equal") + } + } + + return nil +} + +// Creates a *spec.Schema Schema by decoding the given YAML. Panics on error +func mustSchema(source string) *openapi.Schema { + d := yaml.NewYAMLOrJSONDecoder(strings.NewReader(source), 4096) + res := &spec.Schema{} + if err := d.Decode(res); err != nil { + panic(err) + } + return &openapi.Schema{Schema: res} +} + +// Creates an *unstructured by decoding the given YAML. Panics on error +func mustUnstructured(source string) interface{} { + d := yaml.NewYAMLOrJSONDecoder(strings.NewReader(source), 4096) + var res interface{} + if err := d.Decode(&res); err != nil { + panic(err) + } + return res +} + +func TestCorrelation(t *testing.T) { + // Tests ensure that the output of following keypath using the given + // schema and root objects yields the provided new value and old value. + // If new or old are nil, then ensures that the traversal failed due to + // uncorrelatable field path. + // Also confirms that CachedDeepEqual output is equal to expected result of + // reflect.DeepEqual of the new and old values. 
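+	// Each case is executed through TestCase.Run above: it builds a
+	// CorrelatedObject from the root objects, follows KeyPath via Key/Index,
+	// and then compares Value, OldValue, and CachedDeepEqual with expectations.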
+ cases := []TestCase{ + { + Name: "Basic Key", + RootObject: mustUnstructured(`a: b`), + RootOldObject: mustUnstructured(`a: b`), + Schema: mustSchema(` + properties: + a: { type: string } + `), + KeyPath: []interface{}{"a"}, + NewValue: "b", + OldValue: "b", + }, + { + Name: "Atomic Array not correlatable", + RootObject: mustUnstructured(`[a, b]`), + RootOldObject: mustUnstructured(`[a, b]`), + Schema: mustSchema(` + items: + type: string + `), + KeyPath: []interface{}{1}, + }, + { + Name: "Added Key Not In Old Object", + RootObject: mustUnstructured(` + a: b + c: d + `), + RootOldObject: mustUnstructured(` + a: b + `), + Schema: mustSchema(` + properties: + a: { type: string } + c: { type: string } + `), + KeyPath: []interface{}{"c"}, + }, + { + Name: "Added Index Not In Old Object", + RootObject: mustUnstructured(` + - a + - b + - c + `), + RootOldObject: mustUnstructured(` + - a + - b + `), + Schema: mustSchema(` + items: + type: string + `), + KeyPath: []interface{}{2}, + }, + { + Name: "Changed Index In Old Object not correlatable", + RootObject: []interface{}{ + "a", + "b", + }, + RootOldObject: []interface{}{ + "a", + "oldB", + }, + Schema: mustSchema(` + items: + type: string + `), + KeyPath: []interface{}{1}, + }, + { + Name: "Changed Index In Nested Old Object", + RootObject: []interface{}{ + "a", + "b", + }, + RootOldObject: []interface{}{ + "a", + "oldB", + }, + Schema: mustSchema(` + items: + type: string + `), + KeyPath: []interface{}{}, + NewValue: []interface{}{"a", "b"}, + OldValue: []interface{}{"a", "oldB"}, + }, + { + Name: "Changed Key In Old Object", + RootObject: map[string]interface{}{ + "a": "b", + }, + RootOldObject: map[string]interface{}{ + "a": "oldB", + }, + Schema: mustSchema(` + properties: + a: { type: string } + `), + KeyPath: []interface{}{"a"}, + NewValue: "b", + OldValue: "oldB", + }, + { + Name: "Replaced Key In Old Object", + RootObject: map[string]interface{}{ + "a": "b", + }, + RootOldObject: map[string]interface{}{ + "b": "a", + }, + Schema: mustSchema(` + properties: + a: { type: string } + `), + KeyPath: []interface{}{}, + NewValue: map[string]interface{}{"a": "b"}, + OldValue: map[string]interface{}{"b": "a"}, + }, + { + Name: "Added Key In Old Object", + RootObject: map[string]interface{}{ + "a": "b", + }, + RootOldObject: map[string]interface{}{}, + Schema: mustSchema(` + properties: + a: { type: string } + `), + KeyPath: []interface{}{}, + NewValue: map[string]interface{}{"a": "b"}, + OldValue: map[string]interface{}{}, + }, + { + Name: "Changed list to map", + RootObject: map[string]interface{}{ + "a": "b", + }, + RootOldObject: []interface{}{"a", "b"}, + Schema: mustSchema(` + properties: + a: { type: string } + `), + KeyPath: []interface{}{}, + NewValue: map[string]interface{}{"a": "b"}, + OldValue: []interface{}{"a", "b"}, + }, + { + Name: "Changed string to map", + RootObject: map[string]interface{}{ + "a": "b", + }, + RootOldObject: "a string", + Schema: mustSchema(` + properties: + a: { type: string } + `), + KeyPath: []interface{}{}, + NewValue: map[string]interface{}{"a": "b"}, + OldValue: "a string", + }, + { + Name: "Map list type", + RootObject: mustUnstructured(` + foo: + - bar: baz + val: newBazValue + `), + RootOldObject: mustUnstructured(` + foo: + - bar: fizz + val: fizzValue + - bar: baz + val: bazValue + `), + Schema: mustSchema(` + properties: + foo: + type: array + items: + type: object + properties: + bar: + type: string + val: + type: string + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: + - bar + `), + 
KeyPath: []interface{}{"foo", 0, "val"}, + NewValue: "newBazValue", + OldValue: "bazValue", + }, + { + Name: "Atomic list item should not correlate", + RootObject: mustUnstructured(` + foo: + - bar: baz + val: newValue + `), + RootOldObject: mustUnstructured(` + foo: + - bar: fizz + val: fizzValue + - bar: baz + val: barValue + `), + Schema: mustSchema(` + properties: + foo: + type: array + items: + type: object + properties: + bar: + type: string + val: + type: string + x-kubernetes-list-type: atomic + `), + KeyPath: []interface{}{"foo", 0, "val"}, + }, + { + Name: "Map used inside of map list type should correlate", + RootObject: mustUnstructured(` + foo: + - key: keyValue + bar: + baz: newValue + `), + RootOldObject: mustUnstructured(` + foo: + - key: otherKeyValue + bar: + baz: otherOldValue + - key: altKeyValue + bar: + baz: altOldValue + - key: keyValue + bar: + baz: oldValue + `), + Schema: mustSchema(` + properties: + foo: + type: array + items: + type: object + properties: + key: + type: string + bar: + type: object + properties: + baz: + type: string + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: + - key + `), + KeyPath: []interface{}{"foo", 0, "bar", "baz"}, + NewValue: "newValue", + OldValue: "oldValue", + }, + { + Name: "Map used inside another map should correlate", + RootObject: mustUnstructured(` + foo: + key: keyValue + bar: + baz: newValue + `), + RootOldObject: mustUnstructured(` + foo: + key: otherKeyValue + bar: + baz: otherOldValue + altFoo: + key: altKeyValue + bar: + baz: altOldValue + otherFoo: + key: keyValue + bar: + baz: oldValue + `), + Schema: mustSchema(` + properties: + foo: + type: object + properties: + key: + type: string + bar: + type: object + properties: + baz: + type: string + `), + KeyPath: []interface{}{"foo", "bar"}, + NewValue: map[string]interface{}{"baz": "newValue"}, + OldValue: map[string]interface{}{"baz": "otherOldValue"}, + }, + { + Name: "Nested map equal to old", + RootObject: mustUnstructured(` + foo: + key: newKeyValue + bar: + baz: value + `), + RootOldObject: mustUnstructured(` + foo: + key: keyValue + bar: + baz: value + `), + Schema: mustSchema(` + properties: + foo: + type: object + properties: + key: + type: string + bar: + type: object + properties: + baz: + type: string + `), + KeyPath: []interface{}{"foo", "bar"}, + NewValue: map[string]interface{}{"baz": "value"}, + OldValue: map[string]interface{}{"baz": "value"}, + }, + { + Name: "Re-ordered list considered equal to old value due to map keys", + RootObject: mustUnstructured(` + foo: + - key: keyValue + bar: + baz: value + - key: altKeyValue + bar: + baz: altValue + `), + RootOldObject: mustUnstructured(` + foo: + - key: altKeyValue + bar: + baz: altValue + - key: keyValue + bar: + baz: value + `), + Schema: mustSchema(` + properties: + foo: + type: array + items: + type: object + properties: + key: + type: string + bar: + type: object + properties: + baz: + type: string + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: + - key + `), + KeyPath: []interface{}{"foo"}, + NewValue: mustUnstructured(` + - key: keyValue + bar: + baz: value + - key: altKeyValue + bar: + baz: altValue + `), + OldValue: mustUnstructured(` + - key: altKeyValue + bar: + baz: altValue + - key: keyValue + bar: + baz: value + `), + ExpectEqual: true, + }, + { + Name: "Correlate unknown string key via additional properties", + RootObject: mustUnstructured(` + foo: + key: keyValue + bar: + baz: newValue + `), + RootOldObject: mustUnstructured(` + foo: + key: otherKeyValue + bar: + baz: 
otherOldValue + `), + Schema: mustSchema(` + properties: + foo: + type: object + additionalProperties: + properties: + baz: + type: string + `), + KeyPath: []interface{}{"foo", "bar", "baz"}, + NewValue: "newValue", + OldValue: "otherOldValue", + }, + { + Name: "Changed map value", + RootObject: mustUnstructured(` + foo: + key: keyValue + bar: + baz: newValue + `), + RootOldObject: mustUnstructured(` + foo: + key: keyValue + bar: + baz: oldValue + `), + Schema: mustSchema(` + properties: + foo: + type: object + properties: + key: + type: string + bar: + type: object + properties: + baz: + type: string + `), + KeyPath: []interface{}{"foo", "bar"}, + NewValue: mustUnstructured(` + baz: newValue + `), + OldValue: mustUnstructured(` + baz: oldValue + `), + }, + { + Name: "Changed nested map value", + RootObject: mustUnstructured(` + foo: + key: keyValue + bar: + baz: newValue + `), + RootOldObject: mustUnstructured(` + foo: + key: keyValue + bar: + baz: oldValue + `), + Schema: mustSchema(` + properties: + foo: + type: object + properties: + key: + type: string + bar: + type: object + properties: + baz: + type: string + `), + KeyPath: []interface{}{"foo"}, + NewValue: mustUnstructured(` + key: keyValue + bar: + baz: newValue + `), + OldValue: mustUnstructured(` + key: keyValue + bar: + baz: oldValue + `), + }, + { + Name: "unchanged list type set with atomic map values", + Schema: mustSchema(` + properties: + foo: + type: array + items: + type: object + x-kubernetes-map-type: atomic + properties: + key: + type: string + bar: + type: string + x-kubernetes-list-type: set + `), + RootObject: mustUnstructured(` + foo: + - key: key1 + bar: value1 + - key: key2 + bar: value2 + `), + RootOldObject: mustUnstructured(` + foo: + - key: key1 + bar: value1 + - key: key2 + bar: value2 + `), + KeyPath: []interface{}{"foo"}, + NewValue: mustUnstructured(` + - key: key1 + bar: value1 + - key: key2 + bar: value2 + `), + OldValue: mustUnstructured(` + - key: key1 + bar: value1 + - key: key2 + bar: value2 + `), + }, + { + Name: "changed list type set with atomic map values", + Schema: mustSchema(` + properties: + foo: + type: array + items: + type: object + x-kubernetes-map-type: atomic + properties: + key: + type: string + bar: + type: string + x-kubernetes-list-type: set + `), + RootObject: mustUnstructured(` + foo: + - key: key1 + bar: value1 + - key: key2 + bar: newValue2 + `), + RootOldObject: mustUnstructured(` + foo: + - key: key1 + bar: value1 + - key: key2 + bar: value2 + `), + KeyPath: []interface{}{"foo"}, + NewValue: mustUnstructured(` + - key: key1 + bar: value1 + - key: key2 + bar: newValue2 + `), + OldValue: mustUnstructured(` + - key: key1 + bar: value1 + - key: key2 + bar: value2 + `), + }, + { + Name: "elements of list type set with atomic map values are not correlated", + Schema: mustSchema(` + properties: + foo: + type: array + items: + type: object + x-kubernetes-map-type: atomic + properties: + key: + type: string + bar: + type: string + x-kubernetes-list-type: set + `), + RootObject: mustUnstructured(` + foo: + - key: key1 + bar: value1 + - key: key2 + bar: newValue2 + `), + RootOldObject: mustUnstructured(` + foo: + - key: key1 + bar: value1 + - key: key2 + bar: value2 + `), + KeyPath: []interface{}{"foo", 0, "key"}, + NewValue: nil, + }, + } + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + if err := c.Run(); err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/cel/common/schemas.go 
b/staging/src/k8s.io/apiserver/pkg/cel/common/schemas.go index 3fdd3a6c8baab..19392babeb2c6 100644 --- a/staging/src/k8s.io/apiserver/pkg/cel/common/schemas.go +++ b/staging/src/k8s.io/apiserver/pkg/cel/common/schemas.go @@ -165,7 +165,11 @@ func SchemaDeclType(s Schema, isResourceRoot bool) *apiservercel.DeclType { // unicode code point can be up to 4 bytes long) strWithMaxLength.MaxElements = zeroIfNegative(*s.MaxLength()) * 4 } else { - strWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s) + if len(s.Enum()) > 0 { + strWithMaxLength.MaxElements = estimateMaxStringEnumLength(s) + } else { + strWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s) + } } return strWithMaxLength case "boolean": @@ -239,6 +243,19 @@ func estimateMaxStringLengthPerRequest(s Schema) int64 { } } +// estimateMaxStringLengthPerRequest estimates the maximum string length (in characters) +// that has a set of enum values. +// The result of the estimation is the length of the longest possible value. +func estimateMaxStringEnumLength(s Schema) int64 { + var maxLength int64 + for _, v := range s.Enum() { + if s, ok := v.(string); ok && int64(len(s)) > maxLength { + maxLength = int64(len(s)) + } + } + return maxLength +} + // estimateMaxArrayItemsPerRequest estimates the maximum number of array items with // the provided minimum serialized size that can fit into a single request. func estimateMaxArrayItemsFromMinSize(minSize int64) int64 { diff --git a/staging/src/k8s.io/apiserver/pkg/cel/common/values.go b/staging/src/k8s.io/apiserver/pkg/cel/common/values.go index d9034a80fb2a4..c8279f01371bf 100644 --- a/staging/src/k8s.io/apiserver/pkg/cel/common/values.go +++ b/staging/src/k8s.io/apiserver/pkg/cel/common/values.go @@ -84,18 +84,22 @@ func UnstructuredToVal(unstructured interface{}, schema Schema) ref.Val { }, } } - // A object with x-kubernetes-preserve-unknown-fields but no properties or additionalProperties is treated - // as an empty object. - if schema.IsXPreserveUnknownFields() { - return &unstructuredMap{ - value: m, - schema: schema, - propSchema: func(key string) (Schema, bool) { - return nil, false - }, - } + + // properties and additionalProperties are mutual exclusive, but nothing prevents the situation + // where both are missing. + // An object that (1) has no properties (2) has no additionalProperties or additionalProperties == false + // is treated as an empty object. + // An object that has additionalProperties == true is treated as an unstructured map. + // An object that has x-kubernetes-preserve-unknown-field extension set is treated as an unstructured map. + // Empty object vs unstructured map is differentiated by unstructuredMap implementation with the set schema. + // The resulting result remains the same. 
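+ // In each of these cases propSchema reports that no per-key schema is known.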
+ return &unstructuredMap{ + value: m, + schema: schema, + propSchema: func(key string) (Schema, bool) { + return nil, false + }, } - return types.NewErr("invalid object type, expected either Properties or AdditionalProperties with Allows=true and non-empty Schema") } if schema.Type() == "array" { diff --git a/staging/src/k8s.io/apiserver/pkg/cel/library/cost_test.go b/staging/src/k8s.io/apiserver/pkg/cel/library/cost_test.go index 479e9ed1fe4d0..89768812d23e8 100644 --- a/staging/src/k8s.io/apiserver/pkg/cel/library/cost_test.go +++ b/staging/src/k8s.io/apiserver/pkg/cel/library/cost_test.go @@ -409,6 +409,124 @@ func TestAuthzLibrary(t *testing.T) { } } +func TestQuantityCost(t *testing.T) { + cases := []struct { + name string + expr string + expectEstimatedCost checker.CostEstimate + expectRuntimeCost uint64 + }{ + { + name: "path", + expr: `quantity("12Mi")`, + expectEstimatedCost: checker.CostEstimate{Min: 1, Max: 1}, + expectRuntimeCost: 1, + }, + { + name: "isQuantity", + expr: `isQuantity("20")`, + expectEstimatedCost: checker.CostEstimate{Min: 1, Max: 1}, + expectRuntimeCost: 1, + }, + { + name: "isQuantity_megabytes", + expr: `isQuantity("20M")`, + expectEstimatedCost: checker.CostEstimate{Min: 1, Max: 1}, + expectRuntimeCost: 1, + }, + { + name: "equality_reflexivity", + expr: `quantity("200M") == quantity("200M")`, + expectEstimatedCost: checker.CostEstimate{Min: 3, Max: 1844674407370955266}, + expectRuntimeCost: 3, + }, + { + name: "equality_symmetry", + expr: `quantity("200M") == quantity("0.2G") && quantity("0.2G") == quantity("200M")`, + expectEstimatedCost: checker.CostEstimate{Min: 3, Max: 3689348814741910532}, + expectRuntimeCost: 6, + }, + { + name: "equality_transitivity", + expr: `quantity("2M") == quantity("0.002G") && quantity("2000k") == quantity("2M") && quantity("0.002G") == quantity("2000k")`, + expectEstimatedCost: checker.CostEstimate{Min: 3, Max: 5534023222112865798}, + expectRuntimeCost: 9, + }, + { + name: "quantity_less", + expr: `quantity("50M").isLessThan(quantity("50Mi"))`, + expectEstimatedCost: checker.CostEstimate{Min: 3, Max: 3}, + expectRuntimeCost: 3, + }, + { + name: "quantity_greater", + expr: `quantity("50Mi").isGreaterThan(quantity("50M"))`, + expectEstimatedCost: checker.CostEstimate{Min: 3, Max: 3}, + expectRuntimeCost: 3, + }, + { + name: "compare_equal", + expr: `quantity("200M").compareTo(quantity("0.2G")) > 0`, + expectEstimatedCost: checker.CostEstimate{Min: 4, Max: 4}, + expectRuntimeCost: 4, + }, + { + name: "add_quantity", + expr: `quantity("50k").add(quantity("20")) == quantity("50.02k")`, + expectEstimatedCost: checker.CostEstimate{Min: 5, Max: 1844674407370955268}, + expectRuntimeCost: 5, + }, + { + name: "sub_quantity", + expr: `quantity("50k").sub(quantity("20")) == quantity("49.98k")`, + expectEstimatedCost: checker.CostEstimate{Min: 5, Max: 1844674407370955268}, + expectRuntimeCost: 5, + }, + { + name: "sub_int", + expr: `quantity("50k").sub(20) == quantity("49980")`, + expectEstimatedCost: checker.CostEstimate{Min: 4, Max: 1844674407370955267}, + expectRuntimeCost: 4, + }, + { + name: "arith_chain_1", + expr: `quantity("50k").add(20).sub(quantity("100k")).asInteger() > 0`, + expectEstimatedCost: checker.CostEstimate{Min: 6, Max: 6}, + expectRuntimeCost: 6, + }, + { + name: "arith_chain", + expr: `quantity("50k").add(20).sub(quantity("100k")).sub(-50000).asInteger() > 0`, + expectEstimatedCost: checker.CostEstimate{Min: 7, Max: 7}, + expectRuntimeCost: 7, + }, + { + name: "as_integer", + expr: `quantity("50k").asInteger() > 
0`, + expectEstimatedCost: checker.CostEstimate{Min: 3, Max: 3}, + expectRuntimeCost: 3, + }, + { + name: "is_integer", + expr: `quantity("50").isInteger()`, + expectEstimatedCost: checker.CostEstimate{Min: 2, Max: 2}, + expectRuntimeCost: 2, + }, + { + name: "as_float", + expr: `quantity("50.703k").asApproximateFloat() > 0.0`, + expectEstimatedCost: checker.CostEstimate{Min: 3, Max: 3}, + expectRuntimeCost: 3, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + testCost(t, tc.expr, tc.expectEstimatedCost, tc.expectRuntimeCost) + }) + } +} + func testCost(t *testing.T, expr string, expectEsimatedCost checker.CostEstimate, expectRuntimeCost uint64) { est := &CostEstimator{SizeEstimator: &testCostEstimator{}} env, err := cel.NewEnv( @@ -417,6 +535,7 @@ func testCost(t *testing.T, expr string, expectEsimatedCost checker.CostEstimate Regex(), Lists(), Authz(), + Quantity(), ) if err != nil { t.Fatalf("%v", err) diff --git a/staging/src/k8s.io/apiserver/pkg/cel/openapi/adaptor.go b/staging/src/k8s.io/apiserver/pkg/cel/openapi/adaptor.go index 0e2cc6e2b2e85..bc7b0d8c9590b 100644 --- a/staging/src/k8s.io/apiserver/pkg/cel/openapi/adaptor.go +++ b/staging/src/k8s.io/apiserver/pkg/cel/openapi/adaptor.go @@ -54,6 +54,10 @@ func (s *Schema) Format() string { return s.Schema.Format } +func (s *Schema) Pattern() string { + return s.Schema.Pattern +} + func (s *Schema) Items() common.Schema { if s.Schema.Items == nil || s.Schema.Items.Schema == nil { return nil @@ -86,14 +90,50 @@ func (s *Schema) Default() any { return s.Schema.Default } +func (s *Schema) Minimum() *float64 { + return s.Schema.Minimum +} + +func (s *Schema) IsExclusiveMinimum() bool { + return s.Schema.ExclusiveMinimum +} + +func (s *Schema) Maximum() *float64 { + return s.Schema.Maximum +} + +func (s *Schema) IsExclusiveMaximum() bool { + return s.Schema.ExclusiveMaximum +} + +func (s *Schema) MultipleOf() *float64 { + return s.Schema.MultipleOf +} + +func (s *Schema) UniqueItems() bool { + return s.Schema.UniqueItems +} + +func (s *Schema) MinItems() *int64 { + return s.Schema.MinItems +} + func (s *Schema) MaxItems() *int64 { return s.Schema.MaxItems } +func (s *Schema) MinLength() *int64 { + return s.Schema.MinLength +} + func (s *Schema) MaxLength() *int64 { return s.Schema.MaxLength } +func (s *Schema) MinProperties() *int64 { + return s.Schema.MinProperties +} + func (s *Schema) MaxProperties() *int64 { return s.Schema.MaxProperties } @@ -110,6 +150,40 @@ func (s *Schema) Nullable() bool { return s.Schema.Nullable } +func (s *Schema) AllOf() []common.Schema { + var res []common.Schema + for _, nestedSchema := range s.Schema.AllOf { + nestedSchema := nestedSchema + res = append(res, &Schema{&nestedSchema}) + } + return res +} + +func (s *Schema) AnyOf() []common.Schema { + var res []common.Schema + for _, nestedSchema := range s.Schema.AnyOf { + nestedSchema := nestedSchema + res = append(res, &Schema{&nestedSchema}) + } + return res +} + +func (s *Schema) OneOf() []common.Schema { + var res []common.Schema + for _, nestedSchema := range s.Schema.OneOf { + nestedSchema := nestedSchema + res = append(res, &Schema{&nestedSchema}) + } + return res +} + +func (s *Schema) Not() common.Schema { + if s.Schema.Not == nil { + return nil + } + return &Schema{s.Schema.Not} +} + func (s *Schema) IsXIntOrString() bool { return isXIntOrString(s.Schema) } @@ -126,10 +200,18 @@ func (s *Schema) XListType() string { return getXListType(s.Schema) } +func (s *Schema) XMapType() string { + return getXMapType(s.Schema) +} 
+ func (s *Schema) XListMapKeys() []string { return getXListMapKeys(s.Schema) } +func (s *Schema) XValidations() []common.ValidationRule { + return getXValidations(s.Schema) +} + func (s *Schema) WithTypeAndObjectMeta() common.Schema { return &Schema{common.WithTypeAndObjectMeta(s.Schema)} } diff --git a/staging/src/k8s.io/apiserver/pkg/cel/openapi/extensions.go b/staging/src/k8s.io/apiserver/pkg/cel/openapi/extensions.go index 6a2f830320ba1..3bb3bccf05782 100644 --- a/staging/src/k8s.io/apiserver/pkg/cel/openapi/extensions.go +++ b/staging/src/k8s.io/apiserver/pkg/cel/openapi/extensions.go @@ -18,6 +18,7 @@ package openapi import ( "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apiserver/pkg/cel/common" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -47,6 +48,11 @@ func getXListType(schema *spec.Schema) string { return s } +func getXMapType(schema *spec.Schema) string { + s, _ := schema.Extensions.GetString(extMapType) + return s +} + func getXListMapKeys(schema *spec.Schema) []string { mapKeys, ok := schema.Extensions.GetStringSlice(extListMapKeys) if !ok { @@ -55,8 +61,47 @@ func getXListMapKeys(schema *spec.Schema) []string { return mapKeys } +type ValidationRule struct { + RuleField string `json:"rule"` + MessageField string `json:"message"` + MessageExpressionField string `json:"messageExpression"` + PathField string `json:"fieldPath"` +} + +func (v ValidationRule) Rule() string { + return v.RuleField +} + +func (v ValidationRule) Message() string { + return v.MessageField +} + +func (v ValidationRule) FieldPath() string { + return v.PathField +} + +func (v ValidationRule) MessageExpression() string { + return v.MessageExpressionField +} + +// TODO: simplify +func getXValidations(schema *spec.Schema) []common.ValidationRule { + var rules []ValidationRule + err := schema.Extensions.GetObject(extValidations, &rules) + if err != nil { + return nil + } + results := make([]common.ValidationRule, len(rules)) + for i, rule := range rules { + results[i] = rule + } + return results +} + const extIntOrString = "x-kubernetes-int-or-string" const extEmbeddedResource = "x-kubernetes-embedded-resource" const extPreserveUnknownFields = "x-kubernetes-preserve-unknown-fields" const extListType = "x-kubernetes-list-type" +const extMapType = "x-kubernetes-map-type" const extListMapKeys = "x-kubernetes-list-map-keys" +const extValidations = "x-kubernetes-validations" diff --git a/staging/src/k8s.io/apiserver/pkg/cel/openapi/values_test.go b/staging/src/k8s.io/apiserver/pkg/cel/openapi/values_test.go index 0607327f8bba7..6914cce465f4d 100644 --- a/staging/src/k8s.io/apiserver/pkg/cel/openapi/values_test.go +++ b/staging/src/k8s.io/apiserver/pkg/cel/openapi/values_test.go @@ -97,6 +97,9 @@ var ( Type: []string{"object"}, AdditionalProperties: &spec.SchemaOrBool{Schema: stringSchema}, }} + emptyObjectSchema = &spec.Schema{ + SchemaProps: spec.SchemaProps{Type: []string{"object"}}, + } ) func TestEquality(t *testing.T) { @@ -331,6 +334,12 @@ func TestEquality(t *testing.T) { rhs: UnstructuredToVal([]interface{}{"a", "b", "c"}, atomicListSchema), equal: false, }, + { + name: "empty objects are equal", + lhs: UnstructuredToVal(map[string]interface{}{}, emptyObjectSchema), + rhs: UnstructuredToVal(map[string]interface{}{}, emptyObjectSchema), + equal: true, + }, { name: "identical objects are equal", lhs: UnstructuredToVal(map[string]interface{}{"field1": "a", "field2": "b"}, objectSchema), diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go 
b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go index 277bdcdfe5f60..64b3569d0d9f6 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -29,8 +29,11 @@ import ( "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" "k8s.io/apiserver/pkg/authentication/request/headerrequest" + "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" ) @@ -101,6 +104,18 @@ func withAuthentication(handler http.Handler, auth authenticator.Request, failed ) } + // http2 is an expensive protocol that is prone to abuse, + // see CVE-2023-44487 and CVE-2023-39325 for an example. + // Do not allow unauthenticated clients to keep these + // connections open (i.e. basically degrade them to the + // performance of http1 with keep-alive disabled). + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.UnauthenticatedHTTP2DOSMitigation) && req.ProtoMajor == 2 && isAnonymousUser(resp.User) { + // limit this connection to just this request, + // and then send a GOAWAY and tear down the TCP connection + // https://github.com/golang/net/commit/97aa3a539ec716117a9d15a4659a911f50d13c3c + w.Header().Set("Connection", "close") + } + req = req.WithContext(genericapirequest.WithUser(req.Context(), resp.User)) handler.ServeHTTP(w, req) }) @@ -108,6 +123,17 @@ func withAuthentication(handler http.Handler, auth authenticator.Request, failed func Unauthorized(s runtime.NegotiatedSerializer) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // http2 is an expensive protocol that is prone to abuse, + // see CVE-2023-44487 and CVE-2023-39325 for an example. + // Do not allow unauthenticated clients to keep these + // connections open (i.e. basically degrade them to the + // performance of http1 with keep-alive disabled). 
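+ // Unlike withAuthentication above, every request reaching this handler
+ // has already failed authentication, so no per-user check is needed here.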
+ if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.UnauthenticatedHTTP2DOSMitigation) && req.ProtoMajor == 2 { + // limit this connection to just this request, + // and then send a GOAWAY and tear down the TCP connection + // https://github.com/golang/net/commit/97aa3a539ec716117a9d15a4659a911f50d13c3c + w.Header().Set("Connection", "close") + } ctx := req.Context() requestInfo, found := genericapirequest.RequestInfoFrom(ctx) if !found { @@ -127,3 +153,15 @@ func audiencesAreAcceptable(apiAuds, responseAudiences authenticator.Audiences) return len(apiAuds.Intersect(responseAudiences)) > 0 } + +func isAnonymousUser(u user.Info) bool { + if u.GetName() == user.Anonymous { + return true + } + for _, group := range u.GetGroups() { + if group == user.AllUnauthenticated { + return true + } + } + return false +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication_test.go index 2bdde2741ebb2..7026a2961dcec 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication_test.go @@ -18,20 +18,31 @@ package filters import ( "context" + "crypto/tls" + "crypto/x509" "errors" + "io" + "net" "net/http" "net/http/httptest" + "sync/atomic" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" + "golang.org/x/net/http2" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + "k8s.io/apiserver/pkg/authentication/request/anonymous" "k8s.io/apiserver/pkg/authentication/request/headerrequest" "k8s.io/apiserver/pkg/authentication/user" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/kubernetes/scheme" + featuregatetesting "k8s.io/component-base/featuregate/testing" ) func TestAuthenticateRequestWithAud(t *testing.T) { @@ -465,3 +476,192 @@ func TestAuthenticateRequestClearHeaders(t *testing.T) { }) } } + +func TestUnauthenticatedHTTP2ClientConnectionClose(t *testing.T) { + s := httptest.NewUnstartedServer(WithAuthentication( + http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, _ = w.Write([]byte("ok")) }), + authenticator.RequestFunc(func(r *http.Request) (*authenticator.Response, bool, error) { + switch r.Header.Get("Authorization") { + case "known": + return &authenticator.Response{User: &user.DefaultInfo{Name: "panda"}}, true, nil + case "error": + return nil, false, errors.New("authn err") + case "anonymous": + return anonymous.NewAuthenticator().AuthenticateRequest(r) + case "anonymous_group": + return &authenticator.Response{User: &user.DefaultInfo{Groups: []string{user.AllUnauthenticated}}}, true, nil + default: + return nil, false, nil + } + }), + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r = r.WithContext(genericapirequest.WithRequestInfo(r.Context(), &genericapirequest.RequestInfo{})) + Unauthorized(scheme.Codecs).ServeHTTP(w, r) + }), + nil, + nil, + )) + + http2Options := &http2.Server{} + + if err := http2.ConfigureServer(s.Config, http2Options); err != nil { + t.Fatal(err) + } + + s.TLS = s.Config.TLSConfig + + s.StartTLS() + t.Cleanup(s.Close) + + const reqs = 4 + + cases := []struct { + name string + authorizationHeader string + skipHTTP2DOSMitigation bool + expectConnections uint64 + }{ + { + name: "known", + authorizationHeader: "known", + 
skipHTTP2DOSMitigation: false, + expectConnections: 1, + }, + { + name: "error", + authorizationHeader: "error", + skipHTTP2DOSMitigation: false, + expectConnections: reqs, + }, + { + name: "anonymous", + authorizationHeader: "anonymous", + skipHTTP2DOSMitigation: false, + expectConnections: reqs, + }, + { + name: "anonymous_group", + authorizationHeader: "anonymous_group", + skipHTTP2DOSMitigation: false, + expectConnections: reqs, + }, + { + name: "other", + authorizationHeader: "other", + skipHTTP2DOSMitigation: false, + expectConnections: reqs, + }, + + { + name: "known skip=true", + authorizationHeader: "known", + skipHTTP2DOSMitigation: true, + expectConnections: 1, + }, + { + name: "error skip=true", + authorizationHeader: "error", + skipHTTP2DOSMitigation: true, + expectConnections: 1, + }, + { + name: "anonymous skip=true", + authorizationHeader: "anonymous", + skipHTTP2DOSMitigation: true, + expectConnections: 1, + }, + { + name: "anonymous_group skip=true", + authorizationHeader: "anonymous_group", + skipHTTP2DOSMitigation: true, + expectConnections: 1, + }, + { + name: "other skip=true", + authorizationHeader: "other", + skipHTTP2DOSMitigation: true, + expectConnections: 1, + }, + } + + rootCAs := x509.NewCertPool() + rootCAs.AddCert(s.Certificate()) + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + f := func(t *testing.T, nextProto string, expectConnections uint64) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.UnauthenticatedHTTP2DOSMitigation, !tc.skipHTTP2DOSMitigation)() + + var localAddrs atomic.Uint64 // indicates how many TCP connection set up + + tlsConfig := &tls.Config{ + RootCAs: rootCAs, + NextProtos: []string{nextProto}, + } + + dailer := tls.Dialer{ + Config: tlsConfig, + } + + tr := &http.Transport{ + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := dailer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + + localAddrs.Add(1) + + return conn, nil + }, + } + + tr.MaxIdleConnsPerHost = 1 // allow http1 to have keep alive connections open + if nextProto == http2.NextProtoTLS { + // Disable connection pooling to avoid additional connections + // that cause the test to flake + tr.MaxIdleConnsPerHost = -1 + if err := http2.ConfigureTransport(tr); err != nil { + t.Fatal(err) + } + } + + client := &http.Client{ + Transport: tr, + } + + for i := 0; i < reqs; i++ { + req, err := http.NewRequest(http.MethodGet, s.URL, nil) + if err != nil { + t.Fatal(err) + } + if len(tc.authorizationHeader) > 0 { + req.Header.Set("Authorization", tc.authorizationHeader) + } + + resp, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } + + if expectConnections != localAddrs.Load() { + t.Fatalf("expect TCP connection: %d, actual: %d", expectConnections, localAddrs.Load()) + } + } + + t.Run(http2.NextProtoTLS, func(t *testing.T) { + f(t, http2.NextProtoTLS, tc.expectConnections) + }) + + // http1 connection reuse occasionally flakes on CI, skipping for now + // t.Run("http/1.1", func(t *testing.T) { + // f(t, "http/1.1", 1) + // }) + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go index 67a1790c56aa6..1ecf59d454389 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go +++ 
b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go @@ -20,6 +20,7 @@ import ( "net/http" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" tracing "k8s.io/component-base/tracing" @@ -32,7 +33,15 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler { otelhttp.WithPublicEndpoint(), otelhttp.WithTracerProvider(tp), } + wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Add the http.target attribute to the otelhttp span + // Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743 + if r.URL != nil { + trace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI())) + } + handler.ServeHTTP(w, r) + }) // With Noop TracerProvider, the otelhttp still handles context propagation. // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough - return otelhttp.NewHandler(handler, "KubernetesAPI", opts...) + return otelhttp.NewHandler(wrappedHandler, "KubernetesAPI", opts...) } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go index c110964fc4288..d3b501cf52a1f 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -267,7 +267,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc } requestInfo, _ := request.RequestInfoFrom(ctx) metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { - serveWatch(watcher, scope, outputMediaType, req, w, timeout) + serveWatch(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts)) }) return } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go index 7f85563699de8..2c2d3e4824b17 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go @@ -77,6 +77,96 @@ func (lazy *lazyAccept) String() string { return "unknown" } +// lazyAPIGroup implements String() string and it will +// lazily get Group from request info. +type lazyAPIGroup struct { + req *http.Request +} + +func (lazy *lazyAPIGroup) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.APIGroup + } + } + + return "unknown" +} + +// lazyAPIVersion implements String() string and it will +// lazily get Group from request info. +type lazyAPIVersion struct { + req *http.Request +} + +func (lazy *lazyAPIVersion) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.APIVersion + } + } + + return "unknown" +} + +// lazyName implements String() string and it will +// lazily get Group from request info. +type lazyName struct { + req *http.Request +} + +func (lazy *lazyName) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.Name + } + } + + return "unknown" +} + +// lazySubresource implements String() string and it will +// lazily get Group from request info. 
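+// The value reported is requestInfo.Subresource, or "unknown" when no
+// request info is available.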
+type lazySubresource struct { + req *http.Request +} + +func (lazy *lazySubresource) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.Subresource + } + } + + return "unknown" +} + +// lazyNamespace implements String() string and it will +// lazily get Group from request info. +type lazyNamespace struct { + req *http.Request +} + +func (lazy *lazyNamespace) String() string { + if lazy.req != nil { + ctx := lazy.req.Context() + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if ok { + return requestInfo.Namespace + } + } + + return "unknown" +} + // lazyAuditID implements Stringer interface to lazily retrieve // the audit ID associated with the request. type lazyAuditID struct { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/helpers_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/helpers_test.go index cdde785ea3691..81faefa7a070c 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/helpers_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/helpers_test.go @@ -85,6 +85,66 @@ func TestLazyVerb(t *testing.T) { assert.Equal(t, "WATCH", fmt.Sprintf("%v", verbWithReq)) } +func TestLazyApiGroup(t *testing.T) { + assert.Equal(t, "unknown", fmt.Sprintf("%v", &lazyAPIGroup{})) + + scopeWithEmptyReq := &lazyAPIGroup{&http.Request{}} + assert.Equal(t, "unknown", fmt.Sprintf("%v", scopeWithEmptyReq)) + + req := &http.Request{} + ctx := request.WithRequestInfo(context.TODO(), &request.RequestInfo{APIGroup: "apps"}) + scopeWithReq := &lazyAPIGroup{req: req.WithContext(ctx)} + assert.Equal(t, "apps", fmt.Sprintf("%v", scopeWithReq)) +} + +func TestLazyApiVersion(t *testing.T) { + assert.Equal(t, "unknown", fmt.Sprintf("%v", &lazyAPIVersion{})) + + scopeWithEmptyReq := &lazyAPIVersion{&http.Request{}} + assert.Equal(t, "unknown", fmt.Sprintf("%v", scopeWithEmptyReq)) + + req := &http.Request{} + ctx := request.WithRequestInfo(context.TODO(), &request.RequestInfo{APIVersion: "v1"}) + scopeWithReq := &lazyAPIVersion{req: req.WithContext(ctx)} + assert.Equal(t, "v1", fmt.Sprintf("%v", scopeWithReq)) +} + +func TestLazyName(t *testing.T) { + assert.Equal(t, "unknown", fmt.Sprintf("%v", &lazyName{})) + + scopeWithEmptyReq := &lazyName{&http.Request{}} + assert.Equal(t, "unknown", fmt.Sprintf("%v", scopeWithEmptyReq)) + + req := &http.Request{} + ctx := request.WithRequestInfo(context.TODO(), &request.RequestInfo{Name: "jaeger-76d45d6876-vqp8t"}) + scopeWithReq := &lazyName{req: req.WithContext(ctx)} + assert.Equal(t, "jaeger-76d45d6876-vqp8t", fmt.Sprintf("%v", scopeWithReq)) +} + +func TestLazySubresource(t *testing.T) { + assert.Equal(t, "unknown", fmt.Sprintf("%v", &lazySubresource{})) + + scopeWithEmptyReq := &lazySubresource{&http.Request{}} + assert.Equal(t, "unknown", fmt.Sprintf("%v", scopeWithEmptyReq)) + + req := &http.Request{} + ctx := request.WithRequestInfo(context.TODO(), &request.RequestInfo{Subresource: "binding"}) + scopeWithReq := &lazySubresource{req: req.WithContext(ctx)} + assert.Equal(t, "binding", fmt.Sprintf("%v", scopeWithReq)) +} + +func TestLazyNamespace(t *testing.T) { + assert.Equal(t, "unknown", fmt.Sprintf("%v", &lazyNamespace{})) + + scopeWithEmptyReq := &lazyNamespace{&http.Request{}} + assert.Equal(t, "unknown", fmt.Sprintf("%v", scopeWithEmptyReq)) + + req := &http.Request{} + ctx := request.WithRequestInfo(context.TODO(), &request.RequestInfo{Namespace: "jaeger"}) + scopeWithReq := &lazyNamespace{req: 
req.WithContext(ctx)} + assert.Equal(t, "jaeger", fmt.Sprintf("%v", scopeWithReq)) +} + func TestLazyResource(t *testing.T) { assert.Equal(t, "unknown", fmt.Sprintf("%v", &lazyResource{})) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go index cf3205a9a93d1..91a7999680d19 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "k8s.io/component-base/metrics" ) @@ -35,8 +36,8 @@ var ( RequestBodySizes = metrics.NewHistogramVec( &metrics.HistogramOpts{ Subsystem: "apiserver", - Name: "request_body_sizes", - Help: "Apiserver request body sizes broken out by size.", + Name: "request_body_size_bytes", + Help: "Apiserver request body size in bytes broken out by resource and verb.", // we use 0.05 KB as the smallest bucket with 0.1 KB increments up to the // apiserver limit. Buckets: metrics.LinearBuckets(50000, 100000, 31), diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go index b0165e23fd210..465986319a38c 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go @@ -134,42 +134,42 @@ func TestLimitedReadBody(t *testing.T) { requestBody: strings.NewReader("aaaa"), limit: 5, expectedMetrics: ` - # HELP apiserver_request_body_sizes [ALPHA] Apiserver request body sizes broken out by size. - # TYPE apiserver_request_body_sizes histogram - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="50000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="150000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="250000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="350000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="450000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="550000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="650000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="750000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="850000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="950000"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.05e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.15e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.25e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.35e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.45e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.55e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.65e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.75e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.85e+06"} 1 - 
apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="1.95e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.05e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.15e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.25e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.35e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.45e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.55e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.65e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.75e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.85e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="2.95e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="3.05e+06"} 1 - apiserver_request_body_sizes_bucket{resource="resource.group",verb="create",le="+Inf"} 1 - apiserver_request_body_sizes_sum{resource="resource.group",verb="create"} 4 - apiserver_request_body_sizes_count{resource="resource.group",verb="create"} 1 + # HELP apiserver_request_body_size_bytes [ALPHA] Apiserver request body size in bytes broken out by resource and verb. + # TYPE apiserver_request_body_size_bytes histogram + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="50000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="150000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="250000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="350000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="450000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="550000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="650000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="750000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="850000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="950000"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.05e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.15e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.25e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.35e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.45e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.55e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.65e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.75e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.85e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="1.95e+06"} 1 + 
apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.05e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.15e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.25e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.35e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.45e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.55e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.65e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.75e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.85e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="2.95e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="3.05e+06"} 1 + apiserver_request_body_size_bytes_bucket{resource="resource.group",verb="create",le="+Inf"} 1 + apiserver_request_body_size_bytes_sum{resource="resource.group",verb="create"} 4 + apiserver_request_body_size_bytes_count{resource="resource.group",verb="create"} 1 `, expectedErr: false, }, @@ -192,7 +192,7 @@ func TestLimitedReadBody(t *testing.T) { } return } - if err = testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(tc.expectedMetrics), "apiserver_request_body_sizes"); err != nil { + if err = testutil.GatherAndCompare(legacyregistry.DefaultGatherer, strings.NewReader(tc.expectedMetrics), "apiserver_request_body_size_bytes"); err != nil { t.Errorf("unexpected err: %v", err) } }) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go index 7d273d6224885..760c9bf40b845 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go @@ -27,6 +27,11 @@ func traceFields(req *http.Request) []attribute.KeyValue { attribute.Stringer("accept", &lazyAccept{req: req}), attribute.Stringer("audit-id", &lazyAuditID{req: req}), attribute.Stringer("client", &lazyClientIP{req: req}), + attribute.Stringer("api-group", &lazyAPIGroup{req: req}), + attribute.Stringer("api-version", &lazyAPIVersion{req: req}), + attribute.Stringer("name", &lazyName{req: req}), + attribute.Stringer("subresource", &lazySubresource{req: req}), + attribute.Stringer("namespace", &lazyNamespace{req: req}), attribute.String("protocol", req.Proto), attribute.Stringer("resource", &lazyResource{req: req}), attribute.Stringer("scope", &lazyScope{req: req}), diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/watch.go index d15819f114578..e8eb0bfc2634e 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/watch.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -34,6 +34,9 @@ import ( "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage" + utilfeature "k8s.io/apiserver/pkg/util/feature" ) // nothing will ever be sent down this channel @@ -61,7 +64,7 @@ func (w *realTimeoutFactory) TimeoutCh() 
(<-chan time.Time, func() bool) { // serveWatch will serve a watch response. // TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled. -func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration) { +func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string) { defer watcher.Stop() options, err := optionsForTransform(mediaTypeOptions, req) @@ -153,6 +156,8 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n TimeoutFactory: &realTimeoutFactory{timeout}, ServerShuttingDownCh: serverShuttingDownCh, + + metricsScope: metricsScope, } server.ServeHTTP(w, req) @@ -176,6 +181,8 @@ type WatchServer struct { TimeoutFactory TimeoutFactory ServerShuttingDownCh <-chan struct{} + + metricsScope string } // ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked @@ -247,6 +254,7 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } metrics.WatchEvents.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc() + isWatchListLatencyRecordingRequired := shouldRecordWatchListLatency(event) if err := s.EmbeddedEncoder.Encode(event.Object, buf); err != nil { // unexpected error @@ -280,6 +288,9 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { if len(ch) == 0 { flusher.Flush() } + if isWatchListLatencyRecordingRequired { + metrics.RecordWatchListLatency(req.Context(), s.Scope.Resource, s.metricsScope) + } buf.Reset() } @@ -360,3 +371,19 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) { } } } + +func shouldRecordWatchListLatency(event watch.Event) bool { + if event.Type != watch.Bookmark || !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + return false + } + // as of today the initial-events-end annotation is added only to a single event + // by the watch cache and only when certain conditions are met + // + // for more please read https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list + hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to determine if the obj has the required annotation for measuring watchlist latency, obj %T: %v", event.Object, err)) + return false + } + return hasAnnotation +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go index 042bd802f1aaf..ffd4a7dcbfb4e 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go @@ -796,7 +796,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). 
@@ -817,7 +817,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("list"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), allMediaTypes...)...). Returns(http.StatusOK, "OK", versionedList). @@ -850,7 +850,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.PUT(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). @@ -879,7 +879,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.PATCH(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Consumes(supportedTypes...). Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). @@ -909,7 +909,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } route := ws.POST(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). @@ -938,7 +938,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.DELETE(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Writes(deleteReturnType). 
@@ -962,7 +962,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.DELETE(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("deletecollection"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Writes(versionedStatus). @@ -990,7 +990,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("watch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(allMediaTypes...). Returns(http.StatusOK, "OK", versionedWatchEvent). @@ -1011,7 +1011,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). Operation("watch"+namespaced+kind+strings.Title(subresource)+"List"+operationSuffix). Produces(allMediaTypes...). Returns(http.StatusOK, "OK", versionedWatchEvent). 
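(Editorial note, not part of the patch: the repeated installer.go hunks above only rewrite the generated OpenAPI description of the existing "pretty" query parameter; the handlers' behaviour is unchanged. As a minimal, illustrative sketch of what that description means for clients, the Go program below opts in to pretty-printed output explicitly. The host, port, namespace and bearer token are placeholders, not values from this patch, and TLS verification is skipped only to keep the sketch short.)

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Throwaway client; InsecureSkipVerify is for the sketch only, never for real clients.
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}

	// Ask for indented output via the documented "pretty" query parameter.
	// Per the description above, user agents that look like curl, wget or a
	// browser get pretty output by default; other clients must request it.
	req, err := http.NewRequest(http.MethodGet,
		"https://127.0.0.1:6443/api/v1/namespaces/default/pods?pretty=true", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <placeholder-token>")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // indented JSON instead of the compact default
}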
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index ba2aed69d4486..48fc951adeedf 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "fmt" "net/http" "net/url" "strconv" @@ -26,8 +27,12 @@ import ( "time" restful "github.com/emicklei/go-restful/v3" + + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/user" @@ -280,6 +285,17 @@ var ( []string{"code_path"}, ) + watchListLatencies = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Subsystem: APIServerComponent, + Name: "watch_list_duration_seconds", + Help: "Response latency distribution in seconds for watch list requests broken by group, version, resource and scope.", + Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 2, 4, 6, 8, 10, 15, 20, 30, 45, 60}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"group", "version", "resource", "scope"}, + ) + metrics = []resettableCollector{ deprecatedRequestGauge, requestCounter, @@ -300,6 +316,7 @@ var ( requestAbortsTotal, requestPostTimeoutTotal, requestTimestampComparisonDuration, + watchListLatencies, } // these are the valid request methods which we report in our metrics. Any other request methods @@ -511,6 +528,18 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp fn() } +// RecordWatchListLatency simply records response latency for watch list requests. +func RecordWatchListLatency(ctx context.Context, gvr schema.GroupVersionResource, metricsScope string) { + requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx) + if !ok { + utilruntime.HandleError(fmt.Errorf("unable to measure watchlist latency because no received ts found in the ctx, gvr: %s", gvr)) + return + } + elapsedSeconds := time.Since(requestReceivedTimestamp).Seconds() + + watchListLatencies.WithContext(ctx).WithLabelValues(gvr.Group, gvr.Version, gvr.Resource, metricsScope).Observe(elapsedSeconds) +} + // MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record // a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, httpCode, respSize int, elapsed time.Duration) { @@ -621,6 +650,26 @@ func CleanScope(requestInfo *request.RequestInfo) string { return "" } +// CleanListScope computes the request scope for metrics. +// +// Note that normally we would use CleanScope for computation. +// But due to the same reasons mentioned in determineRequestNamespaceAndName we cannot. 
+func CleanListScope(ctx context.Context, opts *metainternalversion.ListOptions) string { + namespace, name := determineRequestNamespaceAndName(ctx, opts) + if len(name) > 0 { + return "resource" + } + if len(namespace) > 0 { + return "namespace" + } + if requestInfo, ok := request.RequestInfoFrom(ctx); ok { + if requestInfo.IsResourceRequest { + return "cluster" + } + } + return "" +} + // CanonicalVerb distinguishes LISTs from GETs (and HEADs). It assumes verb is // UPPERCASE. func CanonicalVerb(verb string, scope string) string { @@ -655,6 +704,30 @@ func CleanVerb(verb string, request *http.Request, requestInfo *request.RequestI return reportedVerb } +// determineRequestNamespaceAndName computes name and namespace for the given requests +// +// note that the logic of this function was copy&pasted from cacher.go +// after an unsuccessful attempt of moving it to RequestInfo +// +// see: https://github.com/kubernetes/kubernetes/pull/120520 +func determineRequestNamespaceAndName(ctx context.Context, opts *metainternalversion.ListOptions) (namespace, name string) { + if requestNamespace, ok := request.NamespaceFrom(ctx); ok && len(requestNamespace) > 0 { + namespace = requestNamespace + } else if opts != nil && opts.FieldSelector != nil { + if selectorNamespace, ok := opts.FieldSelector.RequiresExactMatch("metadata.namespace"); ok { + namespace = selectorNamespace + } + } + if requestInfo, ok := request.RequestInfoFrom(ctx); ok && requestInfo != nil && len(requestInfo.Name) > 0 { + name = requestInfo.Name + } else if opts != nil && opts.FieldSelector != nil { + if selectorName, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok { + name = selectorName + } + } + return +} + // cleanVerb additionally ensures that unknown verbs don't clog up the metrics. func cleanVerb(verb, suggestedVerb string, request *http.Request, requestInfo *request.RequestInfo) string { // CanonicalVerb (being an input for this function) doesn't handle correctly the diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go index 562230f2137e6..d640dd4c62c5a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go @@ -17,11 +17,14 @@ limitations under the License. 
package metrics import ( + "context" "net/http" "net/url" "strings" "testing" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/endpoints/responsewriter" "k8s.io/component-base/metrics/legacyregistry" @@ -466,3 +469,61 @@ func TestRecordDroppedRequests(t *testing.T) { }) } } + +func TestCleanListScope(t *testing.T) { + scenarios := []struct { + name string + ctx context.Context + opts *metainternalversion.ListOptions + expectedScope string + }{ + { + name: "empty scope", + }, + { + name: "empty scope with empty request info", + ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{}), + }, + { + name: "namespace from ctx", + ctx: request.WithNamespace(context.TODO(), "foo"), + expectedScope: "namespace", + }, + { + name: "namespace from field selector", + opts: &metainternalversion.ListOptions{ + FieldSelector: fields.ParseSelectorOrDie("metadata.namespace=foo"), + }, + expectedScope: "namespace", + }, + { + name: "name from request info", + ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{Name: "bar"}), + expectedScope: "resource", + }, + { + name: "name from field selector", + opts: &metainternalversion.ListOptions{ + FieldSelector: fields.ParseSelectorOrDie("metadata.name=bar"), + }, + expectedScope: "resource", + }, + { + name: "cluster scope request", + ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{IsResourceRequest: true}), + expectedScope: "cluster", + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + if scenario.ctx == nil { + scenario.ctx = context.TODO() + } + actualScope := CleanListScope(scenario.ctx, scenario.opts) + if actualScope != scenario.expectedScope { + t.Errorf("unexpected scope = %s, expected = %s", actualScope, scenario.expectedScope) + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go index 68b13d720ec4f..8281a1f8403d6 100644 --- a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go +++ b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go @@ -122,6 +122,7 @@ const ( // kep: https://kep.k8s.io/3299 // alpha: v1.25 // beta: v1.27 + // stable: v1.29 // // Enables KMS v2 API for encryption at rest. KMSv2 featuregate.Feature = "KMSv2" @@ -129,6 +130,7 @@ const ( // owner: @enj // kep: https://kep.k8s.io/3299 // beta: v1.28 + // stable: v1.29 // // Enables the use of derived encryption keys with KMS v2. KMSv2KDF featuregate.Feature = "KMSv2KDF" @@ -142,15 +144,6 @@ const ( // in the spec returned from kube-apiserver. OpenAPIEnums featuregate.Feature = "OpenAPIEnums" - // owner: @jefftree - // kep: https://kep.k8s.io/2896 - // alpha: v1.23 - // beta: v1.24 - // stable: v1.27 - // - // Enables kubernetes to publish OpenAPI v3 - OpenAPIV3 featuregate.Feature = "OpenAPIV3" - // owner: @caesarxuchao // alpha: v1.15 // beta: v1.16 @@ -184,6 +177,24 @@ const ( // Enables server-side field validation. ServerSideFieldValidation featuregate.Feature = "ServerSideFieldValidation" + // owner: @enj + // beta: v1.29 + // + // Enables http2 DOS mitigations for unauthenticated clients. 
+ // + // Some known reasons to disable these mitigations: + // + // An API server that is fronted by an L7 load balancer that is set up + // to mitigate http2 attacks may opt to disable this protection to prevent + // unauthenticated clients from disabling connection reuse between the load + // balancer and the API server (many incoming connections could share the + // same backend connection). + // + // An API server that is on a private network may opt to disable this + // protection to prevent performance regressions for unauthenticated + // clients. + UnauthenticatedHTTP2DOSMitigation featuregate.Feature = "UnauthenticatedHTTP2DOSMitigation" + // owner: @caesarxuchao @roycaihw // alpha: v1.20 // @@ -205,6 +216,13 @@ const ( // Enables Structured Authentication Configuration StructuredAuthenticationConfiguration featuregate.Feature = "StructuredAuthenticationConfiguration" + // owner: @palnabarun + // kep: https://kep.k8s.io/3221 + // alpha: v1.29 + // + // Enables Structured Authorization Configuration + StructuredAuthorizationConfiguration featuregate.Feature = "StructuredAuthorizationConfiguration" + // owner: @wojtek-t // alpha: v1.15 // beta: v1.16 @@ -263,16 +281,14 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - KMSv1: {Default: true, PreRelease: featuregate.Deprecated}, + KMSv1: {Default: false, PreRelease: featuregate.Deprecated}, - KMSv2: {Default: true, PreRelease: featuregate.Beta}, + KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - KMSv2KDF: {Default: true, PreRelease: featuregate.Beta}, // lock to true in 1.29 once KMSv2 is GA, remove in 1.31 + KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, @@ -287,6 +303,10 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS StructuredAuthenticationConfiguration: {Default: false, PreRelease: featuregate.Alpha}, + StructuredAuthorizationConfiguration: {Default: false, PreRelease: featuregate.Alpha}, + + UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, + WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index d678f52dfb75e..0b5781f4b9624 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -78,6 +78,7 @@ import ( "k8s.io/component-base/tracing" "k8s.io/klog/v2" openapicommon "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" "k8s.io/utils/clock" utilsnet "k8s.io/utils/net" @@ -194,7 +195,7 @@ type Config struct { // OpenAPIConfig will be used in generating OpenAPI spec. This is nil by default. Use DefaultOpenAPIConfig for "working" defaults. OpenAPIConfig *openapicommon.Config // OpenAPIV3Config will be used in generating OpenAPI V3 spec. This is nil by default. 
Use DefaultOpenAPIV3Config for "working" defaults. - OpenAPIV3Config *openapicommon.Config + OpenAPIV3Config *openapicommon.OpenAPIV3Config // SkipOpenAPIInstallation avoids installing the OpenAPI handler if set to true. SkipOpenAPIInstallation bool @@ -482,8 +483,23 @@ func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, de } // DefaultOpenAPIV3Config provides the default OpenAPIV3Config used to build the OpenAPI V3 spec -func DefaultOpenAPIV3Config(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.Config { - defaultConfig := DefaultOpenAPIConfig(getDefinitions, defNamer) +func DefaultOpenAPIV3Config(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.OpenAPIV3Config { + defaultConfig := &openapicommon.OpenAPIV3Config{ + IgnorePrefixes: []string{}, + Info: &spec.Info{ + InfoProps: spec.InfoProps{ + Title: "Generic API Server", + }, + }, + DefaultResponse: &spec3.Response{ + ResponseProps: spec3.ResponseProps{ + Description: "Default Response.", + }, + }, + GetOperationIDAndTags: apiopenapi.GetOperationIDAndTags, + GetDefinitionName: defNamer.GetDefinitionName, + GetDefinitions: getDefinitions, + } defaultConfig.Definitions = getDefinitions(func(name string) spec.Ref { defName, _ := defaultConfig.GetDefinitionName(name) return spec.MustCreateRef("#/components/schemas/" + openapicommon.EscapeJsonPointer(defName)) @@ -608,6 +624,45 @@ func completeOpenAPI(config *openapicommon.Config, version *version.Info) { } } +func completeOpenAPIV3(config *openapicommon.OpenAPIV3Config, version *version.Info) { + if config == nil { + return + } + if config.SecuritySchemes != nil { + // Setup OpenAPI security: all APIs will have the same authentication for now. + config.DefaultSecurity = []map[string][]string{} + keys := []string{} + for k := range config.SecuritySchemes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + config.DefaultSecurity = append(config.DefaultSecurity, map[string][]string{k: {}}) + } + if config.CommonResponses == nil { + config.CommonResponses = map[int]*spec3.Response{} + } + if _, exists := config.CommonResponses[http.StatusUnauthorized]; !exists { + config.CommonResponses[http.StatusUnauthorized] = &spec3.Response{ + ResponseProps: spec3.ResponseProps{ + Description: "Unauthorized", + }, + } + } + } + // make sure we populate info, and info.version, if not manually set + if config.Info == nil { + config.Info = &spec.Info{} + } + if config.Info.Version == "" { + if version != nil { + config.Info.Version = strings.Split(version.String(), "-")[0] + } else { + config.Info.Version = "unversioned" + } + } +} + // DrainedNotify returns a lifecycle signal of genericapiserver already drained while shutting down. 
func (c *Config) DrainedNotify() <-chan struct{} { return c.lifecycleSignals.InFlightRequestsDrained.Signaled() @@ -633,7 +688,7 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo } completeOpenAPI(c.OpenAPIConfig, c.Version) - completeOpenAPI(c.OpenAPIV3Config, c.Version) + completeOpenAPIV3(c.OpenAPIV3Config, c.Version) if c.DiscoveryAddresses == nil { c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress} @@ -669,6 +724,12 @@ func (c *RecommendedConfig) Complete() CompletedConfig { return c.Config.Complete(c.SharedInformerFactory) } +var allowedMediaTypes = []string{ + runtime.ContentTypeJSON, + runtime.ContentTypeYAML, + runtime.ContentTypeProtobuf, +} + // New creates a new server which logically combines the handling chain with the passed server. // name is used to differentiate for logging. The handler chain in particular can be difficult as it starts delegating. // delegationTarget may not be nil. @@ -676,6 +737,18 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G if c.Serializer == nil { return nil, fmt.Errorf("Genericapiserver.New() called with config.Serializer == nil") } + for _, info := range c.Serializer.SupportedMediaTypes() { + var ok bool + for _, mt := range allowedMediaTypes { + if info.MediaType == mt { + ok = true + break + } + } + if !ok { + return nil, fmt.Errorf("refusing to create new apiserver %q with support for media type %q (allowed media types are: %s)", name, info.MediaType, strings.Join(allowedMediaTypes, ", ")) + } + } if c.LoopbackClientConfig == nil { return nil, fmt.Errorf("Genericapiserver.New() called with config.LoopbackClientConfig == nil") } @@ -915,7 +988,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { requestWorkEstimator := flowcontrolrequest.NewWorkEstimator( c.StorageObjectCountTracker.Get, c.FlowControl.GetInterestedWatchCount, workEstimatorCfg, c.FlowControl.GetMaxSeats) handler = filterlatency.TrackCompleted(handler) - handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator) + handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator, c.RequestTimeout/4) handler = filterlatency.TrackStarted(handler, c.TracerProvider, "priorityandfairness") } else { handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc) @@ -994,14 +1067,10 @@ func installAPI(s *GenericAPIServer, c *Config) { if c.EnableMetrics { if c.EnableProfiling { routes.MetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) - if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { - slis.SLIMetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) - } + slis.SLIMetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) } else { routes.DefaultMetrics{}.Install(s.Handler.NonGoRestfulMux) - if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { - slis.SLIMetrics{}.Install(s.Handler.NonGoRestfulMux) - } + slis.SLIMetrics{}.Install(s.Handler.NonGoRestfulMux) } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/config_test.go b/staging/src/k8s.io/apiserver/pkg/server/config_test.go index 6e4ef65ef1a33..a1d6d8902f98f 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" 
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -362,3 +363,21 @@ func (b *testBackend) ProcessEvents(events ...*auditinternal.Event) bool { b.events = append(b.events, events...) return true } + +func TestNewErrorForbiddenSerializer(t *testing.T) { + config := CompletedConfig{ + &completedConfig{ + Config: &Config{ + Serializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{ + MediaType: "application/cbor", + }), + }, + }, + } + _, err := config.New("test", NewEmptyDelegate()) + if err == nil { + t.Error("successfully created a new server configured with cbor support") + } else if err.Error() != `refusing to create new apiserver "test" with support for media type "application/cbor" (allowed media types are: application/json, application/yaml, application/vnd.kubernetes.protobuf)` { + t.Errorf("unexpected error: %v", err) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/egressselector/config_test.go b/staging/src/k8s.io/apiserver/pkg/server/egressselector/config_test.go index 2b9861ae9d249..6effe442c4e83 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/egressselector/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/egressselector/config_test.go @@ -541,9 +541,9 @@ func TestValidateEgressSelectorConfiguration(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { errs := ValidateEgressSelectorConfiguration(tc.contents) - if tc.expectError == false && len(errs) != 0 { + if !tc.expectError && len(errs) != 0 { t.Errorf("Calling ValidateEgressSelectorConfiguration expected no error, got %v", errs) - } else if tc.expectError == true && len(errs) == 0 { + } else if tc.expectError && len(errs) == 0 { t.Errorf("Calling ValidateEgressSelectorConfiguration expected error, got no error") } }) diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/staging/src/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index 6b39877816019..05cc44263fbf6 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -35,6 +35,7 @@ import ( fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" "k8s.io/klog/v2" + utilsclock "k8s.io/utils/clock" ) // PriorityAndFairnessClassification identifies the results of @@ -78,6 +79,10 @@ type priorityAndFairnessHandler struct { // the purpose of computing RetryAfter header to avoid system // overload. droppedRequests utilflowcontrol.DroppedRequestsTracker + + // newReqWaitCtxFn creates a derived context with a deadline + // of how long a given request can wait in its queue. + newReqWaitCtxFn func(context.Context) (context.Context, context.CancelFunc) } func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Request) { @@ -240,8 +245,9 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque resultCh <- err }() - // We create handleCtx with explicit cancelation function. - // The reason for it is that Handle() underneath may start additional goroutine + // We create handleCtx with an adjusted deadline, for two reasons. + // One is to limit the time the request waits before its execution starts. + // The other reason for it is that Handle() underneath may start additional goroutine // that is blocked on context cancellation. 
However, from APF point of view, // we don't want to wait until the whole watch request is processed (which is // when it context is actually cancelled) - we want to unblock the goroutine as @@ -249,7 +255,7 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque // // Note that we explicitly do NOT call the actuall handler using that context // to avoid cancelling request too early. - handleCtx, handleCtxCancel := context.WithCancel(ctx) + handleCtx, handleCtxCancel := h.newReqWaitCtxFn(ctx) defer handleCtxCancel() // Note that Handle will return irrespective of whether the request @@ -286,7 +292,11 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque h.handler.ServeHTTP(w, r) } - h.fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute) + func() { + handleCtx, cancelFn := h.newReqWaitCtxFn(ctx) + defer cancelFn() + h.fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute) + }() } if !served { @@ -309,6 +319,7 @@ func WithPriorityAndFairness( longRunningRequestCheck apirequest.LongRunningRequestCheck, fcIfc utilflowcontrol.Interface, workEstimator flowcontrolrequest.WorkEstimatorFunc, + defaultRequestWaitLimit time.Duration, ) http.Handler { if fcIfc == nil { klog.Warningf("priority and fairness support not found, skipping") @@ -322,12 +333,18 @@ func WithPriorityAndFairness( waitingMark.mutatingObserver = fcmetrics.GetWaitingMutatingConcurrency() }) + clock := &utilsclock.RealClock{} + newReqWaitCtxFn := func(ctx context.Context) (context.Context, context.CancelFunc) { + return getRequestWaitContext(ctx, defaultRequestWaitLimit, clock) + } + priorityAndFairnessHandler := &priorityAndFairnessHandler{ handler: handler, longRunningRequestCheck: longRunningRequestCheck, fcIfc: fcIfc, workEstimator: workEstimator, droppedRequests: utilflowcontrol.NewDroppedRequestsTracker(), + newReqWaitCtxFn: newReqWaitCtxFn, } return http.HandlerFunc(priorityAndFairnessHandler.Handle) } @@ -356,3 +373,48 @@ func tooManyRequests(req *http.Request, w http.ResponseWriter, retryAfter string w.Header().Set("Retry-After", retryAfter) http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) } + +// getRequestWaitContext returns a new context with a deadline of how +// long the request is allowed to wait before it is removed from its +// queue and rejected. +// The context.CancelFunc returned must never be nil and the caller is +// responsible for calling the CancelFunc function for cleanup. +// - ctx: the context associated with the request (it may or may +// not have a deadline). +// - defaultRequestWaitLimit: the default wait duration that is used +// if the request context does not have any deadline. +// (a) initialization of a watch or +// (b) a request whose context has no deadline +// +// clock comes in handy for testing the function +func getRequestWaitContext(ctx context.Context, defaultRequestWaitLimit time.Duration, clock utilsclock.PassiveClock) (context.Context, context.CancelFunc) { + if ctx.Err() != nil { + return ctx, func() {} + } + + reqArrivedAt := clock.Now() + if reqReceivedTimestamp, ok := apirequest.ReceivedTimestampFrom(ctx); ok { + reqArrivedAt = reqReceivedTimestamp + } + + // a) we will allow the request to wait in the queue for one + // fourth of the time of its allotted deadline. 
+ // b) if the request context does not have any deadline + // then we default to 'defaultRequestWaitLimit' + // in any case, the wait limit for any request must not + // exceed the hard limit of 1m + // + // request has deadline: + // wait-limit = min(remaining deadline / 4, 1m) + // request has no deadline: + // wait-limit = min(defaultRequestWaitLimit, 1m) + thisReqWaitLimit := defaultRequestWaitLimit + if deadline, ok := ctx.Deadline(); ok { + thisReqWaitLimit = deadline.Sub(reqArrivedAt) / 4 + } + if thisReqWaitLimit > time.Minute { + thisReqWaitLimit = time.Minute + } + + return context.WithDeadline(ctx, reqArrivedAt.Add(thisReqWaitLimit)) +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/priority-and-fairness_test.go b/staging/src/k8s.io/apiserver/pkg/server/filters/priority-and-fairness_test.go index 81a17a5b18ed3..31ca992f595b8 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/priority-and-fairness_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/priority-and-fairness_test.go @@ -51,6 +51,7 @@ import ( "k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/metrics/testutil" "k8s.io/klog/v2" + clocktesting "k8s.io/utils/clock/testing" "github.com/google/go-cmp/cmp" ) @@ -153,23 +154,23 @@ func newApfServerWithHooks(t *testing.T, decision mockDecision, onExecute, postE WatchTracker: utilflowcontrol.NewWatchTracker(), MaxSeatsTracker: utilflowcontrol.NewMaxSeatsTracker(), } - return newApfServerWithFilter(t, fakeFilter, onExecute, postExecute) + return newApfServerWithFilter(t, fakeFilter, time.Minute/4, onExecute, postExecute) } -func newApfServerWithFilter(t *testing.T, flowControlFilter utilflowcontrol.Interface, onExecute, postExecute func()) *httptest.Server { +func newApfServerWithFilter(t *testing.T, flowControlFilter utilflowcontrol.Interface, defaultWaitLimit time.Duration, onExecute, postExecute func()) *httptest.Server { epmetrics.Register() fcmetrics.Register() - apfServer := httptest.NewServer(newApfHandlerWithFilter(t, flowControlFilter, onExecute, postExecute)) + apfServer := httptest.NewServer(newApfHandlerWithFilter(t, flowControlFilter, defaultWaitLimit, onExecute, postExecute)) return apfServer } -func newApfHandlerWithFilter(t *testing.T, flowControlFilter utilflowcontrol.Interface, onExecute, postExecute func()) http.Handler { +func newApfHandlerWithFilter(t *testing.T, flowControlFilter utilflowcontrol.Interface, defaultWaitLimit time.Duration, onExecute, postExecute func()) http.Handler { requestInfoFactory := &apirequest.RequestInfoFactory{APIPrefixes: sets.NewString("apis", "api"), GrouplessAPIPrefixes: sets.NewString("api")} longRunningRequestCheck := BasicLongRunningRequestCheck(sets.NewString("watch"), sets.NewString("proxy")) apfHandler := WithPriorityAndFairness(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { onExecute() - }), longRunningRequestCheck, flowControlFilter, defaultRequestWorkEstimator) + }), longRunningRequestCheck, flowControlFilter, defaultRequestWorkEstimator, defaultWaitLimit) handler := apifilters.WithRequestInfo(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { r = r.WithContext(apirequest.WithUser(r.Context(), &user.DefaultInfo{ @@ -458,7 +459,7 @@ func TestApfExecuteWatchRequestsWithInitializationSignal(t *testing.T) { postExecuteFunc := func() {} - server := newApfServerWithFilter(t, fakeFilter, onExecuteFunc, postExecuteFunc) + server := newApfServerWithFilter(t, fakeFilter, time.Minute/4, onExecuteFunc, postExecuteFunc) defer server.Close() var wg 
sync.WaitGroup @@ -498,7 +499,7 @@ func TestApfRejectWatchRequestsWithInitializationSignal(t *testing.T) { } postExecuteFunc := func() {} - server := newApfServerWithFilter(t, fakeFilter, onExecuteFunc, postExecuteFunc) + server := newApfServerWithFilter(t, fakeFilter, time.Minute/4, onExecuteFunc, postExecuteFunc) defer server.Close() if err := expectHTTPGet(fmt.Sprintf("%s/api/v1/namespaces/default/pods?watch=true", server.URL), http.StatusTooManyRequests); err != nil { @@ -517,7 +518,7 @@ func TestApfWatchPanic(t *testing.T) { } postExecuteFunc := func() {} - apfHandler := newApfHandlerWithFilter(t, fakeFilter, onExecuteFunc, postExecuteFunc) + apfHandler := newApfHandlerWithFilter(t, fakeFilter, time.Minute/4, onExecuteFunc, postExecuteFunc) handler := func(w http.ResponseWriter, r *http.Request) { defer func() { if err := recover(); err == nil { @@ -564,7 +565,7 @@ func TestApfWatchHandlePanic(t *testing.T) { for _, test := range testCases { t.Run(test.name, func(t *testing.T) { - apfHandler := newApfHandlerWithFilter(t, test.filter, onExecuteFunc, postExecuteFunc) + apfHandler := newApfHandlerWithFilter(t, test.filter, time.Minute/4, onExecuteFunc, postExecuteFunc) handler := func(w http.ResponseWriter, r *http.Request) { defer func() { if err := recover(); err == nil { @@ -649,6 +650,7 @@ func TestApfWithRequestDigest(t *testing.T) { longRunningFunc, fakeFilter, func(_ *http.Request, _, _ string) fcrequest.WorkEstimate { return workExpected }, + time.Minute/4, ) w := httptest.NewRecorder() @@ -676,7 +678,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { t.Run("priority level concurrency is set to 1, request handler panics, next request should not be rejected", func(t *testing.T) { const ( - requestTimeout = 1 * time.Minute userName = "alice" fsName = "test-fs" plName = "test-pl" @@ -685,53 +686,58 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { apfConfiguration := newConfiguration(fsName, plName, userName, plConcurrencyShares, 0) stopCh := make(chan struct{}) - controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, requestTimeout/4, plName, plConcurrency) + controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, plName, plConcurrency) headerMatcher := headerMatcher{} - var executed bool // we will raise a panic for the first request. - firstRequestPathPanic := "/request/panic-as-designed" + firstRequestPathPanic, secondRequestPathShouldWork := "/request/panic-as-designed", "/request/should-succeed-as-expected" + firstHandlerDoneCh, secondHandlerDoneCh := make(chan struct{}), make(chan struct{}) requestHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - executed = true - headerMatcher.inspect(w, fsName, plName) - - if r.URL.Path == firstRequestPathPanic { + headerMatcher.inspect(t, w, fsName, plName) + switch { + case r.URL.Path == firstRequestPathPanic: + close(firstHandlerDoneCh) panic(fmt.Errorf("request handler panic'd as designed - %#v", r.RequestURI)) + case r.URL.Path == secondRequestPathShouldWork: + close(secondHandlerDoneCh) + } }) - handler := newHandlerChain(t, requestHandler, controller, userName, requestTimeout) - server, requestGetter := newHTTP2ServerWithClient(handler, requestTimeout*2) + // NOTE: the server will enforce a 1m timeout on every incoming + // request, and the client enforces a timeout of 2m. 
+ handler := newHandlerChain(t, requestHandler, controller, userName, time.Minute) + server, requestGetter := newHTTP2ServerWithClient(handler, 2*time.Minute) defer server.Close() // we send two requests synchronously, one at a time // - first request is expected to panic as designed - // - second request is expected to success + // - second request is expected to succeed _, err := requestGetter(firstRequestPathPanic) - if !executed { - t.Errorf("Expected inner handler to be executed for request: %q", firstRequestPathPanic) + + // did the server handler panic, as expected? + select { + case <-firstHandlerDoneCh: + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected the server handler to panic for request: %q", firstRequestPathPanic) } if isClientTimeout(err) { t.Fatalf("the client has unexpectedly timed out - request: %q error: %s", firstRequestPathPanic, err.Error()) } expectResetStreamError(t, err) - executed = false // the second request should be served successfully. - secondRequestPathShouldWork := "/request/should-succeed-as-expected" response, err := requestGetter(secondRequestPathShouldWork) - if !executed { - t.Errorf("Expected inner handler to be executed for request: %s", secondRequestPathShouldWork) - } if err != nil { t.Fatalf("Expected request: %q to get a response, but got error: %#v", secondRequestPathShouldWork, err) } if response.StatusCode != http.StatusOK { t.Errorf("Expected HTTP status code: %d for request: %q, but got: %#v", http.StatusOK, secondRequestPathShouldWork, response) } - - for _, err := range headerMatcher.errors() { - t.Errorf("Expected APF headers to match, but got: %v", err) + select { + case <-secondHandlerDoneCh: + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected the server handler to have completed: %q", secondRequestPathShouldWork) } close(stopCh) @@ -746,7 +752,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { t.Run("priority level concurrency is set to 1, request times out and inner handler hasn't written to the response yet", func(t *testing.T) { t.Parallel() const ( - requestTimeout = 5 * time.Second userName = "alice" fsName = "test-fs" plName = "test-pl" @@ -755,15 +760,13 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { apfConfiguration := newConfiguration(fsName, plName, userName, plConcurrencyShares, 0) stopCh := make(chan struct{}) - controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, requestTimeout/4, plName, plConcurrency) + controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, plName, plConcurrency) headerMatcher := headerMatcher{} - var executed bool rquestTimesOutPath := "/request/time-out-as-designed" reqHandlerCompletedCh, callerRoundTripDoneCh := make(chan struct{}), make(chan struct{}) requestHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - executed = true - headerMatcher.inspect(w, fsName, plName) + headerMatcher.inspect(t, w, fsName, plName) if r.URL.Path == rquestTimesOutPath { defer close(reqHandlerCompletedCh) @@ -772,13 +775,16 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { <-callerRoundTripDoneCh } }) - handler := newHandlerChain(t, requestHandler, controller, userName, requestTimeout) - server, requestGetter := newHTTP2ServerWithClient(handler, requestTimeout*2) + // NOTE: the server will enforce a 5s timeout on every + // incoming request, and the client enforces a 
timeout of 1m. + handler := newHandlerChain(t, requestHandler, controller, userName, 5*time.Second) + server, requestGetter := newHTTP2ServerWithClient(handler, time.Minute) defer server.Close() - // send a request synchronously with a client timeout of requestTimeout*2 seconds - // this ensures the test does not block indefinitely if the server does not respond. + // send a request synchronously with a client timeout of 1m, this minimizes the + // chance of a flake in ci, the client waits long enough for the server to send a + // timeout response to the client. var ( response *http.Response err error @@ -793,11 +799,12 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { } }() - if !executed { - t.Errorf("Expected inner handler to be executed for request: %q", rquestTimesOutPath) - } t.Logf("Waiting for the inner handler of the request: %q to complete", rquestTimesOutPath) - <-reqHandlerCompletedCh + select { + case <-reqHandlerCompletedCh: + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected the server handler to have completed: %q", rquestTimesOutPath) + } if err != nil { t.Fatalf("Expected request: %q to get a response, but got error: %#v", rquestTimesOutPath, err) @@ -806,10 +813,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { t.Errorf("Expected HTTP status code: %d for request: %q, but got: %#v", http.StatusGatewayTimeout, rquestTimesOutPath, response) } - for _, err := range headerMatcher.errors() { - t.Errorf("Expected APF headers to match, but got: %v", err) - } - close(stopCh) t.Log("Waiting for the controller to shutdown") @@ -822,7 +825,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { t.Run("priority level concurrency is set to 1, inner handler panics after the request times out", func(t *testing.T) { t.Parallel() const ( - requestTimeout = 5 * time.Second userName = "alice" fsName = "test-fs" plName = "test-pl" @@ -831,33 +833,35 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { apfConfiguration := newConfiguration(fsName, plName, userName, plConcurrencyShares, 0) stopCh := make(chan struct{}) - controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, requestTimeout/4, plName, plConcurrency) + controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, plName, plConcurrency) headerMatcher := headerMatcher{} - var innerHandlerWriteErr error - reqHandlerCompletedCh, callerRoundTripDoneCh := make(chan struct{}), make(chan struct{}) + reqHandlerErrCh, callerRoundTripDoneCh := make(chan error, 1), make(chan struct{}) rquestTimesOutPath := "/request/time-out-as-designed" requestHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - headerMatcher.inspect(w, fsName, plName) + headerMatcher.inspect(t, w, fsName, plName) if r.URL.Path == rquestTimesOutPath { - defer close(reqHandlerCompletedCh) <-callerRoundTripDoneCh // we expect the timeout handler to have timed out this request by now and any attempt // to write to the response should return a http.ErrHandlerTimeout error.
- _, innerHandlerWriteErr = w.Write([]byte("foo")) + _, innerHandlerWriteErr := w.Write([]byte("foo")) + reqHandlerErrCh <- innerHandlerWriteErr panic(http.ErrAbortHandler) } }) - handler := newHandlerChain(t, requestHandler, controller, userName, requestTimeout) - server, requestGetter := newHTTP2ServerWithClient(handler, requestTimeout*2) + // NOTE: the server will enforce a 5s timeout on every + // incoming request, and the client enforces a timeout of 1m. + handler := newHandlerChain(t, requestHandler, controller, userName, 5*time.Second) + server, requestGetter := newHTTP2ServerWithClient(handler, time.Minute) defer server.Close() - // send a request synchronously with a client timeout of requestTimeout*2 seconds - // this ensures the test does not block indefinitely if the server does not respond. + // send a request synchronously with a client timeout of 1m; this minimizes the + // chance of a flake in CI, as the client waits long enough for the server to send a + // timeout response to the client. var ( response *http.Response err error @@ -872,11 +876,15 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { }() t.Logf("Waiting for the inner handler of the request: %q to complete", rquestTimesOutPath) - <-reqHandlerCompletedCh - - if innerHandlerWriteErr != http.ErrHandlerTimeout { - t.Fatalf("Expected error: %#v, but got: %#v", http.ErrHandlerTimeout, err) + select { + case innerHandlerWriteErr := <-reqHandlerErrCh: + if innerHandlerWriteErr != http.ErrHandlerTimeout { + t.Fatalf("Expected error: %#v, but got: %#v", http.ErrHandlerTimeout, innerHandlerWriteErr) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected the server handler to have completed: %q", rquestTimesOutPath) } + if err != nil { t.Fatalf("Expected request: %q to get a response, but got error: %#v", rquestTimesOutPath, err) } @@ -884,10 +892,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { t.Errorf("Expected HTTP status code: %d for request: %q, but got: %#v", http.StatusGatewayTimeout, rquestTimesOutPath, response) } - for _, err := range headerMatcher.errors() { - t.Errorf("Expected APF headers to match, but got: %v", err) - } - close(stopCh) t.Log("Waiting for the controller to shutdown") @@ -900,7 +904,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { t.Run("priority level concurrency is set to 1, inner handler writes to the response before request times out", func(t *testing.T) { t.Parallel() const ( - requestTimeout = 5 * time.Second userName = "alice" fsName = "test-fs" plName = "test-pl" @@ -909,17 +912,15 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { apfConfiguration := newConfiguration(fsName, plName, userName, plConcurrencyShares, 0) stopCh := make(chan struct{}) - controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, requestTimeout/4, plName, plConcurrency) + controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, plName, plConcurrency) headerMatcher := headerMatcher{} - var innerHandlerWriteErr error rquestTimesOutPath := "/request/time-out-as-designed" - reqHandlerCompletedCh, callerRoundTripDoneCh := make(chan struct{}), make(chan struct{}) + reqHandlerErrCh, callerRoundTripDoneCh := make(chan error, 1), make(chan struct{}) requestHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - headerMatcher.inspect(w, fsName, plName) + headerMatcher.inspect(t, w, 
fsName, plName) if r.URL.Path == rquestTimesOutPath { - defer close(reqHandlerCompletedCh) // inner handler writes header and then let the request time out. w.WriteHeader(http.StatusBadRequest) @@ -927,14 +928,20 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { // we expect the timeout handler to have timed out this request by now and any attempt // to write to the response should return a http.ErrHandlerTimeout error. - _, innerHandlerWriteErr = w.Write([]byte("foo")) + _, innerHandlerWriteErr := w.Write([]byte("foo")) + reqHandlerErrCh <- innerHandlerWriteErr } }) - handler := newHandlerChain(t, requestHandler, controller, userName, requestTimeout) - server, requestGetter := newHTTP2ServerWithClient(handler, requestTimeout*2) + // NOTE: the server will enforce a 5s timeout on every + // incoming request, and the client enforces a timeout of 1m. + handler := newHandlerChain(t, requestHandler, controller, userName, 5*time.Second) + server, requestGetter := newHTTP2ServerWithClient(handler, time.Minute) defer server.Close() + // send a request synchronously with a client timeout of 1m; this minimizes the + // chance of a flake in CI, as the client waits long enough for the server to send a + // timeout response to the client. var err error func() { defer close(callerRoundTripDoneCh) @@ -946,16 +953,16 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { }() t.Logf("Waiting for the inner handler of the request: %q to complete", rquestTimesOutPath) - <-reqHandlerCompletedCh - - if innerHandlerWriteErr != http.ErrHandlerTimeout { - t.Fatalf("Expected error: %#v, but got: %#v", http.ErrHandlerTimeout, err) + select { + case innerHandlerWriteErr := <-reqHandlerErrCh: + if innerHandlerWriteErr != http.ErrHandlerTimeout { + t.Fatalf("Expected error: %#v, but got: %#v", http.ErrHandlerTimeout, innerHandlerWriteErr) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected the server handler to have completed: %q", rquestTimesOutPath) } - expectResetStreamError(t, err) - for _, err := range headerMatcher.errors() { - t.Errorf("Expected APF headers to match, but got: %v", err) - } + expectResetStreamError(t, err) close(stopCh) t.Log("Waiting for the controller to shutdown") @@ -975,7 +982,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { } const ( - requestTimeout = 5 * time.Second userName = "alice" fsName = "test-fs" plName = "test-pl" @@ -984,21 +990,16 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { apfConfiguration := newConfiguration(fsName, plName, userName, plConcurrencyShares, queueLength) stopCh := make(chan struct{}) - controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, requestTimeout/4, plName, plConcurrency) + controller, controllerCompletedCh := startAPFController(t, stopCh, apfConfiguration, serverConcurrency, plName, plConcurrency) headerMatcher := headerMatcher{} - var firstRequestInnerHandlerWriteErr error - var secondRequestExecuted bool - firstRequestTimesOutPath := "/request/first/time-out-as-designed" - secondRequestEnqueuedPath := "/request/second/enqueued-as-designed" - firstReqHandlerCompletedCh, firstReqInProgressCh := make(chan struct{}), make(chan struct{}) + firstRequestTimesOutPath, secondRequestEnqueuedPath := "/request/first/time-out-as-designed", "/request/second/enqueued-as-designed" + firstReqHandlerErrCh, firstReqInProgressCh := make(chan error, 1), make(chan struct{}) firstReqRoundTripDoneCh, 
secondReqRoundTripDoneCh := make(chan struct{}), make(chan struct{}) requestHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - headerMatcher.inspect(w, fsName, plName) - - if r.URL.Path == firstRequestTimesOutPath { - defer close(firstReqHandlerCompletedCh) - + headerMatcher.inspect(t, w, fsName, plName) + switch { + case r.URL.Path == firstRequestTimesOutPath: close(firstReqInProgressCh) <-firstReqRoundTripDoneCh @@ -1008,24 +1009,25 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { // we expect the timeout handler to have timed out this request by now and any attempt // to write to the response should return a http.ErrHandlerTimeout error. - _, firstRequestInnerHandlerWriteErr = w.Write([]byte("foo")) - return - } + _, firstRequestInnerHandlerWriteErr := w.Write([]byte("foo")) + firstReqHandlerErrCh <- firstRequestInnerHandlerWriteErr - if r.URL.Path == secondRequestEnqueuedPath { + case r.URL.Path == secondRequestEnqueuedPath: // we expect the concurrency to be set to 1 and so this request should never be executed. - secondRequestExecuted = true + t.Errorf("Expected second request to be enqueued: %q", secondRequestEnqueuedPath) } }) - handler := newHandlerChain(t, requestHandler, controller, userName, requestTimeout) - server, requestGetter := newHTTP2ServerWithClient(handler, requestTimeout*2) + // NOTE: the server will enforce a 5s timeout on every + // incoming request, and the client enforces a timeout of 1m. + handler := newHandlerChain(t, requestHandler, controller, userName, 5*time.Second) + server, requestGetter := newHTTP2ServerWithClient(handler, time.Minute) defer server.Close() // This test involves two requests sent to the same priority level, which has 1 queue and // a concurrency limit of 1. The handler chain include the timeout filter. - // Each request is sent from a separate goroutine, with a client-side timeout that is - // double the timeout filter's limit. + // Each request is sent from a separate goroutine, with a client-side timeout of 1m, on + // the other hand, the server enforces a timeout of 5s (via the timeout filter). // The first request should get dispatched immediately; execution (a) starts with closing // the channel that triggers the second client goroutine to send its request and then (b) // waits for both client goroutines to have gotten a response (expected to be timeouts). @@ -1067,12 +1069,16 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { t.Fatalf("the client has unexpectedly timed out - request: %q error: %s", firstRequestTimesOutPath, fmtError(firstReqResult.err)) } t.Logf("Waiting for the inner handler of the request: %q to complete", firstRequestTimesOutPath) - <-firstReqHandlerCompletedCh + select { + case firstRequestInnerHandlerWriteErr := <-firstReqHandlerErrCh: + if firstRequestInnerHandlerWriteErr != http.ErrHandlerTimeout { + t.Fatalf("Expected error: %#v, but got: %s", http.ErrHandlerTimeout, fmtError(firstRequestInnerHandlerWriteErr)) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected the server handler to have completed: %q", firstRequestTimesOutPath) + } // first request is expected to time out. 
- if firstRequestInnerHandlerWriteErr != http.ErrHandlerTimeout { - t.Fatalf("Expected error: %#v, but got: %s", http.ErrHandlerTimeout, fmtError(firstRequestInnerHandlerWriteErr)) - } if isStreamReset(firstReqResult.err) || firstReqResult.response.StatusCode != http.StatusGatewayTimeout { // got what was expected } else if firstReqResult.err != nil { @@ -1086,9 +1092,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { if isClientTimeout(secondReqResult.err) { t.Fatalf("the client has unexpectedly timed out - request: %q error: %s", secondRequestEnqueuedPath, fmtError(secondReqResult.err)) } - if secondRequestExecuted { - t.Errorf("Expected second request to be enqueued: %q", secondRequestEnqueuedPath) - } if isStreamReset(secondReqResult.err) || secondReqResult.response.StatusCode == http.StatusTooManyRequests || secondReqResult.response.StatusCode == http.StatusGatewayTimeout { // got what was expected } else if secondReqResult.err != nil { @@ -1097,10 +1100,6 @@ func TestPriorityAndFairnessWithPanicRecoveryAndTimeoutFilter(t *testing.T) { t.Errorf("Expected HTTP status code: %d or %d for request: %q, but got: %#+v", http.StatusTooManyRequests, http.StatusGatewayTimeout, secondRequestEnqueuedPath, secondReqResult.response) } - for _, err := range headerMatcher.errors() { - t.Errorf("Expected APF headers to match, but got: %v", err) - } - close(stopCh) t.Log("Waiting for the controller to shutdown") @@ -1116,11 +1115,11 @@ func fmtError(err error) string { } func startAPFController(t *testing.T, stopCh <-chan struct{}, apfConfiguration []runtime.Object, serverConcurrency int, - requestWaitLimit time.Duration, plName string, plConcurrency int) (utilflowcontrol.Interface, <-chan error) { + plName string, plConcurrency int) (utilflowcontrol.Interface, <-chan error) { clientset := newClientset(t, apfConfiguration...) // this test does not rely on resync, so resync period is set to zero factory := informers.NewSharedInformerFactory(clientset, 0) - controller := utilflowcontrol.New(factory, clientset.FlowcontrolV1beta3(), serverConcurrency, requestWaitLimit) + controller := utilflowcontrol.New(factory, clientset.FlowcontrolV1beta3(), serverConcurrency) factory.Start(stopCh) @@ -1167,13 +1166,11 @@ func newHTTP2ServerWithClient(handler http.Handler, clientTimeout time.Duration) } } -type headerMatcher struct { - lock sync.Mutex - errsGot []error -} +type headerMatcher struct{} // verifies that the expected flow schema and priority level UIDs are attached to the header. -func (m *headerMatcher) inspect(w http.ResponseWriter, expectedFS, expectedPL string) { +func (m *headerMatcher) inspect(t *testing.T, w http.ResponseWriter, expectedFS, expectedPL string) { + t.Helper() err := func() error { if w == nil { return fmt.Errorf("expected a non nil HTTP response") @@ -1193,16 +1190,7 @@ func (m *headerMatcher) inspect(w http.ResponseWriter, expectedFS, expectedPL st if err == nil { return } - - m.lock.Lock() - defer m.lock.Unlock() - m.errsGot = append(m.errsGot, err) -} - -func (m *headerMatcher) errors() []error { - m.lock.Lock() - defer m.lock.Unlock() - return m.errsGot[:] + t.Errorf("Expected APF headers to match, but got: %v", err) } // when a request panics, http2 resets the stream with an INTERNAL_ERROR message @@ -1231,7 +1219,7 @@ func newHandlerChain(t *testing.T, handler http.Handler, filter utilflowcontrol. 
requestInfoFactory := &apirequest.RequestInfoFactory{APIPrefixes: sets.NewString("apis", "api"), GrouplessAPIPrefixes: sets.NewString("api")} longRunningRequestCheck := BasicLongRunningRequestCheck(sets.NewString("watch"), sets.NewString("proxy")) - apfHandler := WithPriorityAndFairness(handler, longRunningRequestCheck, filter, defaultRequestWorkEstimator) + apfHandler := WithPriorityAndFairness(handler, longRunningRequestCheck, filter, defaultRequestWorkEstimator, time.Minute/4) // add the handler in the chain that adds the specified user to the request context handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1407,3 +1395,107 @@ func isStreamReset(err error) bool { } return false } + +func TestGetRequestWaitContext(t *testing.T) { + tests := []struct { + name string + defaultRequestWaitLimit time.Duration + parent func(t time.Time) (context.Context, context.CancelFunc) + newReqWaitCtxExpected bool + reqWaitLimitExpected time.Duration + }{ + { + name: "context deadline has exceeded", + parent: func(time.Time) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx, cancel + }, + }, + { + name: "context has a deadline, 'received at' is not set, wait limit should be one fourth of the remaining deadline from now", + parent: func(now time.Time) (context.Context, context.CancelFunc) { + return context.WithDeadline(context.Background(), now.Add(60*time.Second)) + }, + newReqWaitCtxExpected: true, + reqWaitLimitExpected: 15 * time.Second, + }, + { + name: "context has a deadline, 'received at' is set, wait limit should be one fourth of the deadline starting from the 'received at' time", + parent: func(now time.Time) (context.Context, context.CancelFunc) { + ctx := apirequest.WithReceivedTimestamp(context.Background(), now.Add(-10*time.Second)) + return context.WithDeadline(ctx, now.Add(50*time.Second)) + }, + newReqWaitCtxExpected: true, + reqWaitLimitExpected: 5 * time.Second, // from now + }, + { + name: "context does not have any deadline, 'received at' is not set, default wait limit should be in effect from now", + defaultRequestWaitLimit: 15 * time.Second, + parent: func(time.Time) (context.Context, context.CancelFunc) { + return context.WithCancel(context.Background()) + }, + newReqWaitCtxExpected: true, + reqWaitLimitExpected: 15 * time.Second, + }, + { + name: "context does not have any deadline, 'received at' is set, default wait limit should be in effect starting from the 'received at' time", + defaultRequestWaitLimit: 15 * time.Second, + parent: func(now time.Time) (context.Context, context.CancelFunc) { + ctx := apirequest.WithReceivedTimestamp(context.Background(), now.Add(-10*time.Second)) + return context.WithCancel(ctx) + }, + newReqWaitCtxExpected: true, + reqWaitLimitExpected: 5 * time.Second, // from now + }, + { + name: "context has a deadline, wait limit should not exceed the hard limit of 1m", + parent: func(now time.Time) (context.Context, context.CancelFunc) { + // let 1/4th of the remaining deadline exceed the hard limit + return context.WithDeadline(context.Background(), now.Add(8*time.Minute)) + }, + newReqWaitCtxExpected: true, + reqWaitLimitExpected: time.Minute, + }, + { + name: "context has no deadline, wait limit should not exceed the hard limit of 1m", + defaultRequestWaitLimit: 2 * time.Minute, // it exceeds the hard limit + parent: func(now time.Time) (context.Context, context.CancelFunc) { + return context.WithCancel(context.Background()) + }, + newReqWaitCtxExpected: 
true, + reqWaitLimitExpected: time.Minute, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + now := time.Now() + parent, cancel := test.parent(now) + defer cancel() + + clock := clocktesting.NewFakePassiveClock(now) + newReqWaitCtxGot, cancelGot := getRequestWaitContext(parent, test.defaultRequestWaitLimit, clock) + if cancelGot == nil { + t.Errorf("Expected a non nil context.CancelFunc") + return + } + defer cancelGot() + + switch { + case test.newReqWaitCtxExpected: + deadlineGot, ok := newReqWaitCtxGot.Deadline() + if !ok { + t.Errorf("Expected the new wait limit context to have a deadline") + } + if waitLimitGot := deadlineGot.Sub(now); test.reqWaitLimitExpected != waitLimitGot { + t.Errorf("Expected request wait limit %s, but got: %s", test.reqWaitLimitExpected, waitLimitGot) + } + default: + if parent != newReqWaitCtxGot { + t.Errorf("Expected the parent context to be returned: want: %#v, got %#v", parent, newReqWaitCtxGot) + } + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go index 665f20bebdb06..450c7d4f64bf8 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -158,7 +158,7 @@ type GenericAPIServer struct { openAPIConfig *openapicommon.Config // Enable swagger and/or OpenAPI V3 if these configs are non-nil. - openAPIV3Config *openapicommon.Config + openAPIV3Config *openapicommon.OpenAPIV3Config // SkipOpenAPIInstallation indicates not to install the OpenAPI handler // during PrepareRun. @@ -430,11 +430,9 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { } if s.openAPIV3Config != nil && !s.skipOpenAPIInstallation { - if utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) { - s.OpenAPIV3VersionedService = routes.OpenAPI{ - Config: s.openAPIV3Config, - }.InstallV3(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) - } + s.OpenAPIV3VersionedService = routes.OpenAPI{ + V3Config: s.openAPIV3Config, + }.InstallV3(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) } s.installHealthz() diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/api_enablement.go b/staging/src/k8s.io/apiserver/pkg/server/options/api_enablement.go index 13968b4e7d992..6ab58bab24947 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/api_enablement.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/api_enablement.go @@ -42,6 +42,9 @@ func NewAPIEnablementOptions() *APIEnablementOptions { // AddFlags adds flags for a specific APIServer to the specified FlagSet func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) { + if s == nil { + return + } fs.Var(&s.RuntimeConfig, "runtime-config", ""+ "A set of key=value pairs that enable or disable built-in APIs. 
Supported options are:\n"+ "v1=true|false for the core API group\n"+ @@ -87,7 +90,6 @@ func (s *APIEnablementOptions) Validate(registries ...GroupRegistry) []error { // ApplyTo override MergedResourceConfig with defaults and registry func (s *APIEnablementOptions) ApplyTo(c *server.Config, defaultResourceConfig *serverstore.ResourceConfig, registry resourceconfig.GroupVersionRegistry) error { - if s == nil { return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index 4af2f1faf8703..a7b351539660f 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -107,6 +107,26 @@ const ( var codecs serializer.CodecFactory +// this atomic bool allows us to swap enablement of the KMSv2KDF feature in tests +// as the feature gate is now locked to true starting with v1.29 +// Note: it cannot be set by an end user +var kdfDisabled atomic.Bool + +// this function should only be called in tests to swap enablement of the KMSv2KDF feature +func SetKDFForTests(b bool) func() { + kdfDisabled.Store(!b) + return func() { + kdfDisabled.Store(false) + } +} + +// this function should be used to determine enablement of the KMSv2KDF feature +// instead of getting it from DefaultFeatureGate as the feature gate is now locked +// to true starting with v1.29 +func GetKDF() bool { + return !kdfDisabled.Load() +} + func init() { configScheme := runtime.NewScheme() utilruntime.Must(apiserverconfig.AddToScheme(configScheme)) @@ -138,6 +158,7 @@ type kmsv2PluginProbe struct { lastResponse *kmsPluginHealthzResponse l *sync.Mutex apiServerID string + version string } type kmsHealthChecker []healthz.HealthChecker @@ -369,7 +390,7 @@ func (h *kmsv2PluginProbe) rotateDEKOnKeyIDChange(ctx context.Context, statusKey // this gate can only change during tests, but the check is cheap enough to always make // this allows us to easily exercise both modes without restarting the API server // TODO integration test that this dynamically takes effect - useSeed := utilfeature.DefaultFeatureGate.Enabled(features.KMSv2KDF) + useSeed := GetKDF() stateUseSeed := state.EncryptedObject.EncryptedDEKSourceType == kmstypes.EncryptedDEKSourceType_HKDF_SHA256_XNONCE_AES_GCM_SEED // state is valid and status keyID is unchanged from when we generated this DEK/seed so there is no need to rotate it @@ -454,8 +475,16 @@ func (h *kmsv2PluginProbe) isKMSv2ProviderHealthyAndMaybeRotateDEK(ctx context.C if response.Healthz != "ok" { errs = append(errs, fmt.Errorf("got unexpected healthz status: %s", response.Healthz)) } - if response.Version != envelopekmsv2.KMSAPIVersion { - errs = append(errs, fmt.Errorf("expected KMSv2 API version %s, got %s", envelopekmsv2.KMSAPIVersion, response.Version)) + if response.Version != envelopekmsv2.KMSAPIVersionv2 && response.Version != envelopekmsv2.KMSAPIVersionv2beta1 { + errs = append(errs, fmt.Errorf("expected KMSv2 API version %s, got %s", envelopekmsv2.KMSAPIVersionv2, response.Version)) + } else { + // set version for the first status response + if len(h.version) == 0 { + h.version = response.Version + } + if h.version != response.Version { + errs = append(errs, fmt.Errorf("KMSv2 API version should not change after the initial status response version %s, got %s", h.version, response.Version)) + } } if errCode, err := envelopekmsv2.ValidateKeyID(response.KeyID); err != nil { diff --git 
a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go index 46c76284b73e1..3a81e397d91de 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go @@ -187,7 +187,7 @@ func TestLegacyConfig(t *testing.T) { } func TestEncryptionProviderConfigCorrect(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv2, true)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv1, true)() // Set factory for mock envelope service factory := envelopeServiceFactory @@ -353,42 +353,33 @@ func TestKMSv1Deprecation(t *testing.T) { func TestKMSvsEnablement(t *testing.T) { testCases := []struct { - name string - kmsv2Enabled bool - filePath string - expectedErr string + name string + filePath string + expectedErr string }{ { - name: "config with kmsv2 and kmsv1, KMSv2=false", - kmsv2Enabled: false, - filePath: "testdata/valid-configs/kms/multiple-providers-kmsv2.yaml", - expectedErr: "KMSv2 feature is not enabled", - }, - { - name: "config with kmsv2 and kmsv1, KMSv2=true", - kmsv2Enabled: true, - filePath: "testdata/valid-configs/kms/multiple-providers-kmsv2.yaml", - expectedErr: "", + name: "config with kmsv2 and kmsv1, KMSv2=true, KMSv1=false, should fail when feature is disabled", + filePath: "testdata/valid-configs/kms/multiple-providers-mixed.yaml", + expectedErr: "KMSv1 is deprecated and will only receive security updates going forward. Use KMSv2 instead", }, { - name: "config with kmsv1, KMSv2=false", - kmsv2Enabled: false, - filePath: "testdata/valid-configs/kms/multiple-providers.yaml", - expectedErr: "", + name: "config with kmsv2, KMSv2=true, KMSv1=false", + filePath: "testdata/valid-configs/kms/multiple-providers-kmsv2.yaml", + expectedErr: "", }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - // Just testing KMSv2 feature flag - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv1, true)() - - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv2, testCase.kmsv2Enabled)() + // only the KMSv2 feature flag is enabled _, err := LoadEncryptionConfig(testContext(t), testCase.filePath, false, "") - if !strings.Contains(errString(err), testCase.expectedErr) { + if len(testCase.expectedErr) > 0 && !strings.Contains(errString(err), testCase.expectedErr) { t.Fatalf("expected error %q, got %q", testCase.expectedErr, errString(err)) } + if len(testCase.expectedErr) == 0 && err != nil { + t.Fatalf("unexpected error %q", errString(err)) + } }) } @@ -400,43 +391,6 @@ func TestKMSvsEnablement(t *testing.T) { config apiserverconfig.EncryptionConfiguration wantV2Used bool }{ - { - name: "with kmsv1 and kmsv2, KMSv2=false", - kmsv2Enabled: false, - config: apiserverconfig.EncryptionConfiguration{ - Resources: []apiserverconfig.ResourceConfiguration{ - { - Resources: []string{"secrets"}, - Providers: []apiserverconfig.ProviderConfiguration{ - { - KMS: &apiserverconfig.KMSConfiguration{ - Name: "kms", - APIVersion: "v1", - Timeout: &metav1.Duration{ - Duration: 1 * time.Second, - }, - Endpoint: "unix:///tmp/testprovider.sock", - CacheSize: pointer.Int32(1000), - }, - }, - { - KMS: &apiserverconfig.KMSConfiguration{ - Name: "another-kms", - APIVersion: "v2", - 
Timeout: &metav1.Duration{ - Duration: 1 * time.Second, - }, - Endpoint: "unix:///tmp/anothertestprovider.sock", - CacheSize: pointer.Int32(1000), - }, - }, - }, - }, - }, - }, - expectedErr: "KMSv2 feature is not enabled", - wantV2Used: false, - }, { name: "with kmsv1 and kmsv2, KMSv2=true", kmsv2Enabled: true, @@ -501,7 +455,7 @@ func TestKMSvsEnablement(t *testing.T) { } func TestKMSMaxTimeout(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv2, true)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv1, true)() testCases := []struct { name string @@ -749,7 +703,7 @@ func TestKMSMaxTimeout(t *testing.T) { } func TestKMSPluginHealthz(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv2, true)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv1, true)() kmsv2Probe := &kmsv2PluginProbe{ name: "foo", @@ -823,7 +777,7 @@ func TestKMSPluginHealthz(t *testing.T) { }, { desc: "Install multiple healthz with v1 and v2", - config: "testdata/valid-configs/kms/multiple-providers-kmsv2.yaml", + config: "testdata/valid-configs/kms/multiple-providers-mixed.yaml", want: []healthChecker{ kmsv2Probe, &kmsPluginProbe{ @@ -900,6 +854,7 @@ func TestKMSPluginHealthz(t *testing.T) { // tests for masking rules func TestWildcardMasking(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv1, true)() testCases := []struct { desc string @@ -1308,7 +1263,7 @@ func TestWildcardMasking(t *testing.T) { } func TestWildcardStructure(t *testing.T) { - + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv1, true)() testCases := []struct { desc string expectedResourceTransformers map[string]string @@ -1752,7 +1707,7 @@ func TestIsKMSv2ProviderHealthyError(t *testing.T) { statusResponse: &kmsservice.StatusResponse{ Healthz: "unhealthy", }, - expectedErr: "got unexpected healthz status: unhealthy, expected KMSv2 API version v2beta1, got , got invalid KMSv2 KeyID ", + expectedErr: "got unexpected healthz status: unhealthy, expected KMSv2 API version v2, got , got invalid KMSv2 KeyID ", wantMetrics: ` # HELP apiserver_envelope_encryption_invalid_key_id_from_status_total [ALPHA] Number of times an invalid keyID is returned by the Status RPC call split by error. # TYPE apiserver_envelope_encryption_invalid_key_id_from_status_total counter @@ -1760,11 +1715,11 @@ func TestIsKMSv2ProviderHealthyError(t *testing.T) { `, }, { - desc: "version is not v2beta1", + desc: "version is not v2", statusResponse: &kmsservice.StatusResponse{ Version: "v1beta1", }, - expectedErr: "got unexpected healthz status: , expected KMSv2 API version v2beta1, got v1beta1, got invalid KMSv2 KeyID ", + expectedErr: "got unexpected healthz status: , expected KMSv2 API version v2, got v1beta1, got invalid KMSv2 KeyID ", wantMetrics: ` # HELP apiserver_envelope_encryption_invalid_key_id_from_status_total [ALPHA] Number of times an invalid keyID is returned by the Status RPC call split by error. 
# TYPE apiserver_envelope_encryption_invalid_key_id_from_status_total counter @@ -1788,7 +1743,7 @@ func TestIsKMSv2ProviderHealthyError(t *testing.T) { desc: "invalid long keyID", statusResponse: &kmsservice.StatusResponse{ Healthz: "ok", - Version: "v2beta1", + Version: "v2", KeyID: sampleInvalidKeyID, }, expectedErr: "got invalid KMSv2 KeyID ", @@ -1816,6 +1771,52 @@ func TestIsKMSv2ProviderHealthyError(t *testing.T) { } } +// test to ensure KMSv2 API version is not changed after the first status response +func TestKMSv2SameVersionFromStatus(t *testing.T) { + probe := &kmsv2PluginProbe{name: "testplugin"} + service, _ := newMockEnvelopeKMSv2Service(testContext(t), "unix:///tmp/testprovider.sock", "providerName", 3*time.Second) + probe.l = &sync.Mutex{} + probe.state.Store(&envelopekmsv2.State{}) + probe.service = service + + testCases := []struct { + desc string + expectedErr string + newVersion string + }{ + { + desc: "version changed", + newVersion: "v2", + expectedErr: "KMSv2 API version should not change", + }, + { + desc: "version unchanged", + newVersion: "v2beta1", + expectedErr: "", + }, + } + for _, tt := range testCases { + t.Run(tt.desc, func(t *testing.T) { + statusResponse := &kmsservice.StatusResponse{ + Healthz: "ok", + Version: "v2beta1", + KeyID: "1", + } + if err := probe.isKMSv2ProviderHealthyAndMaybeRotateDEK(testContext(t), statusResponse); err != nil { + t.Fatal(err) + } + statusResponse.Version = tt.newVersion + err := probe.isKMSv2ProviderHealthyAndMaybeRotateDEK(testContext(t), statusResponse) + if len(tt.expectedErr) > 0 && !strings.Contains(errString(err), tt.expectedErr) { + t.Errorf("expected err %q, got %q", tt.expectedErr, errString(err)) + } + if len(tt.expectedErr) == 0 && err != nil { + t.Fatal(err) + } + }) + } +} + func testContext(t *testing.T) context.Context { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -1840,7 +1841,7 @@ func TestComputeEncryptionConfigHash(t *testing.T) { } func Test_kmsv2PluginProbe_rotateDEKOnKeyIDChange(t *testing.T) { - defaultUseSeed := utilfeature.DefaultFeatureGate.Enabled(features.KMSv2KDF) + defaultUseSeed := GetKDF() origNowFunc := envelopekmsv2.NowFunc now := origNowFunc() // freeze time @@ -2065,7 +2066,7 @@ func Test_kmsv2PluginProbe_rotateDEKOnKeyIDChange(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv2KDF, tt.useSeed)() + defer SetKDFForTests(tt.useSeed)() var buf bytes.Buffer klog.SetOutput(&buf) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/testdata/valid-configs/kms/multiple-providers-kmsv2.yaml b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/testdata/valid-configs/kms/multiple-providers-kmsv2.yaml index fd6e9079ea0d8..1265dd1b48190 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/testdata/valid-configs/kms/multiple-providers-kmsv2.yaml +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/testdata/valid-configs/kms/multiple-providers-kmsv2.yaml @@ -10,6 +10,7 @@ resources: endpoint: unix:///tmp/testprovider.sock timeout: 15s - kms: + apiVersion: v2 name: bar endpoint: unix:///tmp/testprovider.sock timeout: 15s diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/testdata/valid-configs/kms/multiple-providers-mixed.yaml 
b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/testdata/valid-configs/kms/multiple-providers-mixed.yaml new file mode 100644 index 0000000000000..fd6e9079ea0d8 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/testdata/valid-configs/kms/multiple-providers-mixed.yaml @@ -0,0 +1,15 @@ +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - kms: + apiVersion: v2 + name: foo + endpoint: unix:///tmp/testprovider.sock + timeout: 15s + - kms: + name: bar + endpoint: unix:///tmp/testprovider.sock + timeout: 15s diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index 2d8741ebd1f92..a1fc3168c5dd1 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -26,6 +26,7 @@ import ( "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -44,8 +45,6 @@ import ( ) type EtcdOptions struct { - // The value of Paging on StorageConfig will be overridden by the - // calculated feature gate value. StorageConfig storagebackend.Config EncryptionProviderConfigFilepath string EncryptionProviderConfigAutomaticReload bool @@ -87,6 +86,12 @@ func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions { return options } +var storageMediaTypes = sets.New( + runtime.ContentTypeJSON, + runtime.ContentTypeYAML, + runtime.ContentTypeProtobuf, +) + func (s *EtcdOptions) Validate() []error { if s == nil { return nil @@ -120,6 +125,10 @@ func (s *EtcdOptions) Validate() []error { allErrors = append(allErrors, fmt.Errorf("--encryption-provider-config-automatic-reload must be set with --encryption-provider-config")) } + if s.DefaultStorageMediaType != "" && !storageMediaTypes.Has(s.DefaultStorageMediaType) { + allErrors = append(allErrors, fmt.Errorf("--storage-media-type %q invalid, allowed values: %s", s.DefaultStorageMediaType, strings.Join(sets.List(storageMediaTypes), ", "))) + } + return allErrors } @@ -332,18 +341,23 @@ func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err erro c.ResourceTransformers = dynamicTransformers if !s.SkipHealthEndpoints { - c.AddHealthChecks(dynamicTransformers) + addHealthChecksWithoutLivez(c, dynamicTransformers) } } else { c.ResourceTransformers = encryptionconfig.StaticTransformers(encryptionConfiguration.Transformers) if !s.SkipHealthEndpoints { - c.AddHealthChecks(encryptionConfiguration.HealthChecks...) + addHealthChecksWithoutLivez(c, encryptionConfiguration.HealthChecks...) } } return nil } +func addHealthChecksWithoutLivez(c *server.Config, healthChecks ...healthz.HealthChecker) { + c.HealthzChecks = append(c.HealthzChecks, healthChecks...) + c.ReadyzChecks = append(c.ReadyzChecks, healthChecks...) 
+} + func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { healthCheck, err := storagefactory.CreateHealthCheck(s.StorageConfig, c.DrainedNotify()) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/etcd_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/etcd_test.go index d10474248b4e7..f24077a349ab1 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/etcd_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/etcd_test.go @@ -160,6 +160,40 @@ func TestEtcdOptionsValidate(t *testing.T) { EtcdServersOverrides: []string{"/events#http://127.0.0.1:4002"}, }, }, + { + name: "empty storage-media-type", + testOptions: &EtcdOptions{ + StorageConfig: storagebackend.Config{ + Transport: storagebackend.TransportConfig{ + ServerList: []string{"http://127.0.0.1"}, + }, + }, + DefaultStorageMediaType: "", + }, + }, + { + name: "recognized storage-media-type", + testOptions: &EtcdOptions{ + StorageConfig: storagebackend.Config{ + Transport: storagebackend.TransportConfig{ + ServerList: []string{"http://127.0.0.1"}, + }, + }, + DefaultStorageMediaType: "application/json", + }, + }, + { + name: "unrecognized storage-media-type", + testOptions: &EtcdOptions{ + StorageConfig: storagebackend.Config{ + Transport: storagebackend.TransportConfig{ + ServerList: []string{"http://127.0.0.1"}, + }, + }, + DefaultStorageMediaType: "foo/bar", + }, + expectErr: `--storage-media-type "foo/bar" invalid, allowed values: application/json, application/vnd.kubernetes.protobuf, application/yaml`, + }, } for _, testcase := range testCases { @@ -229,68 +263,90 @@ func TestParseWatchCacheSizes(t *testing.T) { } func TestKMSHealthzEndpoint(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv2, true)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KMSv1, true)() testCases := []struct { name string encryptionConfigPath string - wantChecks []string + wantHealthzChecks []string + wantReadyzChecks []string + wantLivezChecks []string skipHealth bool reload bool }{ { - name: "no kms-provider, expect no kms healthz check", + name: "no kms-provider, expect no kms healthz check, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/no-kms-provider.yaml", - wantChecks: []string{"etcd"}, + wantHealthzChecks: []string{"etcd"}, + wantReadyzChecks: []string{"etcd", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "no kms-provider+reload, expect single kms healthz check", + name: "no kms-provider+reload, expect single kms healthz check, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/no-kms-provider.yaml", reload: true, - wantChecks: []string{"etcd", "kms-providers"}, + wantHealthzChecks: []string{"etcd", "kms-providers"}, + wantReadyzChecks: []string{"etcd", "kms-providers", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "single kms-provider, expect single kms healthz check", + name: "single kms-provider, expect single kms healthz check, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/single-kms-provider.yaml", - wantChecks: []string{"etcd", "kms-provider-0"}, + wantHealthzChecks: []string{"etcd", "kms-provider-0"}, + wantReadyzChecks: []string{"etcd", "kms-provider-0", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "two kms-providers, expect two kms healthz checks", + name: "two kms-providers, expect two kms healthz checks, no kms livez 
check", encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers.yaml", - wantChecks: []string{"etcd", "kms-provider-0", "kms-provider-1"}, + wantHealthzChecks: []string{"etcd", "kms-provider-0", "kms-provider-1"}, + wantReadyzChecks: []string{"etcd", "kms-provider-0", "kms-provider-1", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "two kms-providers+reload, expect single kms healthz check", + name: "two kms-providers+reload, expect single kms healthz check, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers.yaml", reload: true, - wantChecks: []string{"etcd", "kms-providers"}, + wantHealthzChecks: []string{"etcd", "kms-providers"}, + wantReadyzChecks: []string{"etcd", "kms-providers", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "kms v1+v2, expect three kms healthz checks", + name: "kms v1+v2, expect three kms healthz checks, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers-with-v2.yaml", - wantChecks: []string{"etcd", "kms-provider-0", "kms-provider-1", "kms-provider-2"}, + wantHealthzChecks: []string{"etcd", "kms-provider-0", "kms-provider-1", "kms-provider-2"}, + wantReadyzChecks: []string{"etcd", "kms-provider-0", "kms-provider-1", "kms-provider-2", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "kms v1+v2+reload, expect single kms healthz check", + name: "kms v1+v2+reload, expect single kms healthz check, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers-with-v2.yaml", reload: true, - wantChecks: []string{"etcd", "kms-providers"}, + wantHealthzChecks: []string{"etcd", "kms-providers"}, + wantReadyzChecks: []string{"etcd", "kms-providers", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "multiple kms v2, expect single kms healthz check", + name: "multiple kms v2, expect single kms healthz check, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/multiple-kms-v2-providers.yaml", - wantChecks: []string{"etcd", "kms-providers"}, + wantHealthzChecks: []string{"etcd", "kms-providers"}, + wantReadyzChecks: []string{"etcd", "kms-providers", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "multiple kms v2+reload, expect single kms healthz check", + name: "multiple kms v2+reload, expect single kms healthz check, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/multiple-kms-v2-providers.yaml", reload: true, - wantChecks: []string{"etcd", "kms-providers"}, + wantHealthzChecks: []string{"etcd", "kms-providers"}, + wantReadyzChecks: []string{"etcd", "kms-providers", "etcd-readiness"}, + wantLivezChecks: []string{"etcd"}, }, { - name: "two kms-providers with skip, expect zero kms healthz checks", + name: "two kms-providers with skip, expect zero kms healthz checks, no kms livez check", encryptionConfigPath: "testdata/encryption-configs/multiple-kms-providers.yaml", - wantChecks: nil, + wantHealthzChecks: nil, + wantReadyzChecks: nil, + wantLivezChecks: nil, skipHealth: true, }, } @@ -310,7 +366,9 @@ func TestKMSHealthzEndpoint(t *testing.T) { t.Fatalf("Failed to add healthz error: %v", err) } - healthChecksAreEqual(t, tc.wantChecks, serverConfig.HealthzChecks) + healthChecksAreEqual(t, tc.wantHealthzChecks, serverConfig.HealthzChecks, "healthz") + healthChecksAreEqual(t, tc.wantReadyzChecks, serverConfig.ReadyzChecks, "readyz") + healthChecksAreEqual(t, tc.wantLivezChecks, serverConfig.LivezChecks, 
"livez") }) } } @@ -320,17 +378,20 @@ func TestReadinessCheck(t *testing.T) { name string wantReadyzChecks []string wantHealthzChecks []string + wantLivezChecks []string skipHealth bool }{ { name: "Readyz should have etcd-readiness check", wantReadyzChecks: []string{"etcd", "etcd-readiness"}, wantHealthzChecks: []string{"etcd"}, + wantLivezChecks: []string{"etcd"}, }, { name: "skip health, Readyz should not have etcd-readiness check", wantReadyzChecks: nil, wantHealthzChecks: nil, + wantLivezChecks: nil, skipHealth: true, }, } @@ -346,13 +407,14 @@ func TestReadinessCheck(t *testing.T) { t.Fatalf("Failed to add healthz error: %v", err) } - healthChecksAreEqual(t, tc.wantReadyzChecks, serverConfig.ReadyzChecks) - healthChecksAreEqual(t, tc.wantHealthzChecks, serverConfig.HealthzChecks) + healthChecksAreEqual(t, tc.wantReadyzChecks, serverConfig.ReadyzChecks, "readyz") + healthChecksAreEqual(t, tc.wantHealthzChecks, serverConfig.HealthzChecks, "healthz") + healthChecksAreEqual(t, tc.wantLivezChecks, serverConfig.LivezChecks, "livez") }) } } -func healthChecksAreEqual(t *testing.T, want []string, healthChecks []healthz.HealthChecker) { +func healthChecksAreEqual(t *testing.T, want []string, healthChecks []healthz.HealthChecker, checkerType string) { t.Helper() wantSet := sets.NewString(want...) @@ -365,6 +427,6 @@ func healthChecksAreEqual(t *testing.T, want []string, healthChecks []healthz.He gotSet.Delete("log", "ping") // not relevant for our tests if !wantSet.Equal(gotSet) { - t.Errorf("healthz checks are not equal, missing=%q, extra=%q", wantSet.Difference(gotSet).List(), gotSet.Difference(wantSet).List()) + t.Errorf("%s checks are not equal, missing=%q, extra=%q", checkerType, wantSet.Difference(gotSet).List(), gotSet.Difference(wantSet).List()) } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go index 69f8fb51556bc..5d031e202e0fb 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/recommended.go @@ -154,7 +154,6 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { config.SharedInformerFactory, kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta3(), config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, - config.RequestTimeout/4, ) } else { klog.Warningf("Neither kubeconfig is provided nor service-account is mounted, so APIPriorityAndFairness will be disabled") diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/tracing_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/tracing_test.go index d05c08e547c7f..1d29cce52122a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/tracing_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/tracing_test.go @@ -67,9 +67,9 @@ func TestValidateTracingOptions(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { errs := tc.contents.Validate() - if tc.expectError == false && len(errs) != 0 { + if !tc.expectError && len(errs) != 0 { t.Errorf("Calling Validate expected no error, got %v", errs) - } else if tc.expectError == true && len(errs) == 0 { + } else if tc.expectError && len(errs) == 0 { t.Errorf("Calling Validate expected error, got no error") } }) diff --git a/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go b/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go index 2819d1576016b..12c8b1ad9100b 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go +++ b/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -32,7 +32,8 @@ import ( // OpenAPI installs spec endpoints for each web service. type OpenAPI struct { - Config *common.Config + Config *common.Config + V3Config *common.OpenAPIV3Config } // Install adds the SwaggerUI webservice to the given mux. @@ -65,7 +66,7 @@ func (oa OpenAPI) InstallV3(c *restful.Container, mux *mux.PathRecorderMux) *han } for gv, ws := range grouped { - spec, err := builder3.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(ws), oa.Config) + spec, err := builder3.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(ws), oa.V3Config) if err != nil { klog.Errorf("Failed to build OpenAPI v3 for group %s, %q", gv, err) diff --git a/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go b/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go index 64bcc87ebf173..0a4fdc6932eda 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go +++ b/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go @@ -189,7 +189,10 @@ func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Dur if s.HTTP2MaxStreamsPerConnection > 0 { http2Options.MaxConcurrentStreams = uint32(s.HTTP2MaxStreamsPerConnection) } else { - http2Options.MaxConcurrentStreams = 250 + // match http2.initialMaxConcurrentStreams used by clients + // this makes it so that a malicious client can only open 400 streams before we forcibly close the connection + // https://github.com/golang/net/commit/b225e7ca6dde1ef5a5ae5ce922861bda011cfabd + http2Options.MaxConcurrentStreams = 100 } // increase the connection buffer size from the 1MB default to handle the specified number of concurrent streams diff --git a/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory.go index e5fde95921a85..0dc50cea61d4b 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -112,8 +112,6 @@ type groupResourceOverrides struct { // decoderDecoratorFn is optional and may wrap the provided decoders (can add new decoders). The order of // returned decoders will be priority for attempt to decode. decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder - // disablePaging will prevent paging on the provided resource. 
- disablePaging bool } // Apply overrides the provided config and options if the override has a value in that position @@ -137,9 +135,6 @@ func (o groupResourceOverrides) Apply(config *storagebackend.Config, options *St if o.decoderDecoratorFn != nil { options.DecoderDecoratorFn = o.decoderDecoratorFn } - if o.disablePaging { - config.Paging = false - } } var _ StorageFactory = &DefaultStorageFactory{} @@ -154,7 +149,6 @@ func NewDefaultStorageFactory( resourceConfig APIResourceConfigSource, specialDefaultResourcePrefixes map[schema.GroupResource]string, ) *DefaultStorageFactory { - config.Paging = true if len(defaultMediaType) == 0 { defaultMediaType = runtime.ContentTypeJSON } diff --git a/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory_test.go b/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory_test.go index 2e15ba171c14b..c52049ca0ec09 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory_test.go @@ -197,30 +197,30 @@ func TestConfigs(t *testing.T) { }{ { wantConfigs: []storagebackend.Config{ - {Transport: storagebackend.TransportConfig{ServerList: defaultEtcdLocations}, Prefix: "/registry", Paging: true}, + {Transport: storagebackend.TransportConfig{ServerList: defaultEtcdLocations}, Prefix: "/registry"}, }, }, { resource: &schema.GroupResource{Group: example.GroupName, Resource: "resource"}, servers: []string{}, wantConfigs: []storagebackend.Config{ - {Transport: storagebackend.TransportConfig{ServerList: defaultEtcdLocations}, Prefix: "/registry", Paging: true}, + {Transport: storagebackend.TransportConfig{ServerList: defaultEtcdLocations}, Prefix: "/registry"}, }, }, { resource: &schema.GroupResource{Group: example.GroupName, Resource: "resource"}, servers: []string{"http://127.0.0.1:10000"}, wantConfigs: []storagebackend.Config{ - {Transport: storagebackend.TransportConfig{ServerList: defaultEtcdLocations}, Prefix: "/registry", Paging: true}, - {Transport: storagebackend.TransportConfig{ServerList: []string{"http://127.0.0.1:10000"}}, Prefix: "/registry", Paging: true}, + {Transport: storagebackend.TransportConfig{ServerList: defaultEtcdLocations}, Prefix: "/registry"}, + {Transport: storagebackend.TransportConfig{ServerList: []string{"http://127.0.0.1:10000"}}, Prefix: "/registry"}, }, }, { resource: &schema.GroupResource{Group: example.GroupName, Resource: "resource"}, servers: []string{"http://127.0.0.1:10000", "https://127.0.0.1", "http://127.0.0.2"}, wantConfigs: []storagebackend.Config{ - {Transport: storagebackend.TransportConfig{ServerList: defaultEtcdLocations}, Prefix: "/registry", Paging: true}, - {Transport: storagebackend.TransportConfig{ServerList: []string{"http://127.0.0.1:10000", "https://127.0.0.1", "http://127.0.0.2"}}, Prefix: "/registry", Paging: true}, + {Transport: storagebackend.TransportConfig{ServerList: defaultEtcdLocations}, Prefix: "/registry"}, + {Transport: storagebackend.TransportConfig{ServerList: []string{"http://127.0.0.1:10000", "https://127.0.0.1", "http://127.0.0.2"}}, Prefix: "/registry"}, }, }, } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cache_watcher_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cache_watcher_test.go index b47fe0ed4b962..1a59615ce394b 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cache_watcher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cache_watcher_test.go @@ -485,7 +485,7 @@ func 
TestCacheWatcherDrainingNoBookmarkAfterResourceVersionReceived(t *testing.T forget := func(drainWatcher bool) { lock.Lock() defer lock.Unlock() - if drainWatcher == true { + if drainWatcher { t.Fatalf("didn't expect drainWatcher to be set to true") } count++ diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 7248fe71768c9..c7d9390ae76af 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -1252,7 +1252,7 @@ func (c *Cacher) LastSyncResourceVersion() (uint64, error) { // // The returned function must be called under the watchCache lock. func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, parsedResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || *opts.SendInitialEvents == false || !opts.Predicate.AllowWatchBookmarks { + if opts.SendInitialEvents == nil || !*opts.SendInitialEvents || !opts.Predicate.AllowWatchBookmarks { return func() uint64 { return 0 }, nil } return c.getCommonResourceVersionLockedFunc(ctx, parsedResourceVersion, opts) @@ -1267,7 +1267,7 @@ func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, // // The returned function must be called under the watchCache lock. func (c *Cacher) getStartResourceVersionForWatchLockedFunc(ctx context.Context, parsedWatchResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || *opts.SendInitialEvents == true { + if opts.SendInitialEvents == nil || *opts.SendInitialEvents { return func() uint64 { return parsedWatchResourceVersion }, nil } return c.getCommonResourceVersionLockedFunc(ctx, parsedWatchResourceVersion, opts) @@ -1298,7 +1298,7 @@ func (c *Cacher) getCommonResourceVersionLockedFunc(ctx context.Context, parsedW // Additionally, it instructs the caller whether it should ask for // all events from the cache (full state) or not. 
func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, requestedWatchRV uint64, opts storage.ListOptions) (bool, error) { - if opts.SendInitialEvents != nil && *opts.SendInitialEvents == true { + if opts.SendInitialEvents != nil && *opts.SendInitialEvents { err := c.watchCache.waitUntilFreshAndBlock(ctx, requestedWatchRV) defer c.watchCache.RUnlock() return err == nil, err diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_test.go index c4ca88ef69913..e13ffbd03e1a1 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -141,12 +142,30 @@ func TestValidateDeletionWithSuggestion(t *testing.T) { storagetesting.RunTestValidateDeletionWithSuggestion(ctx, t, cacher) } +func TestValidateDeletionWithOnlySuggestionValid(t *testing.T) { + ctx, cacher, terminate := testSetup(t) + t.Cleanup(terminate) + storagetesting.RunTestValidateDeletionWithOnlySuggestionValid(ctx, t, cacher) +} + +func TestDeleteWithConflict(t *testing.T) { + ctx, cacher, terminate := testSetup(t) + t.Cleanup(terminate) + storagetesting.RunTestDeleteWithConflict(ctx, t, cacher) +} + func TestPreconditionalDeleteWithSuggestion(t *testing.T) { ctx, cacher, terminate := testSetup(t) t.Cleanup(terminate) storagetesting.RunTestPreconditionalDeleteWithSuggestion(ctx, t, cacher) } +func TestPreconditionalDeleteWithSuggestionPass(t *testing.T) { + ctx, cacher, terminate := testSetup(t) + t.Cleanup(terminate) + storagetesting.RunTestPreconditionalDeleteWithOnlySuggestionPass(ctx, t, cacher) +} + func TestList(t *testing.T) { ctx, cacher, server, terminate := testSetupWithEtcdServer(t) t.Cleanup(terminate) @@ -160,12 +179,6 @@ func TestListWithListFromCache(t *testing.T) { storagetesting.RunTestList(ctx, t, cacher, compactStorage(cacher, server.V3Client), true) } -func TestListWithoutPaging(t *testing.T) { - ctx, cacher, terminate := testSetup(t, withoutPaging) - t.Cleanup(terminate) - storagetesting.RunTestListWithoutPaging(ctx, t, cacher) -} - func TestGetListNonRecursive(t *testing.T) { ctx, cacher, terminate := testSetup(t) t.Cleanup(terminate) @@ -327,6 +340,18 @@ func TestSendInitialEventsBackwardCompatibility(t *testing.T) { storagetesting.RunSendInitialEventsBackwardCompatibility(ctx, t, store) } +func TestCacherWatchSemantics(t *testing.T) { + store, terminate := testSetupWithEtcdAndCreateWrapper(t) + t.Cleanup(terminate) + storagetesting.RunWatchSemantics(context.TODO(), t, store) +} + +func TestCacherWatchSemanticInitialEventsExtended(t *testing.T) { + store, terminate := testSetupWithEtcdAndCreateWrapper(t) + t.Cleanup(terminate) + storagetesting.RunWatchSemanticInitialEventsExtended(context.TODO(), t, store) +} + // =================================================== // Test-setup related function are following. 
// =================================================== @@ -337,7 +362,6 @@ type setupOptions struct { resourcePrefix string keyFunc func(runtime.Object) (string, error) indexerFuncs map[string]storage.IndexerFunc - pagingEnabled bool clock clock.WithTicker } @@ -348,7 +372,6 @@ func withDefaults(options *setupOptions) { options.resourcePrefix = prefix options.keyFunc = func(obj runtime.Object) (string, error) { return storage.NamespaceKeyFunc(prefix, obj) } - options.pagingEnabled = true options.clock = clock.RealClock{} } @@ -370,10 +393,6 @@ func withSpecNodeNameIndexerFuncs(options *setupOptions) { } } -func withoutPaging(options *setupOptions) { - options.pagingEnabled = false -} - func testSetup(t *testing.T, opts ...setupOption) (context.Context, *Cacher, tearDownFunc) { ctx, cacher, _, tearDown := testSetupWithEtcdServer(t, opts...) return ctx, cacher, tearDown @@ -386,7 +405,7 @@ func testSetupWithEtcdServer(t *testing.T, opts ...setupOption) (context.Context opt(&setupOpts) } - server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix(), setupOpts.pagingEnabled) + server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) // Inject one list error to make sure we test the relist case. wrappedStorage := &storagetesting.StorageInjectingListErrors{ Interface: etcdStorage, @@ -424,3 +443,36 @@ func testSetupWithEtcdServer(t *testing.T, opts ...setupOption) (context.Context return ctx, cacher, server, terminate } + +func testSetupWithEtcdAndCreateWrapper(t *testing.T, opts ...setupOption) (storage.Interface, tearDownFunc) { + _, cacher, _, tearDown := testSetupWithEtcdServer(t, opts...) + + if err := cacher.ready.wait(context.TODO()); err != nil { + t.Fatalf("unexpected error waiting for the cache to be ready") + } + return &createWrapper{Cacher: cacher}, tearDown +} + +type createWrapper struct { + *Cacher +} + +func (c *createWrapper) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + if err := c.Cacher.Create(ctx, key, obj, out, ttl); err != nil { + return err + } + return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, wait.ForeverTestTimeout, true, func(ctx context.Context) (bool, error) { + currentObj := c.Cacher.newFunc() + err := c.Cacher.Get(ctx, key, storage.GetOptions{ResourceVersion: "0"}, currentObj) + if err != nil { + if storage.IsNotFound(err) { + return false, nil + } + return false, err + } + if !apiequality.Semantic.DeepEqual(currentObj, out) { + return false, nil + } + return true, nil + }) +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_testing_utils_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_testing_utils_test.go index 3eb757c42a6dd..4fe2e759ae7f4 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_testing_utils_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_testing_utils_test.go @@ -55,7 +55,7 @@ func init() { func newPod() runtime.Object { return &example.Pod{} } func newPodList() runtime.Object { return &example.PodList{} } -func newEtcdTestStorage(t *testing.T, prefix string, pagingEnabled bool) (*etcd3testing.EtcdTestServer, storage.Interface) { +func newEtcdTestStorage(t *testing.T, prefix string) (*etcd3testing.EtcdTestServer, storage.Interface) { server, _ := etcd3testing.NewUnsecuredEtcd3TestClientServer(t) storage := etcd3.New( server.V3Client, @@ -66,7 +66,6 @@ func newEtcdTestStorage(t *testing.T, prefix string, pagingEnabled bool) (*etcd3 "/pods", schema.GroupResource{Resource: "pods"}, 
identity.NewEncryptCheckTransformer(), - pagingEnabled, etcd3.NewDefaultLeaseManagerConfig()) return server, storage } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go index eecf2ddfc9804..11220e721f352 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go @@ -342,7 +342,7 @@ func TestWatchCacheBypass(t *testing.T) { } func TestEmptyWatchEventCache(t *testing.T) { - server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix(), true) + server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) // add a few objects @@ -1340,15 +1340,6 @@ func verifyEvents(t *testing.T, w watch.Interface, events []watch.Event, strictO } } -func verifyNoEvents(t *testing.T, w watch.Interface) { - select { - case e := <-w.ResultChan(): - t.Errorf("Unexpected: %#v event received, expected no events", e) - case <-time.After(time.Second): - return - } -} - func TestCachingDeleteEvents(t *testing.T) { backingStorage := &dummyStorage{} cacher, _, err := newTestCacher(backingStorage) @@ -1611,205 +1602,6 @@ func TestCacheIntervalInvalidationStopsWatch(t *testing.T) { } } -func TestCacherWatchSemantics(t *testing.T) { - trueVal, falseVal := true, false - makePod := func(rv uint64) *example.Pod { - return &example.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("pod-%d", rv), - Namespace: "ns", - ResourceVersion: fmt.Sprintf("%d", rv), - Annotations: map[string]string{}, - }, - } - } - - scenarios := []struct { - name string - allowWatchBookmarks bool - sendInitialEvents *bool - resourceVersion string - storageResourceVersion string - - initialPods []*example.Pod - podsAfterEstablishingWatch []*example.Pod - - expectedInitialEventsInStrictOrder []watch.Event - expectedInitialEventsInRandomOrder []watch.Event - expectedEventsAfterEstablishingWatch []watch.Event - }{ - { - name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=unset, storageRV=102", - allowWatchBookmarks: true, - sendInitialEvents: &trueVal, - storageResourceVersion: "102", - initialPods: []*example.Pod{makePod(101)}, - podsAfterEstablishingWatch: []*example.Pod{makePod(102)}, - expectedInitialEventsInRandomOrder: []watch.Event{{Type: watch.Added, Object: makePod(101)}}, - expectedEventsAfterEstablishingWatch: []watch.Event{ - {Type: watch.Added, Object: makePod(102)}, - {Type: watch.Bookmark, Object: &example.Pod{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "102", - Annotations: map[string]string{"k8s.io/initial-events-end": "true"}, - }, - }}, - }, - }, - { - name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=0, storageRV=105", - allowWatchBookmarks: true, - sendInitialEvents: &trueVal, - resourceVersion: "0", - storageResourceVersion: "105", - initialPods: []*example.Pod{makePod(101), makePod(102)}, - expectedInitialEventsInRandomOrder: []watch.Event{ - {Type: watch.Added, Object: makePod(101)}, - {Type: watch.Added, Object: makePod(102)}, - }, - expectedInitialEventsInStrictOrder: []watch.Event{ - {Type: watch.Bookmark, Object: &example.Pod{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "102", - Annotations: map[string]string{"k8s.io/initial-events-end": "true"}, - }, - }}, - }, - }, - { - name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=101, storageRV=105", - allowWatchBookmarks: true, - sendInitialEvents: &trueVal, - resourceVersion: 
"101", - storageResourceVersion: "105", - initialPods: []*example.Pod{makePod(101), makePod(102)}, - expectedInitialEventsInRandomOrder: []watch.Event{{Type: watch.Added, Object: makePod(101)}, {Type: watch.Added, Object: makePod(102)}}, - expectedInitialEventsInStrictOrder: []watch.Event{ - {Type: watch.Bookmark, Object: &example.Pod{ - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: "102", - Annotations: map[string]string{"k8s.io/initial-events-end": "true"}, - }, - }}, - }, - }, - { - name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=unset, storageRV=102", - sendInitialEvents: &trueVal, - storageResourceVersion: "102", - initialPods: []*example.Pod{makePod(101)}, - expectedInitialEventsInRandomOrder: []watch.Event{{Type: watch.Added, Object: makePod(101)}}, - podsAfterEstablishingWatch: []*example.Pod{makePod(102)}, - expectedEventsAfterEstablishingWatch: []watch.Event{{Type: watch.Added, Object: makePod(102)}}, - }, - { - // note we set storage's RV to some future value, mustn't be used by this scenario - name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=0, storageRV=105", - sendInitialEvents: &trueVal, - resourceVersion: "0", - storageResourceVersion: "105", - initialPods: []*example.Pod{makePod(101), makePod(102)}, - expectedInitialEventsInRandomOrder: []watch.Event{{Type: watch.Added, Object: makePod(101)}, {Type: watch.Added, Object: makePod(102)}}, - }, - { - // note we set storage's RV to some future value, mustn't be used by this scenario - name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=101, storageRV=105", - sendInitialEvents: &trueVal, - resourceVersion: "101", - storageResourceVersion: "105", - initialPods: []*example.Pod{makePod(101), makePod(102)}, - // make sure we only get initial events that are > initial RV (101) - expectedInitialEventsInRandomOrder: []watch.Event{{Type: watch.Added, Object: makePod(101)}, {Type: watch.Added, Object: makePod(102)}}, - }, - { - name: "sendInitialEvents=false, RV=unset, storageRV=103", - sendInitialEvents: &falseVal, - storageResourceVersion: "103", - initialPods: []*example.Pod{makePod(101), makePod(102)}, - podsAfterEstablishingWatch: []*example.Pod{makePod(104)}, - expectedEventsAfterEstablishingWatch: []watch.Event{{Type: watch.Added, Object: makePod(104)}}, - }, - { - // note we set storage's RV to some future value, mustn't be used by this scenario - name: "sendInitialEvents=false, RV=0, storageRV=105", - sendInitialEvents: &falseVal, - resourceVersion: "0", - storageResourceVersion: "105", - initialPods: []*example.Pod{makePod(101), makePod(102)}, - podsAfterEstablishingWatch: []*example.Pod{makePod(103)}, - expectedEventsAfterEstablishingWatch: []watch.Event{{Type: watch.Added, Object: makePod(103)}}, - }, - { - // note we set storage's RV to some future value, mustn't be used by this scenario - name: "legacy, RV=0, storageRV=105", - resourceVersion: "0", - storageResourceVersion: "105", - initialPods: []*example.Pod{makePod(101), makePod(102)}, - expectedInitialEventsInRandomOrder: []watch.Event{{Type: watch.Added, Object: makePod(101)}, {Type: watch.Added, Object: makePod(102)}}, - }, - { - // note we set storage's RV to some future value, mustn't be used by this scenario - name: "legacy, RV=unset, storageRV=105", - storageResourceVersion: "105", - initialPods: []*example.Pod{makePod(101), makePod(102)}, - // no events because the watch is delegated to the underlying storage - }, - } - for _, scenario := range scenarios { - t.Run(scenario.name, func(t *testing.T) { - // set up env - defer 
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchList, true)() - storageListMetaResourceVersion := "" - backingStorage := &dummyStorage{getListFn: func(_ context.Context, _ string, _ storage.ListOptions, listObj runtime.Object) error { - podList := listObj.(*example.PodList) - podList.ListMeta = metav1.ListMeta{ResourceVersion: storageListMetaResourceVersion} - return nil - }} - - cacher, _, err := newTestCacher(backingStorage) - if err != nil { - t.Fatalf("falied to create cacher: %v", err) - } - defer cacher.Stop() - if err := cacher.ready.wait(context.TODO()); err != nil { - t.Fatalf("unexpected error waiting for the cache to be ready") - } - - // now, run a scenario - // but first let's add some initial data - for _, obj := range scenario.initialPods { - err = cacher.watchCache.Add(obj) - require.NoError(t, err, "failed to add a pod: %v") - } - // read request params - opts := storage.ListOptions{Predicate: storage.Everything} - opts.SendInitialEvents = scenario.sendInitialEvents - opts.Predicate.AllowWatchBookmarks = scenario.allowWatchBookmarks - if len(scenario.resourceVersion) > 0 { - opts.ResourceVersion = scenario.resourceVersion - } - // before starting a new watch set a storage RV to some future value - storageListMetaResourceVersion = scenario.storageResourceVersion - - w, err := cacher.Watch(context.Background(), "pods/ns", opts) - require.NoError(t, err, "failed to create watch: %v") - defer w.Stop() - - // make sure we only get initial events - verifyEvents(t, w, scenario.expectedInitialEventsInRandomOrder, false) - verifyEvents(t, w, scenario.expectedInitialEventsInStrictOrder, true) - verifyNoEvents(t, w) - // add a pod that is greater than the storage's RV when the watch was started - for _, obj := range scenario.podsAfterEstablishingWatch { - err = cacher.watchCache.Add(obj) - require.NoError(t, err, "failed to add a pod: %v") - } - verifyEvents(t, w, scenario.expectedEventsAfterEstablishingWatch, true) - verifyNoEvents(t, w) - }) - } -} - func TestWaitUntilWatchCacheFreshAndForceAllEvents(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchList, true)() backingStorage := &dummyStorage{} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/lister_watcher_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/lister_watcher_test.go index 6506f1ff6aac6..6c89ee3038e14 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/lister_watcher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/lister_watcher_test.go @@ -28,7 +28,7 @@ import ( func TestCacherListerWatcher(t *testing.T) { prefix := "pods" fn := func() runtime.Object { return &example.PodList{} } - server, store := newEtcdTestStorage(t, prefix, true) + server, store := newEtcdTestStorage(t, prefix) defer server.Terminate(t) objects := []*example.Pod{ @@ -62,7 +62,7 @@ func TestCacherListerWatcher(t *testing.T) { func TestCacherListerWatcherPagination(t *testing.T) { prefix := "pods" fn := func() runtime.Object { return &example.PodList{} } - server, store := newEtcdTestStorage(t, prefix, true) + server, store := newEtcdTestStorage(t, prefix) defer server.Terminate(t) // We need the list to be sorted by name to later check the alphabetical order of diff --git a/staging/src/k8s.io/apiserver/pkg/storage/errors.go b/staging/src/k8s.io/apiserver/pkg/storage/errors.go index ed4f4d0d0e8ea..5f29097c59ca2 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/errors.go +++ 
b/staging/src/k8s.io/apiserver/pkg/storage/errors.go @@ -17,13 +17,16 @@ limitations under the License. package storage import ( + "errors" "fmt" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" ) +var ErrResourceVersionSetOnCreate = errors.New("resourceVersion should not be set on objects to be created") + const ( ErrCodeKeyNotFound int = iota + 1 ErrCodeKeyExists @@ -176,7 +179,7 @@ var tooLargeResourceVersionCauseMsg = "Too large resource version" // NewTooLargeResourceVersionError returns a timeout error with the given retrySeconds for a request for // a minimum resource version that is larger than the largest currently available resource version for a requested resource. func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uint64, retrySeconds int) error { - err := errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) + err := apierrors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) err.ErrStatus.Details.Causes = []metav1.StatusCause{ { Type: metav1.CauseTypeResourceVersionTooLarge, @@ -188,8 +191,8 @@ func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uin // IsTooLargeResourceVersion returns true if the error is a TooLargeResourceVersion error. func IsTooLargeResourceVersion(err error) bool { - if !errors.IsTimeout(err) { + if !apierrors.IsTimeout(err) { return false } - return errors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) + return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go index 3e5bfb1c6331e..e7644ddfae6cc 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go @@ -30,6 +30,17 @@ type event struct { isDeleted bool isCreated bool isProgressNotify bool + // isInitialEventsEndBookmark helps us keep track + // of whether we have sent an annotated bookmark event. + // + // when this variable is set to true, + // a special annotation will be added + // to the bookmark event. + // + // note that we decided to extend the event + // struct field to eliminate contention + // between startWatching and processEvent + isInitialEventsEndBookmark bool } // parseKV converts a KeyValue retrieved from an initial sync() listing to a synthetic isCreated event. diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index 0192d754e2618..fadc87d53de2f 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -69,7 +69,7 @@ var ( objectCounts = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "apiserver_storage_objects", - Help: "Number of stored objects at the time of last check split by kind.", + Help: "Number of stored objects at the time of last check split by kind. 
In case of a fetching error, the value will be -1.", StabilityLevel: compbasemetrics.STABLE, }, []string{"resource"}, diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics_test.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics_test.go index 6190131f075d3..6d4d100dbfd12 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics_test.go @@ -226,6 +226,48 @@ func TestStorageSizeCollector(t *testing.T) { } +func TestUpdateObjectCount(t *testing.T) { + registry := metrics.NewKubeRegistry() + registry.Register(objectCounts) + testedMetrics := "apiserver_storage_objects" + + testCases := []struct { + desc string + resource string + count int64 + want string + }{ + { + desc: "successful fetch", + resource: "foo", + count: 10, + want: `# HELP apiserver_storage_objects [STABLE] Number of stored objects at the time of last check split by kind. In case of a fetching error, the value will be -1. +# TYPE apiserver_storage_objects gauge +apiserver_storage_objects{resource="foo"} 10 +`, + }, + { + desc: "failed fetch", + resource: "bar", + count: -1, + want: `# HELP apiserver_storage_objects [STABLE] Number of stored objects at the time of last check split by kind. In case of a fetching error, the value will be -1. +# TYPE apiserver_storage_objects gauge +apiserver_storage_objects{resource="bar"} -1 +`, + }, + } + + for _, test := range testCases { + t.Run(test.desc, func(t *testing.T) { + defer registry.Reset() + UpdateObjectCount(test.resource, test.count) + if err := testutil.GatherAndCompare(registry, strings.NewReader(test.want), testedMetrics); err != nil { + t.Fatal(err) + } + }) + } +} + type fakeEtcdMonitor struct { storageSize int64 } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go index 2cde92abab7e3..9c52ce17eb93b 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/storage" @@ -78,7 +77,6 @@ type store struct { groupResource schema.GroupResource groupResourceString string watcher *watcher - pagingEnabled bool leaseManager *leaseManager } @@ -97,11 +95,11 @@ type objState struct { } // New returns an etcd3 implementation of storage.Interface. 
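Editorial aside, not part of the diff: the next hunk drops the pagingEnabled bool from New/newStore, so every caller updated in this patch constructs the etcd3 store the same way. A hedged sketch of the post-change constructor call, mirroring the updated newEtcdTestStorage earlier in the patch; the wrapper name, prefix argument, and resource values are illustrative only.

package sketch

import (
	clientv3 "go.etcd.io/etcd/client/v3"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/storage"
	"k8s.io/apiserver/pkg/storage/etcd3"
	"k8s.io/apiserver/pkg/storage/value/encrypt/identity"
)

// newPodStorage mirrors the updated call sites: the former pagingEnabled bool is
// simply gone from the argument list; pagination is decided per request instead.
func newPodStorage(client *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix string) storage.Interface {
	return etcd3.New(
		client, codec, newFunc, newListFunc,
		prefix, "/pods",
		schema.GroupResource{Resource: "pods"},
		identity.NewEncryptCheckTransformer(),
		etcd3.NewDefaultLeaseManagerConfig(),
	)
}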
-func New(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) storage.Interface { - return newStore(c, codec, newFunc, newListFunc, prefix, resourcePrefix, groupResource, transformer, pagingEnabled, leaseManagerConfig) +func New(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig) storage.Interface { + return newStore(c, codec, newFunc, newListFunc, prefix, resourcePrefix, groupResource, transformer, leaseManagerConfig) } -func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) *store { +func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig) *store { versioner := storage.APIObjectVersioner{} // for compatibility with etcd2 impl. // no-op for default prefix of '/registry'. @@ -112,12 +110,11 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func pathPrefix += "/" } - // TODO(p0lyn0mial): pass newListFunc and resourcePrefix to the watcher w := &watcher{ client: c, codec: codec, - groupResource: groupResource, newFunc: newFunc, + groupResource: groupResource, versioner: versioner, transformer: transformer, } @@ -126,19 +123,21 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func } else { w.objectType = reflect.TypeOf(newFunc()).String() } - s := &store{ client: c, codec: codec, versioner: versioner, transformer: transformer, - pagingEnabled: pagingEnabled, pathPrefix: pathPrefix, groupResource: groupResource, groupResourceString: groupResource.String(), watcher: w, leaseManager: newDefaultLeaseManager(c, leaseManagerConfig), } + + w.getCurrentStorageRV = func(ctx context.Context) (uint64, error) { + return storage.GetCurrentResourceVersionFromStorage(ctx, s, newListFunc, resourcePrefix, w.objectType) + } return s } @@ -198,7 +197,7 @@ func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ) defer span.End(500 * time.Millisecond) if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 { - return errors.New("resourceVersion should not be set on objects to be created") + return storage.ErrResourceVersionSetOnCreate } if err := s.versioner.PrepareObjectForStorage(obj); err != nil { return fmt.Errorf("PrepareObjectForStorage failed: %v", err) @@ -271,15 +270,7 @@ func (s *store) Delete( func (s *store) conditionalDelete( ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error { - getCurrentState := func() (*objState, error) { - startTime := time.Now() - getResp, err := s.client.KV.Get(ctx, key) - metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) - if err != nil { - return nil, err - } - return s.getState(ctx, getResp, key, v, false) - } + getCurrentState := s.getCurrentState(ctx, key, v, false) var 
origState *objState var err error @@ -407,15 +398,7 @@ func (s *store) GuaranteedUpdate( return fmt.Errorf("unable to convert output object to pointer: %v", err) } - getCurrentState := func() (*objState, error) { - startTime := time.Now() - getResp, err := s.client.KV.Get(ctx, preparedKey) - metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) - if err != nil { - return nil, err - } - return s.getState(ctx, getResp, preparedKey, v, ignoreNotFound) - } + getCurrentState := s.getCurrentState(ctx, preparedKey, v, ignoreNotFound) var origState *objState var origStateIsCurrent bool @@ -638,7 +621,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption limit := opts.Predicate.Limit var paging bool options := make([]clientv3.OpOption, 0, 4) - if s.pagingEnabled && opts.Predicate.Limit > 0 { + if opts.Predicate.Limit > 0 { paging = true options = append(options, clientv3.WithLimit(limit)) limitOption = &options[len(options)-1] @@ -658,7 +641,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption var continueRV, withRev int64 var continueKey string switch { - case opts.Recursive && s.pagingEnabled && len(opts.Predicate.Continue) > 0: + case opts.Recursive && len(opts.Predicate.Continue) > 0: continueKey, continueRV, err = storage.DecodeContinue(opts.Predicate.Continue, keyPrefix) if err != nil { return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err)) @@ -683,7 +666,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption case metav1.ResourceVersionMatchExact: withRev = int64(*fromRV) case "": // legacy case - if opts.Recursive && s.pagingEnabled && opts.Predicate.Limit > 0 && *fromRV > 0 { + if opts.Recursive && opts.Predicate.Limit > 0 && *fromRV > 0 { withRev = int64(*fromRV) } default: @@ -855,18 +838,7 @@ func growSlice(v reflect.Value, maxCapacity int, sizes ...int) { } // Watch implements storage.Interface.Watch. -// TODO(#115478): In order to graduate the WatchList feature to beta, the etcd3 implementation must/should also support it. 
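Editorial aside, not part of the diff: with the s.pagingEnabled checks removed from GetList above, whether a LIST is paginated no longer depends on how the store was constructed; it is decided per request. A minimal sketch of a paged recursive list request; the limit value is arbitrary and the helper name is illustrative only.

package sketch

import (
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apiserver/pkg/storage"
)

// pagedListOptions asks for the first page of up to 500 items. GetList now applies
// Predicate.Limit (and honours Predicate.Continue on follow-up pages) unconditionally,
// since the pagingEnabled switch is gone.
func pagedListOptions() storage.ListOptions {
	return storage.ListOptions{
		Recursive: true,
		Predicate: storage.SelectionPredicate{
			Label: labels.Everything(),
			Field: fields.Everything(),
			Limit: 500,
		},
	}
}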
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { - // it is safe to skip SendInitialEvents if the request is backward compatible - // see https://github.com/kubernetes/kubernetes/blob/267eb25e60955fe8e438c6311412e7cf7d028acb/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go#L260 - compatibility := opts.Predicate.AllowWatchBookmarks == false && (opts.ResourceVersion == "" || opts.ResourceVersion == "0") - if opts.SendInitialEvents != nil && !compatibility { - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: s.groupResource.Group, Kind: s.groupResource.Resource}, - "", - field.ErrorList{field.Forbidden(field.NewPath("sendInitialEvents"), "for watch is unsupported by an etcd cluster")}, - ) - } preparedKey, err := s.prepareKey(key) if err != nil { return nil, err @@ -889,6 +861,18 @@ func (s *store) watchContext(ctx context.Context) context.Context { return clientv3.WithRequireLeader(ctx) } +func (s *store) getCurrentState(ctx context.Context, key string, v reflect.Value, ignoreNotFound bool) func() (*objState, error) { + return func() (*objState, error) { + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) + if err != nil { + return nil, err + } + return s.getState(ctx, getResp, key, v, ignoreNotFound) + } +} + func (s *store) getState(ctx context.Context, getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) { state := &objState{ meta: &storage.ResponseMeta{}, diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go index fb7b8025ccec9..dcf6c9d0f4dde 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go @@ -141,11 +141,26 @@ func TestValidateDeletionWithSuggestion(t *testing.T) { storagetesting.RunTestValidateDeletionWithSuggestion(ctx, t, store) } +func TestValidateDeletionWithOnlySuggestionValid(t *testing.T) { + ctx, store, _ := testSetup(t) + storagetesting.RunTestValidateDeletionWithOnlySuggestionValid(ctx, t, store) +} + +func TestDeleteWithConflict(t *testing.T) { + ctx, store, _ := testSetup(t) + storagetesting.RunTestDeleteWithConflict(ctx, t, store) +} + func TestPreconditionalDeleteWithSuggestion(t *testing.T) { ctx, store, _ := testSetup(t) storagetesting.RunTestPreconditionalDeleteWithSuggestion(ctx, t, store) } +func TestPreconditionalDeleteWithSuggestionPass(t *testing.T) { + ctx, store, _ := testSetup(t) + storagetesting.RunTestPreconditionalDeleteWithOnlySuggestionPass(ctx, t, store) +} + func TestGetListNonRecursive(t *testing.T) { ctx, store, _ := testSetup(t) storagetesting.RunTestGetListNonRecursive(ctx, t, store) @@ -201,11 +216,6 @@ func TestList(t *testing.T) { storagetesting.RunTestList(ctx, t, store, compactStorage(client), false) } -func TestListWithoutPaging(t *testing.T) { - ctx, store, _ := testSetup(t, withoutPaging()) - storagetesting.RunTestListWithoutPaging(ctx, t, store) -} - func checkStorageCallsInvariants(transformer *storagetesting.PrefixTransformer, recorder *clientRecorder) storagetesting.CallsValidation { return func(t *testing.T, pageSize, estimatedProcessedObjects uint64) { if reads := transformer.GetReadsAndReset(); reads != estimatedProcessedObjects { @@ -480,7 +490,6 @@ type setupOptions struct { resourcePrefix string groupResource schema.GroupResource transformer 
value.Transformer - pagingEnabled bool leaseConfig LeaseManagerConfig recorderEnabled bool @@ -502,12 +511,6 @@ func withPrefix(prefix string) setupOption { } } -func withoutPaging() setupOption { - return func(options *setupOptions) { - options.pagingEnabled = false - } -} - func withLeaseConfig(leaseConfig LeaseManagerConfig) setupOption { return func(options *setupOptions) { options.leaseConfig = leaseConfig @@ -531,7 +534,6 @@ func withDefaults(options *setupOptions) { options.resourcePrefix = "/pods" options.groupResource = schema.GroupResource{Resource: "pods"} options.transformer = newTestTransformer() - options.pagingEnabled = true options.leaseConfig = newTestLeaseManagerConfig() } @@ -556,7 +558,6 @@ func testSetup(t testing.TB, opts ...setupOption) (context.Context, *store, *cli setupOpts.resourcePrefix, setupOpts.groupResource, setupOpts.transformer, - setupOpts.pagingEnabled, setupOpts.leaseConfig, ) ctx := context.Background() diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/test_server.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/test_server.go index f696192b6ff9c..235cb78ca5157 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/test_server.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing/test_server.go @@ -44,7 +44,6 @@ func NewUnsecuredEtcd3TestClientServer(t *testing.T) (*EtcdTestServer, *storageb Transport: storagebackend.TransportConfig{ ServerList: server.V3Client.Endpoints(), }, - Paging: true, } return server, config } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index dbd99a3e3b1c2..85acf44f86b39 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -26,20 +26,21 @@ import ( "sync" "time" + clientv3 "go.etcd.io/etcd/client/v3" grpccodes "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" - - clientv3 "go.etcd.io/etcd/client/v3" - "k8s.io/klog/v2" ) @@ -67,13 +68,14 @@ func TestOnlySetFatalOnDecodeError(b bool) { } type watcher struct { - client *clientv3.Client - codec runtime.Codec - newFunc func() runtime.Object - objectType string - groupResource schema.GroupResource - versioner storage.Versioner - transformer value.Transformer + client *clientv3.Client + codec runtime.Codec + newFunc func() runtime.Object + objectType string + groupResource schema.GroupResource + versioner storage.Versioner + transformer value.Transformer + getCurrentStorageRV func(context.Context) (uint64, error) } // watchChan implements watch.Interface. 
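Editorial aside, not part of the diff: the hunks that follow teach the etcd3 watcher the watch-list semantics the cacher already has, via three new helpers (getStartWatchResourceVersion, areInitialEventsRequired, isInitialEventsEndBookmarkRequired). A hedged condensation of their combined decision, assuming the WatchList feature gate is enabled; the function below is illustrative only and not part of the patch.

package sketch

import "k8s.io/apiserver/pkg/storage"

// watchPlan condenses the decision the new helpers make for a watch request:
// whether to start from the most recent storage RV, whether to sync() the current
// state first, and whether to end the initial events with the annotated
// "k8s.io/initial-events-end" bookmark.
func watchPlan(rev int64, opts storage.ListOptions) (startAtCurrentStorageRV, syncInitialState, emitEndBookmark bool) {
	sendInitial := opts.SendInitialEvents != nil && *opts.SendInitialEvents
	switch {
	case rev > 0:
		// exact start; initial events only if explicitly requested
		return false, sendInitial, sendInitial && opts.Predicate.AllowWatchBookmarks
	case opts.SendInitialEvents == nil:
		// legacy behaviour: RV 0/unset lists the current state, no bookmark
		return false, true, false
	case sendInitial:
		return false, true, opts.Predicate.AllowWatchBookmarks
	default:
		// stream-only: skip sync() and start from the current storage RV
		return true, false, false
	}
}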
@@ -105,8 +107,12 @@ func (w *watcher) Watch(ctx context.Context, key string, rev int64, opts storage if opts.ProgressNotify && w.newFunc == nil { return nil, apierrors.NewInternalError(errors.New("progressNotify for watch is unsupported by the etcd storage because no newFunc was provided")) } - wc := w.createWatchChan(ctx, key, rev, opts.Recursive, opts.ProgressNotify, opts.Predicate) - go wc.run() + startWatchRV, err := w.getStartWatchResourceVersion(ctx, rev, opts) + if err != nil { + return nil, err + } + wc := w.createWatchChan(ctx, key, startWatchRV, opts.Recursive, opts.ProgressNotify, opts.Predicate) + go wc.run(isInitialEventsEndBookmarkRequired(opts), areInitialEventsRequired(rev, opts)) // For etcd watch we don't have an easy way to answer whether the watch // has already caught up. So in the initial version (given that watchcache @@ -138,6 +144,62 @@ func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, re return wc } +// getStartWatchResourceVersion returns a ResourceVersion +// the watch will be started from. +// Depending on the input parameters the semantics of the returned ResourceVersion are: +// - start at Exact (return resourceVersion) +// - start at Most Recent (return an RV from etcd) +func (w *watcher) getStartWatchResourceVersion(ctx context.Context, resourceVersion int64, opts storage.ListOptions) (int64, error) { + if resourceVersion > 0 { + return resourceVersion, nil + } + if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + return 0, nil + } + if opts.SendInitialEvents == nil || *opts.SendInitialEvents { + // note that when opts.SendInitialEvents=true + // we will be issuing a consistent LIST request + // against etcd followed by the special bookmark event + return 0, nil + } + // at this point the clients is interested + // only in getting a stream of events + // starting at the MostRecent point in time (RV) + currentStorageRV, err := w.getCurrentStorageRV(ctx) + if err != nil { + return 0, err + } + // currentStorageRV is taken from resp.Header.Revision (int64) + // and cast to uint64, so it is safe to do reverse + // at some point we should unify the interface but that + // would require changing Versioner.UpdateList + return int64(currentStorageRV), nil +} + +// isInitialEventsEndBookmarkRequired since there is no way to directly set +// opts.ProgressNotify from the API and the etcd3 impl doesn't support +// notification for external clients we simply return initialEventsEndBookmarkRequired +// to only send the bookmark event after the initial list call. +// +// see: https://github.com/kubernetes/kubernetes/issues/120348 +func isInitialEventsEndBookmarkRequired(opts storage.ListOptions) bool { + if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + return false + } + return opts.SendInitialEvents != nil && *opts.SendInitialEvents && opts.Predicate.AllowWatchBookmarks +} + +// areInitialEventsRequired returns true if all events from the etcd should be returned. 
+func areInitialEventsRequired(resourceVersion int64, opts storage.ListOptions) bool { + if opts.SendInitialEvents == nil && resourceVersion == 0 { + return true // legacy case + } + if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { + return false + } + return opts.SendInitialEvents != nil && *opts.SendInitialEvents +} + type etcdError interface { Code() grpccodes.Code Error() string @@ -163,9 +225,9 @@ func isCancelError(err error) bool { return false } -func (wc *watchChan) run() { +func (wc *watchChan) run(initialEventsEndBookmarkRequired, forceInitialEvents bool) { watchClosedCh := make(chan struct{}) - go wc.startWatching(watchClosedCh) + go wc.startWatching(watchClosedCh, initialEventsEndBookmarkRequired, forceInitialEvents) var resultChanWG sync.WaitGroup resultChanWG.Add(1) @@ -284,14 +346,44 @@ func logWatchChannelErr(err error) { // startWatching does: // - get current objects if initialRev=0; set initialRev to current rev // - watch on given key and send events to process. -func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { - if wc.initialRev == 0 { +// +// initialEventsEndBookmarkSent helps us keep track +// of whether we have sent an annotated bookmark event. +// +// it's important to note that we don't +// need to track the actual RV because +// we only send the bookmark event +// after the initial list call. +// +// when this variable is set to false, +// it means we don't have any specific +// preferences for delivering bookmark events. +func (wc *watchChan) startWatching(watchClosedCh chan struct{}, initialEventsEndBookmarkRequired, forceInitialEvents bool) { + if wc.initialRev > 0 && forceInitialEvents { + currentStorageRV, err := wc.watcher.getCurrentStorageRV(wc.ctx) + if err != nil { + wc.sendError(err) + return + } + if uint64(wc.initialRev) > currentStorageRV { + wc.sendError(storage.NewTooLargeResourceVersionError(uint64(wc.initialRev), currentStorageRV, int(wait.Jitter(1*time.Second, 3).Seconds()))) + return + } + } + if forceInitialEvents { if err := wc.sync(); err != nil { klog.Errorf("failed to sync with latest state: %v", err) wc.sendError(err) return } } + if initialEventsEndBookmarkRequired { + wc.sendEvent(func() *event { + e := progressNotifyEvent(wc.initialRev) + e.isInitialEventsEndBookmark = true + return e + }()) + } opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()} if wc.recursive { opts = append(opts, clientv3.WithPrefix()) @@ -388,6 +480,12 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { klog.Errorf("failed to propagate object version: %v", err) return nil } + if e.isInitialEventsEndBookmark { + if err := storage.AnnotateInitialEventsEndBookmark(object); err != nil { + wc.sendError(fmt.Errorf("error while accessing object's metadata gr: %v, type: %v, obj: %#v, err: %v", wc.watcher.groupResource, wc.watcher.objectType, object, err)) + return nil + } + } res = &watch.Event{ Type: watch.Bookmark, Object: object, diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher_test.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher_test.go index c5edcc6a0aff6..4a7fe0888a269 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher_test.go @@ -24,14 +24,22 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" clientv3 "go.etcd.io/etcd/client/v3" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + 
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/apis/example" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/testserver" storagetesting "k8s.io/apiserver/pkg/storage/testing" + utilfeature "k8s.io/apiserver/pkg/util/feature" + featuregatetesting "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" ) func TestWatch(t *testing.T) { @@ -120,6 +128,16 @@ func TestSendInitialEventsBackwardCompatibility(t *testing.T) { storagetesting.RunSendInitialEventsBackwardCompatibility(ctx, t, store) } +func TestEtcdWatchSemantics(t *testing.T) { + ctx, store, _ := testSetup(t) + storagetesting.RunWatchSemantics(ctx, t, store) +} + +func TestEtcdWatchSemanticInitialEventsExtended(t *testing.T) { + ctx, store, _ := testSetup(t) + storagetesting.RunWatchSemanticInitialEventsExtended(ctx, t, store) +} + // ======================================================================= // Implementation-specific tests are following. // The following tests are exercising the details of the implementation @@ -142,7 +160,7 @@ func TestWatchErrResultNotBlockAfterCancel(t *testing.T) { var wg sync.WaitGroup wg.Add(1) go func() { - w.run() + w.run(false, true) wg.Done() }() w.errChan <- fmt.Errorf("some error") @@ -150,25 +168,89 @@ func TestWatchErrResultNotBlockAfterCancel(t *testing.T) { wg.Wait() } -// TestWatchErrorWhenNoNewFunc checks if an error -// will be returned when establishing a watch -// with progressNotify options set -// when newFunc wasn't provided -func TestWatchErrorWhenNoNewFunc(t *testing.T) { - origCtx, store, _ := testSetup(t, func(opts *setupOptions) { opts.newFunc = nil }) +// TestWatchErrorIncorrectConfiguration checks if an error +// will be returned when the storage hasn't been properly +// initialised for watch requests +func TestWatchErrorIncorrectConfiguration(t *testing.T) { + scenarios := []struct { + name string + setupFn func(opts *setupOptions) + requestOpts storage.ListOptions + enableWatchList bool + expectedErr error + }{ + { + name: "no newFunc provided", + setupFn: func(opts *setupOptions) { opts.newFunc = nil }, + requestOpts: storage.ListOptions{ProgressNotify: true}, + expectedErr: apierrors.NewInternalError(errors.New("progressNotify for watch is unsupported by the etcd storage because no newFunc was provided")), + }, + } + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + if scenario.enableWatchList { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchList, true)() + } + origCtx, store, _ := testSetup(t, scenario.setupFn) + ctx, cancel := context.WithCancel(origCtx) + defer cancel() + + w, err := store.watcher.Watch(ctx, "/abc", 0, scenario.requestOpts) + if err == nil { + t.Fatalf("expected an error but got none") + } + if w != nil { + t.Fatalf("didn't expect a watcher because the test assumes incorrect store initialisation") + } + if err.Error() != scenario.expectedErr.Error() { + t.Fatalf("unexpected err = %v, expected = %v", err, scenario.expectedErr) + } + }) + } +} + +func TestTooLargeResourceVersionErrorForWatchList(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchList, true)() + origCtx, store, _ := testSetup(t) ctx, cancel := context.WithCancel(origCtx) defer cancel() + requestOpts := storage.ListOptions{ + SendInitialEvents: ptr.To(true), + Recursive: true, + Predicate: storage.SelectionPredicate{ + Field: 
fields.Everything(), + Label: labels.Everything(), + AllowWatchBookmarks: true, + }, + } + var expectedErr *apierrors.StatusError + if !errors.As(storage.NewTooLargeResourceVersionError(uint64(102), 1, 0), &expectedErr) { + t.Fatalf("Unable to convert NewTooLargeResourceVersionError to apierrors.StatusError") + } - w, err := store.watcher.Watch(ctx, "/abc", 0, storage.ListOptions{ProgressNotify: true}) - if err == nil { - t.Fatalf("expected an error but got none") + w, err := store.watcher.Watch(ctx, "/abc", int64(102), requestOpts) + if err != nil { + t.Fatal(err) } - if w != nil { - t.Fatalf("didn't expect a watcher because progress notifications cannot be delivered for a watcher without newFunc") + defer w.Stop() + + actualEvent := <-w.ResultChan() + if actualEvent.Type != watch.Error { + t.Fatalf("Unexpected type of the event: %v, expected: %v", actualEvent.Type, watch.Error) + } + actualErr, ok := actualEvent.Object.(*metav1.Status) + if !ok { + t.Fatalf("Expected *apierrors.StatusError, got: %#v", actualEvent.Object) + } + + if actualErr.Details.RetryAfterSeconds <= 0 { + t.Fatalf("RetryAfterSeconds must be > 0, actual value: %v", actualErr.Details.RetryAfterSeconds) } - expectedError := apierrors.NewInternalError(errors.New("progressNotify for watch is unsupported by the etcd storage because no newFunc was provided")) - if err.Error() != expectedError.Error() { - t.Fatalf("unexpected err = %v, expected = %v", err, expectedError) + // rewrite the Details as it contains retry seconds + // and validate the whole struct + expectedErr.ErrStatus.Details = actualErr.Details + if diff := cmp.Diff(*actualErr, expectedErr.ErrStatus); diff != "" { + t.Fatalf("Unexpected error returned, diff: %v", diff) } } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go index 47534c9781858..93b1e707f66d2 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -62,11 +62,6 @@ type Config struct { Prefix string // Transport holds all connection related info, i.e. equal TransportConfig means equal servers we talk to. Transport TransportConfig - // Paging indicates whether the server implementation should allow paging (if it is - // supported). This is generally configured by feature gating, or by a specific - // resource type not wishing to allow paging, and is not intended for end users to - // set. 
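Editorial aside, not part of the diff: TestTooLargeResourceVersionErrorForWatchList above shows that when sendInitialEvents is requested with an RV ahead of etcd, the watch itself is established and the failure is delivered as the first event. A hedged sketch of how a caller might react to that event; the retry handling is illustrative, not something the patch prescribes.

package sketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// handleEvent recognises the "too large resource version" status checked in the
// test above (a watch.Error event carrying a *metav1.Status with a positive
// RetryAfterSeconds) and reports how long to wait before re-establishing the watch.
func handleEvent(ev watch.Event) (retryAfter time.Duration, shouldRetry bool) {
	if ev.Type != watch.Error {
		return 0, false
	}
	status, ok := ev.Object.(*metav1.Status)
	if !ok || status.Details == nil || status.Details.RetryAfterSeconds <= 0 {
		return 0, false
	}
	return time.Duration(status.Details.RetryAfterSeconds) * time.Second, true
}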
- Paging bool Codec runtime.Codec // EncodeVersioner is the same groupVersioner used to build the @@ -115,7 +110,6 @@ func (config *Config) ForResource(resource schema.GroupResource) *ConfigForResou func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { return &Config{ - Paging: true, Prefix: prefix, Codec: codec, CompactionInterval: DefaultCompactInterval, diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index f52bba84cc38e..2aab5c76d2135 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -454,7 +454,7 @@ func newETCD3Storage(c storagebackend.ConfigForResource, newFunc, newListFunc fu if transformer == nil { transformer = identity.NewEncryptCheckTransformer() } - return etcd3.New(client, c.Codec, newFunc, newListFunc, c.Prefix, resourcePrefix, c.GroupResource, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil + return etcd3.New(client, c.Codec, newFunc, newListFunc, c.Prefix, resourcePrefix, c.GroupResource, transformer, c.LeaseManagerConfig), destroyFunc, nil } // startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the diff --git a/staging/src/k8s.io/apiserver/pkg/storage/testing/store_tests.go b/staging/src/k8s.io/apiserver/pkg/storage/testing/store_tests.go index 8b311f88908f4..c5a1d98ec7c21 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/testing/store_tests.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/testing/store_tests.go @@ -28,7 +28,6 @@ import ( "sync" "testing" - "github.com/google/go-cmp/cmp" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -44,30 +43,45 @@ import ( type KeyValidation func(ctx context.Context, t *testing.T, key string) func RunTestCreate(ctx context.Context, t *testing.T, store storage.Interface, validation KeyValidation) { - out := &example.Pod{} - obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns", SelfLink: "testlink"}} + tests := []struct { + name string + inputObj *example.Pod + expectedError error + }{{ + name: "successful create", + inputObj: &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}, + }, { + name: "create with ResourceVersion set", + inputObj: &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "test-ns", ResourceVersion: "1"}}, + expectedError: storage.ErrResourceVersionSetOnCreate, + }} - // verify that kv pair is empty before set - key := computePodKey(obj) - if err := store.Get(ctx, key, storage.GetOptions{}, out); !storage.IsNotFound(err) { - t.Fatalf("expecting empty result on key %s, got %v", key, err) - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out := &example.Pod{} // reset + // verify that kv pair is empty before set + key := computePodKey(tt.inputObj) + if err := store.Get(ctx, key, storage.GetOptions{}, out); !storage.IsNotFound(err) { + t.Fatalf("expecting empty result on key %s, got %v", key, err) + } - if err := store.Create(ctx, key, obj, out, 0); err != nil { - t.Fatalf("Set failed: %v", err) - } - // basic tests of the output - if obj.ObjectMeta.Name != out.ObjectMeta.Name { - t.Errorf("pod name want=%s, get=%s", obj.ObjectMeta.Name, out.ObjectMeta.Name) - } - if out.ResourceVersion == "" { - t.Errorf("output should have non-empty resource version") - 
} - if out.SelfLink != "" { - t.Errorf("output should have empty selfLink") + err := store.Create(ctx, key, tt.inputObj, out, 0) + if !errors.Is(err, tt.expectedError) { + t.Errorf("expecting error %v, but get: %v", tt.expectedError, err) + } + if err != nil { + return + } + // basic tests of the output + if tt.inputObj.ObjectMeta.Name != out.ObjectMeta.Name { + t.Errorf("pod name want=%s, get=%s", tt.inputObj.ObjectMeta.Name, out.ObjectMeta.Name) + } + if out.ResourceVersion == "" { + t.Errorf("output should have non-empty resource version") + } + validation(ctx, t, key) + }) } - - validation(ctx, t, key) } func RunTestCreateWithTTL(ctx context.Context, t *testing.T, store storage.Interface) { @@ -83,7 +97,7 @@ func RunTestCreateWithTTL(ctx context.Context, t *testing.T, store storage.Inter if err != nil { t.Fatalf("Watch failed: %v", err) } - testCheckEventType(t, watch.Deleted, w) + testCheckEventType(t, w, watch.Deleted) } func RunTestCreateWithKeyExist(ctx context.Context, t *testing.T, store storage.Interface) { @@ -214,15 +228,8 @@ func RunTestGet(ctx context.Context, t *testing.T, store storage.Interface) { } if tt.expectedAlternatives == nil { - ExpectNoDiff(t, fmt.Sprintf("%s: incorrect pod", tt.name), tt.expectedOut, out) + expectNoDiff(t, fmt.Sprintf("%s: incorrect pod", tt.name), tt.expectedOut, out) } else { - toInterfaceSlice := func(pods []*example.Pod) []interface{} { - result := make([]interface{}, 0, len(pods)) - for i := range pods { - result = append(result, pods[i]) - } - return result - } ExpectContains(t, fmt.Sprintf("%s: incorrect pod", tt.name), toInterfaceSlice(tt.expectedAlternatives), out) } }) @@ -268,7 +275,7 @@ func RunTestUnconditionalDelete(ctx context.Context, t *testing.T, store storage t.Errorf("expecting resource version to be updated, but get: %s", out.ResourceVersion) } out.ResourceVersion = storedObj.ResourceVersion - ExpectNoDiff(t, "incorrect pod:", tt.expectedObj, out) + expectNoDiff(t, "incorrect pod:", tt.expectedObj, out) }) } } @@ -310,7 +317,7 @@ func RunTestConditionalDelete(ctx context.Context, t *testing.T, store storage.I t.Errorf("expecting resource version to be updated, but get: %s", out.ResourceVersion) } out.ResourceVersion = storedObj.ResourceVersion - ExpectNoDiff(t, "incorrect pod:", storedObj, out) + expectNoDiff(t, "incorrect pod:", storedObj, out) obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns", UID: "A"}} key, storedObj = testPropagateStore(ctx, t, store, obj) }) @@ -377,6 +384,53 @@ func RunTestDeleteWithSuggestionAndConflict(ctx context.Context, t *testing.T, s if err := store.Get(ctx, key, storage.GetOptions{}, &example.Pod{}); !storage.IsNotFound(err) { t.Errorf("Unexpected error on reading object: %v", err) } + updatedPod.ObjectMeta.ResourceVersion = out.ObjectMeta.ResourceVersion + expectNoDiff(t, "incorrect pod:", updatedPod, out) +} + +// RunTestDeleteWithConflict tests the case when another conflicting update happened before the delete completed. +func RunTestDeleteWithConflict(ctx context.Context, t *testing.T, store storage.Interface) { + key, _ := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}) + + // First update, so originalPod is outdated. + updatedPod := &example.Pod{} + validateCount := 0 + updateCount := 0 + // Simulate a conflicting update in the middle of delete. 
+ validateAllWithUpdate := func(_ context.Context, _ runtime.Object) error { + validateCount++ + if validateCount > 1 { + return nil + } + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.Labels = map[string]string{"foo": "bar"} + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + updateCount++ + return nil + } + + out := &example.Pod{} + if err := store.Delete(ctx, key, out, nil, validateAllWithUpdate, nil); err != nil { + t.Errorf("Unexpected failure during deletion: %v", err) + } + + if validateCount != 2 { + t.Errorf("Expect validateCount = %d, but got %d", 2, validateCount) + } + if updateCount != 1 { + t.Errorf("Expect updateCount = %d, but got %d", 1, updateCount) + } + + if err := store.Get(ctx, key, storage.GetOptions{}, &example.Pod{}); !storage.IsNotFound(err) { + t.Errorf("Unexpected error on reading object: %v", err) + } + updatedPod.ObjectMeta.ResourceVersion = out.ObjectMeta.ResourceVersion + expectNoDiff(t, "incorrect pod:", updatedPod, out) } func RunTestDeleteWithSuggestionOfDeletedObject(ctx context.Context, t *testing.T, store storage.Interface) { @@ -449,6 +503,64 @@ func RunTestValidateDeletionWithSuggestion(ctx context.Context, t *testing.T, st } } +// RunTestValidateDeletionWithOnlySuggestionValid tests the case of delete with validateDeletion function, +// when the suggested cachedExistingObject passes the validate function while the current version does not pass the validate function. +func RunTestValidateDeletionWithOnlySuggestionValid(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns", Labels: map[string]string{"foo": "bar"}}}) + + // Check that validaing fresh object fails is called once and fails. + validationCalls := 0 + validationError := fmt.Errorf("validation error") + validateNothing := func(_ context.Context, _ runtime.Object) error { + validationCalls++ + return validationError + } + out := &example.Pod{} + if err := store.Delete(ctx, key, out, nil, validateNothing, originalPod); err != validationError { + t.Errorf("Unexpected failure during deletion: %v", err) + } + if validationCalls != 1 { + t.Errorf("validate function should have been called once, called %d", validationCalls) + } + + // First update, so originalPod is outdated. + updatedPod := &example.Pod{} + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.Labels = map[string]string{"foo": "barbar"} + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + + calls := 0 + validateFresh := func(_ context.Context, obj runtime.Object) error { + calls++ + pod := obj.(*example.Pod) + if pod.ObjectMeta.Labels == nil || pod.ObjectMeta.Labels["foo"] != "bar" { + return fmt.Errorf("stale object") + } + return nil + } + + err := store.Delete(ctx, key, out, nil, validateFresh, originalPod) + if err == nil || err.Error() != "stale object" { + t.Errorf("expecting stale object error, but get: %s", err) + } + + // Implementations of the storage interface are allowed to ignore the suggestion, + // in which case just one validation call is possible. 
+ if calls > 2 { + t.Errorf("validate function should have been called at most twice, called %d", calls) + } + + if err = store.Get(ctx, key, storage.GetOptions{}, out); err != nil { + t.Errorf("Unexpected error on reading object: %v", err) + } + expectNoDiff(t, "incorrect pod:", updatedPod, out) +} + func RunTestPreconditionalDeleteWithSuggestion(ctx context.Context, t *testing.T, store storage.Interface) { key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}) @@ -475,6 +587,37 @@ func RunTestPreconditionalDeleteWithSuggestion(ctx context.Context, t *testing.T } } +// RunTestPreconditionalDeleteWithOnlySuggestionPass tests the case of delete with preconditions, +// when the suggested cachedExistingObject passes the preconditions while the current version does not pass the preconditions. +func RunTestPreconditionalDeleteWithOnlySuggestionPass(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns", UID: "myUID"}}) + + // First update, so originalPod is outdated. + updatedPod := &example.Pod{} + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.UID = "otherUID" + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + + prec := storage.NewUIDPreconditions("myUID") + // Although originalPod passes the precondition, its delete would fail due to conflict. + // The 2nd try with updatedPod would fail the precondition. + out := &example.Pod{} + err := store.Delete(ctx, key, out, prec, storage.ValidateAllObjectFunc, originalPod) + if err == nil || !storage.IsInvalidObj(err) { + t.Errorf("expecting invalid UID error, but get: %s", err) + } + + if err = store.Get(ctx, key, storage.GetOptions{}, out); err != nil { + t.Errorf("Unexpected error on reading object: %v", err) + } + expectNoDiff(t, "incorrect pod:", updatedPod, out) +} + func RunTestList(ctx context.Context, t *testing.T, store storage.Interface, compaction Compaction, ignoreWatchCacheTests bool) { initialRV, preset, err := seedMultiLevelData(ctx, store) if err != nil { @@ -1091,95 +1234,14 @@ func RunTestList(ctx context.Context, t *testing.T, store storage.Interface, com if tt.expectedAlternatives == nil { sort.Sort(sortablePodList(tt.expectedOut)) - ExpectNoDiff(t, "incorrect list pods", tt.expectedOut, out.Items) + expectNoDiff(t, "incorrect list pods", tt.expectedOut, out.Items) } else { - toInterfaceSlice := func(podLists [][]example.Pod) []interface{} { - result := make([]interface{}, 0, len(podLists)) - for i := range podLists { - sort.Sort(sortablePodList(podLists[i])) - result = append(result, podLists[i]) - } - return result - } ExpectContains(t, "incorrect list pods", toInterfaceSlice(tt.expectedAlternatives), out.Items) } }) } } -func RunTestListWithoutPaging(ctx context.Context, t *testing.T, store storage.Interface) { - _, preset, err := seedMultiLevelData(ctx, store) - if err != nil { - t.Fatal(err) - } - - getAttrs := func(obj runtime.Object) (labels.Set, fields.Set, error) { - pod := obj.(*example.Pod) - return nil, fields.Set{"metadata.name": pod.Name}, nil - } - - tests := []struct { - name string - disablePaging bool - rv string - rvMatch metav1.ResourceVersionMatch - prefix string - pred storage.SelectionPredicate - 
expectedOut []*example.Pod - expectContinue bool - expectedRemainingItemCount *int64 - expectError bool - }{ - { - name: "test List with limit when paging disabled", - disablePaging: true, - prefix: "/pods/second/", - pred: storage.SelectionPredicate{ - Label: labels.Everything(), - Field: fields.Everything(), - Limit: 1, - }, - expectedOut: []*example.Pod{preset[1], preset[2]}, - expectContinue: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.pred.GetAttrs == nil { - tt.pred.GetAttrs = getAttrs - } - - out := &example.PodList{} - storageOpts := storage.ListOptions{ - ResourceVersion: tt.rv, - ResourceVersionMatch: tt.rvMatch, - Predicate: tt.pred, - Recursive: true, - } - - if err := store.GetList(ctx, tt.prefix, storageOpts, out); err != nil { - t.Fatalf("GetList failed: %v", err) - return - } - if (len(out.Continue) > 0) != tt.expectContinue { - t.Errorf("unexpected continue token: %q", out.Continue) - } - - if len(tt.expectedOut) != len(out.Items) { - t.Fatalf("length of list want=%d, got=%d", len(tt.expectedOut), len(out.Items)) - } - if diff := cmp.Diff(tt.expectedRemainingItemCount, out.ListMeta.GetRemainingItemCount()); diff != "" { - t.Errorf("incorrect remainingItemCount: %s", diff) - } - for j, wantPod := range tt.expectedOut { - getPod := &out.Items[j] - ExpectNoDiff(t, fmt.Sprintf("%s: incorrect pod", tt.name), wantPod, getPod) - } - }) - } -} - // seedMultiLevelData creates a set of keys with a multi-level structure, returning a resourceVersion // from before any were created along with the full set of objects that were persisted func seedMultiLevelData(ctx context.Context, store storage.Interface) (string, []*example.Pod, error) { @@ -1379,6 +1441,19 @@ func RunTestGetListNonRecursive(ctx context.Context, t *testing.T, store storage }, }, expectedOut: []example.Pod{}, + }, { + name: "existing key, resourceVersion=current, with not matching pod name", + key: key, + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("metadata.name!=" + storedObj.Name), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, nil + }, + }, + expectedOut: []example.Pod{}, + rv: fmt.Sprintf("%d", currentRV), }} for _, tt := range tests { @@ -1417,15 +1492,8 @@ func RunTestGetListNonRecursive(ctx context.Context, t *testing.T, store storage } if tt.expectedAlternatives == nil { - ExpectNoDiff(t, "incorrect list pods", tt.expectedOut, out.Items) + expectNoDiff(t, "incorrect list pods", tt.expectedOut, out.Items) } else { - toInterfaceSlice := func(podLists [][]example.Pod) []interface{} { - result := make([]interface{}, 0, len(podLists)) - for i := range podLists { - result = append(result, podLists[i]) - } - return result - } ExpectContains(t, "incorrect list pods", toInterfaceSlice(tt.expectedAlternatives), out.Items) } }) @@ -1465,12 +1533,14 @@ func RunTestListContinuation(ctx context.Context, t *testing.T, store storage.In }, } + var currentRV string for i, ps := range preset { preset[i].storedObj = &example.Pod{} err := store.Create(ctx, ps.key, ps.obj, preset[i].storedObj, 0) if err != nil { t.Fatalf("Set failed: %v", err) } + currentRV = preset[i].storedObj.ResourceVersion } // test continuations @@ -1498,7 +1568,10 @@ func RunTestListContinuation(ctx context.Context, t *testing.T, store storage.In if len(out.Continue) == 0 { t.Fatalf("No continuation token set") } - ExpectNoDiff(t, "incorrect first page", 
[]example.Pod{*preset[0].storedObj}, out.Items) + expectNoDiff(t, "incorrect first page", []example.Pod{*preset[0].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } if validation != nil { validation(t, 1, 1) } @@ -1520,7 +1593,10 @@ func RunTestListContinuation(ctx context.Context, t *testing.T, store storage.In } key, rv, err := storage.DecodeContinue(continueFromSecondItem, "/pods") t.Logf("continue token was %d %s %v", rv, key, err) - ExpectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj, *preset[2].storedObj}, out.Items) + expectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj, *preset[2].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } if validation != nil { validation(t, 0, 2) } @@ -1538,7 +1614,10 @@ func RunTestListContinuation(ctx context.Context, t *testing.T, store storage.In if len(out.Continue) == 0 { t.Fatalf("No continuation token set") } - ExpectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj}, out.Items) + expectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } if validation != nil { validation(t, 1, 1) } @@ -1557,7 +1636,10 @@ func RunTestListContinuation(ctx context.Context, t *testing.T, store storage.In if len(out.Continue) != 0 { t.Fatalf("Unexpected continuation token set") } - ExpectNoDiff(t, "incorrect third page", []example.Pod{*preset[2].storedObj}, out.Items) + expectNoDiff(t, "incorrect third page", []example.Pod{*preset[2].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } if validation != nil { validation(t, 1, 1) } @@ -1632,12 +1714,14 @@ func RunTestListContinuationWithFilter(ctx context.Context, t *testing.T, store }, } + var currentRV string for i, ps := range preset { preset[i].storedObj = &example.Pod{} err := store.Create(ctx, ps.key, ps.obj, preset[i].storedObj, 0) if err != nil { t.Fatalf("Set failed: %v", err) } + currentRV = preset[i].storedObj.ResourceVersion } // the first list call should try to get 2 items from etcd (and only those items should be returned) @@ -1668,7 +1752,10 @@ func RunTestListContinuationWithFilter(ctx context.Context, t *testing.T, store if len(out.Continue) == 0 { t.Errorf("No continuation token set") } - ExpectNoDiff(t, "incorrect first page", []example.Pod{*preset[0].storedObj, *preset[2].storedObj}, out.Items) + expectNoDiff(t, "incorrect first page", []example.Pod{*preset[0].storedObj, *preset[2].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } if validation != nil { validation(t, 2, 3) } @@ -1695,7 +1782,10 @@ func RunTestListContinuationWithFilter(ctx context.Context, t *testing.T, store if len(out.Continue) != 0 { t.Errorf("Unexpected continuation token set") } - ExpectNoDiff(t, "incorrect second page", []example.Pod{*preset[3].storedObj}, out.Items) + expectNoDiff(t, "incorrect second page", []example.Pod{*preset[3].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, 
out.ResourceVersion) + } if validation != nil { validation(t, 2, 1) } @@ -1770,7 +1860,7 @@ func RunTestListInconsistentContinuation(ctx context.Context, t *testing.T, stor if len(out.Continue) == 0 { t.Fatalf("No continuation token set") } - ExpectNoDiff(t, "incorrect first page", []example.Pod{*preset[0].storedObj}, out.Items) + expectNoDiff(t, "incorrect first page", []example.Pod{*preset[0].storedObj}, out.Items) continueFromSecondItem := out.Continue @@ -1830,7 +1920,7 @@ func RunTestListInconsistentContinuation(ctx context.Context, t *testing.T, stor t.Fatalf("No continuation token set") } validateResourceVersion := resourceVersionNotOlderThan(lastRVString) - ExpectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj}, out.Items) + expectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj}, out.Items) if err := validateResourceVersion(out.ResourceVersion); err != nil { t.Fatal(err) } @@ -1848,7 +1938,7 @@ func RunTestListInconsistentContinuation(ctx context.Context, t *testing.T, stor if len(out.Continue) != 0 { t.Fatalf("Unexpected continuation token set") } - ExpectNoDiff(t, "incorrect third page", []example.Pod{*preset[2].storedObj}, out.Items) + expectNoDiff(t, "incorrect third page", []example.Pod{*preset[2].storedObj}, out.Items) if out.ResourceVersion != resolvedResourceVersionFromThirdItem { t.Fatalf("Expected list resource version to be %s, got %s", resolvedResourceVersionFromThirdItem, out.ResourceVersion) } @@ -1928,7 +2018,7 @@ func RunTestConsistentList(ctx context.Context, t *testing.T, store InterfaceWit t.Fatalf("failed to list objects: %v", err) } - ExpectNoDiff(t, "incorrect lists", result1, result2) + expectNoDiff(t, "incorrect lists", result1, result2) // Now also verify the ResourceVersionMatchNotOlderThan. 
options.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan @@ -1946,7 +2036,7 @@ func RunTestConsistentList(ctx context.Context, t *testing.T, store InterfaceWit t.Fatalf("failed to list objects: %v", err) } - ExpectNoDiff(t, "incorrect lists", result3, result4) + expectNoDiff(t, "incorrect lists", result3, result4) } func RunTestGuaranteedUpdate(ctx context.Context, t *testing.T, store InterfaceWithPrefixTransformer, validation KeyValidation) { @@ -2123,7 +2213,7 @@ func RunTestGuaranteedUpdateWithTTL(ctx context.Context, t *testing.T, store sto if err != nil { t.Fatalf("Watch failed: %v", err) } - testCheckEventType(t, watch.Deleted, w) + testCheckEventType(t, w, watch.Deleted) } func RunTestGuaranteedUpdateChecksStoredData(ctx context.Context, t *testing.T, store InterfaceWithPrefixTransformer) { diff --git a/staging/src/k8s.io/apiserver/pkg/storage/testing/utils.go b/staging/src/k8s.io/apiserver/pkg/storage/testing/utils.go index 67559ecd20f24..5d1fb3aa8b546 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/testing/utils.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/testing/utils.go @@ -100,13 +100,13 @@ func testPropagateStore(ctx context.Context, t *testing.T, store storage.Interfa return key, setOutput } -func ExpectNoDiff(t *testing.T, msg string, expected, got interface{}) { +func expectNoDiff(t *testing.T, msg string, expected, actual interface{}) { t.Helper() - if !reflect.DeepEqual(expected, got) { - if diff := cmp.Diff(expected, got); diff != "" { + if !reflect.DeepEqual(expected, actual) { + if diff := cmp.Diff(expected, actual); diff != "" { t.Errorf("%s: %s", msg, diff) } else { - t.Errorf("%s:\nexpected: %#v\ngot: %#v", msg, expected, got) + t.Errorf("%s:\nexpected: %#v\ngot: %#v", msg, expected, actual) } } } @@ -139,7 +139,7 @@ func encodeContinueOrDie(key string, resourceVersion int64) string { return token } -func testCheckEventType(t *testing.T, expectEventType watch.EventType, w watch.Interface) { +func testCheckEventType(t *testing.T, w watch.Interface, expectEventType watch.EventType) { select { case res := <-w.ResultChan(): if res.Type != expectEventType { @@ -150,27 +150,20 @@ func testCheckEventType(t *testing.T, expectEventType watch.EventType, w watch.I } } -func testCheckResult(t *testing.T, expectEventType watch.EventType, w watch.Interface, expectObj runtime.Object) { - testCheckResultFunc(t, expectEventType, w, func(object runtime.Object) error { - ExpectNoDiff(t, "incorrect object", expectObj, object) - return nil +func testCheckResult(t *testing.T, w watch.Interface, expectEvent watch.Event) { + testCheckResultFunc(t, w, func(actualEvent watch.Event) { + expectNoDiff(t, "incorrect event", expectEvent, actualEvent) }) } -func testCheckResultFunc(t *testing.T, expectEventType watch.EventType, w watch.Interface, check func(object runtime.Object) error) { +func testCheckResultFunc(t *testing.T, w watch.Interface, check func(actualEvent watch.Event)) { select { case res := <-w.ResultChan(): - if res.Type != expectEventType { - t.Errorf("event type want=%v, get=%v", expectEventType, res.Type) - return - } obj := res.Object if co, ok := obj.(runtime.CacheableObject); ok { - obj = co.GetObject() - } - if err := check(obj); err != nil { - t.Error(err) + res.Object = co.GetObject() } + check(res) case <-time.After(wait.ForeverTestTimeout): t.Errorf("time out after waiting %v on ResultChan", wait.ForeverTestTimeout) } @@ -194,6 +187,37 @@ func testCheckStop(t *testing.T, w watch.Interface) { } } +func testCheckResultsInStrictOrder(t *testing.T, 
w watch.Interface, expectedEvents []watch.Event) { + for _, expectedEvent := range expectedEvents { + testCheckResult(t, w, expectedEvent) + } +} + +func testCheckResultsInRandomOrder(t *testing.T, w watch.Interface, expectedEvents []watch.Event) { + for range expectedEvents { + testCheckResultFunc(t, w, func(actualEvent watch.Event) { + ExpectContains(t, "unexpected event", toInterfaceSlice(expectedEvents), actualEvent) + }) + } +} + +func testCheckNoMoreResults(t *testing.T, w watch.Interface) { + select { + case e := <-w.ResultChan(): + t.Errorf("Unexpected: %#v event received, expected no events", e) + case <-time.After(time.Second): + return + } +} + +func toInterfaceSlice[T any](s []T) []interface{} { + result := make([]interface{}, len(s)) + for i, v := range s { + result[i] = v + } + return result +} + // resourceVersionNotOlderThan returns a function to validate resource versions. Resource versions // referring to points in logical time before the sentinel generate an error. All logical times as // new as the sentinel or newer generate no error. diff --git a/staging/src/k8s.io/apiserver/pkg/storage/testing/watcher_tests.go b/staging/src/k8s.io/apiserver/pkg/storage/testing/watcher_tests.go index 1f08b11c2a5dd..e24dea401fb53 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/testing/watcher_tests.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/testing/watcher_tests.go @@ -36,9 +36,12 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/apis/example" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/utils/pointer" ) @@ -184,7 +187,7 @@ func testWatch(ctx context.Context, t *testing.T, store storage.Interface, recur expectObj = prevObj expectObj.ResourceVersion = out.ResourceVersion } - testCheckResult(t, watchTest.watchType, w, expectObj) + testCheckResult(t, w, watch.Event{Type: watchTest.watchType, Object: expectObj}) } prevObj = out } @@ -206,7 +209,7 @@ func RunTestWatchFromZero(ctx context.Context, t *testing.T, store storage.Inter if err != nil { t.Fatalf("Watch failed: %v", err) } - testCheckResult(t, watch.Added, w, storedObj) + testCheckResult(t, w, watch.Event{Type: watch.Added, Object: storedObj}) // Update out := &example.Pod{} @@ -223,7 +226,7 @@ func RunTestWatchFromZero(ctx context.Context, t *testing.T, store storage.Inter // when testing with the Cacher since we may have to allow for slow // processing by allowing updates to propagate to the watch cache. // This allows for that. 
- testCheckResult(t, watch.Modified, w, out) + testCheckResult(t, w, watch.Event{Type: watch.Modified, Object: out}) w.Stop() // Make sure when we watch from 0 we receive an ADDED event @@ -232,7 +235,7 @@ func RunTestWatchFromZero(ctx context.Context, t *testing.T, store storage.Inter t.Fatalf("Watch failed: %v", err) } - testCheckResult(t, watch.Added, w, out) + testCheckResult(t, w, watch.Event{Type: watch.Added, Object: out}) w.Stop() // Compact previous versions @@ -259,7 +262,7 @@ func RunTestWatchFromZero(ctx context.Context, t *testing.T, store storage.Inter if err != nil { t.Fatalf("Watch failed: %v", err) } - testCheckResult(t, watch.Added, w, newOut) + testCheckResult(t, w, watch.Event{Type: watch.Added, Object: newOut}) // Make sure we can't watch from older resource versions anymore and get a "Gone" error. tooOldWatcher, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: out.ResourceVersion, Predicate: storage.Everything}) @@ -276,11 +279,11 @@ func RunTestWatchFromZero(ctx context.Context, t *testing.T, store storage.Inter Code: http.StatusInternalServerError, Reason: metav1.StatusReasonInternalError, } - testCheckResultFunc(t, watch.Error, tooOldWatcher, func(obj runtime.Object) error { - if !apiequality.Semantic.DeepDerivative(&expiredError, obj) && !apiequality.Semantic.DeepDerivative(&internalError, obj) { - t.Errorf("expected: %#v; got %#v", &expiredError, obj) + testCheckResultFunc(t, tooOldWatcher, func(actualEvent watch.Event) { + expectNoDiff(t, "incorrect event type", watch.Error, actualEvent.Type) + if !apiequality.Semantic.DeepDerivative(&expiredError, actualEvent.Object) && !apiequality.Semantic.DeepDerivative(&internalError, actualEvent.Object) { + t.Errorf("expected: %#v; got %#v", &expiredError, actualEvent.Object) } - return nil }) } @@ -293,7 +296,7 @@ func RunTestDeleteTriggerWatch(ctx context.Context, t *testing.T, store storage.
if err := store.Delete(ctx, key, &example.Pod{}, nil, storage.ValidateAllObjectFunc, nil); err != nil { t.Fatalf("Delete failed: %v", err) } - testCheckEventType(t, watch.Deleted, w) + testCheckEventType(t, w, watch.Deleted) } func RunTestWatchFromNonZero(ctx context.Context, t *testing.T, store storage.Interface) { @@ -310,7 +313,7 @@ func RunTestWatchFromNonZero(ctx context.Context, t *testing.T, store storage.In newObj.Annotations = map[string]string{"version": "2"} return newObj, nil }), nil) - testCheckResult(t, watch.Modified, w, out) + testCheckResult(t, w, watch.Event{Type: watch.Modified, Object: out}) } func RunTestDelayedWatchDelivery(ctx context.Context, t *testing.T, store storage.Interface) { @@ -400,7 +403,7 @@ func RunTestWatchError(ctx context.Context, t *testing.T, store InterfaceWithPre if err != nil { t.Fatalf("Watch failed: %v", err) } - testCheckEventType(t, watch.Error, w) + testCheckEventType(t, w, watch.Error) } func RunTestWatchContextCancel(ctx context.Context, t *testing.T, store storage.Interface) { @@ -476,7 +479,7 @@ func RunTestWatcherTimeout(ctx context.Context, t *testing.T, store storage.Inte if err := store.Create(ctx, computePodKey(pod), pod, out, 0); err != nil { t.Fatalf("Create failed: %v", err) } - testCheckResult(t, watch.Added, readingWatcher, out) + testCheckResult(t, readingWatcher, watch.Event{Type: watch.Added, Object: out}) } if time.Since(startTime) > time.Duration(250*nonReadingWatchers)*time.Millisecond { t.Errorf("waiting for events took too long: %v", time.Since(startTime)) @@ -503,7 +506,7 @@ func RunTestWatchDeleteEventObjectHaveLatestRV(ctx context.Context, t *testing.T t.Fatalf("ResourceVersion didn't changed on deletion: %s", deletedObj.ResourceVersion) } - testCheckResult(t, watch.Deleted, w, deletedObj) + testCheckResult(t, w, watch.Event{Type: watch.Deleted, Object: deletedObj}) } func RunTestWatchInitializationSignal(ctx context.Context, t *testing.T, store storage.Interface) { @@ -546,24 +549,24 @@ func RunOptionalTestProgressNotify(ctx context.Context, t *testing.T, store stor // when we send a bookmark event, the client expects the event to contain an // object of the correct type, but with no fields set other than the resourceVersion - testCheckResultFunc(t, watch.Bookmark, w, func(object runtime.Object) error { + testCheckResultFunc(t, w, func(actualEvent watch.Event) { + expectNoDiff(t, "incorrect event type", watch.Bookmark, actualEvent.Type) // first, check that we have the correct resource version - obj, ok := object.(metav1.Object) + obj, ok := actualEvent.Object.(metav1.Object) if !ok { - return fmt.Errorf("got %T, not metav1.Object", object) + t.Fatalf("got %T, not metav1.Object", actualEvent.Object) } if err := validateResourceVersion(obj.GetResourceVersion()); err != nil { - return err + t.Fatal(err) } // then, check that we have the right type and content - pod, ok := object.(*example.Pod) + pod, ok := actualEvent.Object.(*example.Pod) if !ok { - return fmt.Errorf("got %T, not *example.Pod", object) + t.Fatalf("got %T, not *example.Pod", actualEvent.Object) } pod.ResourceVersion = "" - ExpectNoDiff(t, "bookmark event should contain an object with no fields set other than resourceVersion", &example.Pod{}, pod) - return nil + expectNoDiff(t, "bookmark event should contain an object with no fields set other than resourceVersion", &example.Pod{}, pod) }) } @@ -712,7 +715,7 @@ func RunTestClusterScopedWatch(ctx context.Context, t *testing.T, store storage. 
currentObjs[watchTest.obj.Name] = out } if watchTest.expectEvent { - testCheckResult(t, watchTest.watchType, w, expectObj) + testCheckResult(t, w, watch.Event{Type: watchTest.watchType, Object: expectObj}) } } w.Stop() @@ -1027,7 +1030,7 @@ func RunTestNamespaceScopedWatch(ctx context.Context, t *testing.T, store storag currentObjs[podIdentifier] = out } if watchTest.expectEvent { - testCheckResult(t, watchTest.watchType, w, expectObj) + testCheckResult(t, w, watch.Event{Type: watchTest.watchType, Object: expectObj}) } } w.Stop() @@ -1219,6 +1222,346 @@ func RunSendInitialEventsBackwardCompatibility(ctx context.Context, t *testing.T w.Stop() } +// RunWatchSemantics test the following cases: +// +// +-----------------+---------------------+-------------------+ +// | ResourceVersion | AllowWatchBookmarks | SendInitialEvents | +// +=================+=====================+===================+ +// | Unset | true/false | true/false | +// | 0 | true/false | true/false | +// | 1 | true/false | true/false | +// | Current | true/false | true/false | +// +-----------------+---------------------+-------------------+ +// where: +// - false indicates the value of the param was set to "false" by a test case +// - true indicates the value of the param was set to "true" by a test case +func RunWatchSemantics(ctx context.Context, t *testing.T, store storage.Interface) { + trueVal, falseVal := true, false + addEventsFromCreatedPods := func(createdInitialPods []*example.Pod) []watch.Event { + var ret []watch.Event + for _, createdPod := range createdInitialPods { + ret = append(ret, watch.Event{Type: watch.Added, Object: createdPod}) + } + return ret + } + initialEventsEndFromLastCreatedPod := func(createdInitialPods []*example.Pod) watch.Event { + return watch.Event{ + Type: watch.Bookmark, + Object: &example.Pod{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: createdInitialPods[len(createdInitialPods)-1].ResourceVersion, + Annotations: map[string]string{"k8s.io/initial-events-end": "true"}, + }, + }, + } + } + scenarios := []struct { + name string + allowWatchBookmarks bool + sendInitialEvents *bool + resourceVersion string + // useCurrentRV if set gets the current RV from the storage + // after adding the initial pods which is then used to establish a new watch request + useCurrentRV bool + + initialPods []*example.Pod + podsAfterEstablishingWatch []*example.Pod + + expectedInitialEventsInRandomOrder func(createdInitialPods []*example.Pod) []watch.Event + expectedInitialEventsInStrictOrder func(createdInitialPods []*example.Pod) []watch.Event + expectedEventsAfterEstablishingWatch func(createdPodsAfterWatch []*example.Pod) []watch.Event + }{ + { + name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=unset", + allowWatchBookmarks: true, + sendInitialEvents: &trueVal, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + expectedInitialEventsInStrictOrder: func(createdInitialPods []*example.Pod) []watch.Event { + return []watch.Event{initialEventsEndFromLastCreatedPod(createdInitialPods)} + }, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=true, sendInitialEvents=false, RV=unset", + allowWatchBookmarks: true, + sendInitialEvents: &falseVal, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, 
+ expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=false, RV=unset", + sendInitialEvents: &falseVal, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=unset", + sendInitialEvents: &trueVal, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + + { + name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=0", + allowWatchBookmarks: true, + sendInitialEvents: &trueVal, + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + expectedInitialEventsInStrictOrder: func(createdInitialPods []*example.Pod) []watch.Event { + return []watch.Event{initialEventsEndFromLastCreatedPod(createdInitialPods)} + }, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=true, sendInitialEvents=false, RV=0", + allowWatchBookmarks: true, + sendInitialEvents: &falseVal, + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=false, RV=0", + sendInitialEvents: &falseVal, + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=0", + sendInitialEvents: &trueVal, + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + + { + name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=1", + allowWatchBookmarks: true, + sendInitialEvents: &trueVal, + resourceVersion: "1", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + expectedInitialEventsInStrictOrder: func(createdInitialPods []*example.Pod) []watch.Event { + return []watch.Event{initialEventsEndFromLastCreatedPod(createdInitialPods)} + }, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=true, sendInitialEvents=false, RV=1", + allowWatchBookmarks: true, + sendInitialEvents: &falseVal, + resourceVersion: "1", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInStrictOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: 
addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=false, RV=1", + sendInitialEvents: &falseVal, + resourceVersion: "1", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInStrictOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=1", + sendInitialEvents: &trueVal, + resourceVersion: "1", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + + { + name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=useCurrentRV", + allowWatchBookmarks: true, + sendInitialEvents: &trueVal, + useCurrentRV: true, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + expectedInitialEventsInStrictOrder: func(createdInitialPods []*example.Pod) []watch.Event { + return []watch.Event{initialEventsEndFromLastCreatedPod(createdInitialPods)} + }, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=true, sendInitialEvents=false, RV=useCurrentRV", + allowWatchBookmarks: true, + sendInitialEvents: &falseVal, + useCurrentRV: true, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=false, RV=useCurrentRV", + sendInitialEvents: &falseVal, + useCurrentRV: true, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=useCurrentRV", + sendInitialEvents: &trueVal, + useCurrentRV: true, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + + { + name: "legacy, RV=0", + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "legacy, RV=unset", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + } + for idx, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + // set up env + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchList, true)() + if scenario.expectedInitialEventsInStrictOrder == nil 
{ + scenario.expectedInitialEventsInStrictOrder = func(_ []*example.Pod) []watch.Event { return nil } + } + if scenario.expectedInitialEventsInRandomOrder == nil { + scenario.expectedInitialEventsInRandomOrder = func(_ []*example.Pod) []watch.Event { return nil } + } + if scenario.expectedEventsAfterEstablishingWatch == nil { + scenario.expectedEventsAfterEstablishingWatch = func(_ []*example.Pod) []watch.Event { return nil } + } + + var createdPods []*example.Pod + ns := fmt.Sprintf("ns-%v", idx) + for _, obj := range scenario.initialPods { + obj.Namespace = ns + out := &example.Pod{} + err := store.Create(ctx, computePodKey(obj), obj, out, 0) + require.NoError(t, err, "failed to add a pod: %v", obj) + createdPods = append(createdPods, out) + } + + if scenario.useCurrentRV { + currentStorageRV, err := storage.GetCurrentResourceVersionFromStorage(ctx, store, func() runtime.Object { return &example.PodList{} }, "/pods", "") + require.NoError(t, err) + scenario.resourceVersion = fmt.Sprintf("%d", currentStorageRV) + } + + opts := storage.ListOptions{Predicate: storage.Everything, Recursive: true} + opts.SendInitialEvents = scenario.sendInitialEvents + opts.Predicate.AllowWatchBookmarks = scenario.allowWatchBookmarks + if len(scenario.resourceVersion) > 0 { + opts.ResourceVersion = scenario.resourceVersion + } + + w, err := store.Watch(context.Background(), fmt.Sprintf("/pods/%s", ns), opts) + require.NoError(t, err, "failed to create watch: %v") + defer w.Stop() + + // make sure we only get initial events + testCheckResultsInRandomOrder(t, w, scenario.expectedInitialEventsInRandomOrder(createdPods)) + testCheckResultsInStrictOrder(t, w, scenario.expectedInitialEventsInStrictOrder(createdPods)) + testCheckNoMoreResults(t, w) + + createdPods = []*example.Pod{} + // add a pod that is greater than the storage's RV when the watch was started + for _, obj := range scenario.podsAfterEstablishingWatch { + obj.Namespace = ns + out := &example.Pod{} + err = store.Create(ctx, computePodKey(obj), obj, out, 0) + require.NoError(t, err, "failed to add a pod: %v") + createdPods = append(createdPods, out) + } + testCheckResultsInStrictOrder(t, w, scenario.expectedEventsAfterEstablishingWatch(createdPods)) + testCheckNoMoreResults(t, w) + }) + } +} + +// RunWatchSemanticInitialEventsExtended checks if the bookmark event +// marking the end of the list stream contains the global RV. 
+// +// note that this scenario differs from the one in RunWatchSemantics +// by adding the pod to a different ns to advance the global RV +func RunWatchSemanticInitialEventsExtended(ctx context.Context, t *testing.T, store storage.Interface) { + trueVal := true + expectedInitialEventsInStrictOrder := func(firstPod, secondPod *example.Pod) []watch.Event { + return []watch.Event{ + {Type: watch.Added, Object: firstPod}, + {Type: watch.Bookmark, Object: &example.Pod{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: secondPod.ResourceVersion, + Annotations: map[string]string{"k8s.io/initial-events-end": "true"}, + }, + }}, + } + } + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchList, true)() + + ns := "ns-foo" + pod := makePod("1") + pod.Namespace = ns + firstPod := &example.Pod{} + err := store.Create(ctx, computePodKey(pod), pod, firstPod, 0) + require.NoError(t, err, "failed to add a pod: %v") + + // add the pod to a different ns to advance the global RV + pod = makePod("2") + pod.Namespace = "other-ns-foo" + secondPod := &example.Pod{} + err = store.Create(ctx, computePodKey(pod), pod, secondPod, 0) + require.NoError(t, err, "failed to add a pod: %v") + + opts := storage.ListOptions{Predicate: storage.Everything, Recursive: true} + opts.SendInitialEvents = &trueVal + opts.Predicate.AllowWatchBookmarks = true + + w, err := store.Watch(context.Background(), fmt.Sprintf("/pods/%s", ns), opts) + require.NoError(t, err, "failed to create watch: %v") + defer w.Stop() + + // make sure we only get initial events from the first ns + // followed by the bookmark with the global RV + testCheckResultsInStrictOrder(t, w, expectedInitialEventsInStrictOrder(firstPod, secondPod)) + testCheckNoMoreResults(t, w) +} + +func makePod(namePrefix string) *example.Pod { + return &example.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pod-%s", namePrefix), + }, + } +} + type testWatchStruct struct { obj *example.Pod expectEvent bool diff --git a/staging/src/k8s.io/apiserver/pkg/storage/util.go b/staging/src/k8s.io/apiserver/pkg/storage/util.go index 7b23c7a7af8c1..6d5fb36d24ec5 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/util.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/util.go @@ -29,6 +29,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +const ( + // initialEventsAnnotationKey the name of the key + // under which an annotation marking the end of list stream + // is kept. + initialEventsAnnotationKey = "k8s.io/initial-events-end" +) + type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) // SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc @@ -137,7 +144,18 @@ func AnnotateInitialEventsEndBookmark(obj runtime.Object) error { if objAnnotations == nil { objAnnotations = map[string]string{} } - objAnnotations["k8s.io/initial-events-end"] = "true" + objAnnotations[initialEventsAnnotationKey] = "true" objMeta.SetAnnotations(objAnnotations) return nil } + +// HasInitialEventsEndBookmarkAnnotation checks the presence of the +// special annotation which marks that the initial events have been sent. 
+func HasInitialEventsEndBookmarkAnnotation(obj runtime.Object) (bool, error) { + objMeta, err := meta.Accessor(obj) + if err != nil { + return false, err + } + objAnnotations := objMeta.GetAnnotations() + return objAnnotations[initialEventsAnnotationKey] == "true", nil +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/util_test.go b/staging/src/k8s.io/apiserver/pkg/storage/util_test.go index 2a1b737b81613..8f44d44411a1b 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/util_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/util_test.go @@ -85,7 +85,7 @@ func TestGetCurrentResourceVersionFromStorage(t *testing.T) { // test data newEtcdTestStorage := func(t *testing.T, prefix string) (*etcd3testing.EtcdTestServer, storage.Interface) { server, _ := etcd3testing.NewUnsecuredEtcd3TestClientServer(t) - storage := etcd3.New(server.V3Client, apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion, example2v1.SchemeGroupVersion), func() runtime.Object { return &example.Pod{} }, func() runtime.Object { return &example.PodList{} }, prefix, "/pods", schema.GroupResource{Resource: "pods"}, identity.NewEncryptCheckTransformer(), true, etcd3.NewDefaultLeaseManagerConfig()) + storage := etcd3.New(server.V3Client, apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion, example2v1.SchemeGroupVersion), func() runtime.Object { return &example.Pod{} }, func() runtime.Object { return &example.PodList{} }, prefix, "/pods", schema.GroupResource{Resource: "pods"}, identity.NewEncryptCheckTransformer(), etcd3.NewDefaultLeaseManagerConfig()) return server, storage } server, etcdStorage := newEtcdTestStorage(t, "") @@ -146,3 +146,42 @@ func TestGetCurrentResourceVersionFromStorage(t *testing.T) { require.NoError(t, err) require.Equal(t, currentPodRV, podRV, "didn't expect to see the pod's RV changed") } + +func TestHasInitialEventsEndBookmarkAnnotation(t *testing.T) { + createPod := func(name string) *example.Pod { + return &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: name}} + } + createAnnotatedPod := func(name, value string) *example.Pod { + p := createPod(name) + p.Annotations = map[string]string{} + p.Annotations["k8s.io/initial-events-end"] = value + return p + } + scenarios := []struct { + name string + object runtime.Object + expectAnnotation bool + }{ + { + name: "a standard obj with the initial-events-end annotation set to true", + object: createAnnotatedPod("p1", "true"), + expectAnnotation: true, + }, + { + name: "a standard obj with the initial-events-end annotation set to false", + object: createAnnotatedPod("p1", "false"), + }, + { + name: "a standard obj without the annotation", + object: createPod("p1"), + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(scenario.object) + require.NoError(t, err) + require.Equal(t, scenario.expectAnnotation, hasAnnotation) + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go index 23de3717a1804..a20b10fc3cb8d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go @@ -28,6 +28,7 @@ import ( "unsafe" "github.com/gogo/protobuf/proto" + "go.opentelemetry.io/otel/attribute" "golang.org/x/crypto/cryptobyte" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -39,6 +40,7 @@ 
import ( aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes" kmstypes "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2" "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics" + "k8s.io/component-base/tracing" "k8s.io/klog/v2" kmsservice "k8s.io/kms/pkg/service" "k8s.io/utils/clock" @@ -50,8 +52,10 @@ func init() { } const ( - // KMSAPIVersion is the version of the KMS API. - KMSAPIVersion = "v2beta1" + // KMSAPIVersionv2 is a version of the KMS API. + KMSAPIVersionv2 = "v2" + // KMSAPIVersionv2beta1 is a version of the KMS API. + KMSAPIVersionv2beta1 = "v2beta1" // annotationsMaxSize is the maximum size of the annotations. annotationsMaxSize = 32 * 1024 // 32 kB // KeyIDMaxSize is the maximum size of the keyID. @@ -133,11 +137,28 @@ func newEnvelopeTransformerWithClock(envelopeService kmsservice.Service, provide // TransformFromStorage decrypts data encrypted by this transformer using envelope encryption. func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { + ctx, span := tracing.Start(ctx, "TransformFromStorage with envelopeTransformer", + attribute.String("transformer.provider.name", t.providerName), + // The service.instance_id of the apiserver is already available in the trace + /* + { + "key": "service.instance.id", + "type": "string", + "value": "apiserver-zsteyir5lyrtdcmqqmd5kzze6m" + } + */ + ) + defer span.End(500 * time.Millisecond) + + span.AddEvent("About to decode encrypted object") // Deserialize the EncryptedObject from the data. encryptedObject, err := t.doDecode(data) if err != nil { + span.AddEvent("Decoding encrypted object failed") + span.RecordError(err) return nil, false, err } + span.AddEvent("Decoded encrypted object") useSeed := encryptedObject.EncryptedDEKSourceType == kmstypes.EncryptedDEKSourceType_HKDF_SHA256_XNONCE_AES_GCM_SEED @@ -158,6 +179,7 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b // fallback to the envelope service if we do not have the transformer locally if transformer == nil { + span.AddEvent("About to decrypt DEK using remote service") value.RecordCacheMiss() requestInfo := getRequestInfoFromContext(ctx) @@ -172,8 +194,11 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b Annotations: encryptedObject.Annotations, }) if err != nil { + span.AddEvent("DEK decryption failed") + span.RecordError(err) return nil, false, fmt.Errorf("failed to decrypt DEK, error: %w", err) } + span.AddEvent("DEK decryption succeeded") transformer, err = t.addTransformerForDecryption(encryptedObjectCacheKey, key, useSeed) if err != nil { @@ -182,11 +207,15 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b } metrics.RecordKeyID(metrics.FromStorageLabel, t.providerName, encryptedObject.KeyID, t.apiServerID) + span.AddEvent("About to decrypt data using DEK") out, stale, err := transformer.TransformFromStorage(ctx, encryptedObject.EncryptedData, dataCtx) if err != nil { + span.AddEvent("Data decryption failed") + span.RecordError(err) return nil, false, err } + span.AddEvent("Data decryption succeeded") // data is considered stale if the key ID does not match our current write transformer return out, stale || @@ -197,6 +226,19 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b // TransformToStorage encrypts data to be written to disk using envelope encryption. 
func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) { + ctx, span := tracing.Start(ctx, "TransformToStorage with envelopeTransformer", + attribute.String("transformer.provider.name", t.providerName), + // The service.instance_id of the apiserver is already available in the trace + /* + { + "key": "service.instance.id", + "type": "string", + "value": "apiserver-zsteyir5lyrtdcmqqmd5kzze6m" + } + */ + ) + defer span.End(500 * time.Millisecond) + state, err := t.stateFunc() if err != nil { return nil, err @@ -215,18 +257,31 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt "group", requestInfo.APIGroup, "version", requestInfo.APIVersion, "resource", requestInfo.Resource, "subresource", requestInfo.Subresource, "verb", requestInfo.Verb, "namespace", requestInfo.Namespace, "name", requestInfo.Name) + span.AddEvent("About to encrypt data using DEK") result, err := state.Transformer.TransformToStorage(ctx, data, dataCtx) if err != nil { + span.AddEvent("Data encryption failed") + span.RecordError(err) return nil, err } + span.AddEvent("Data encryption succeeded") metrics.RecordKeyID(metrics.ToStorageLabel, t.providerName, state.EncryptedObject.KeyID, t.apiServerID) encObjectCopy := state.EncryptedObject encObjectCopy.EncryptedData = result + span.AddEvent("About to encode encrypted object") // Serialize the EncryptedObject to a byte array. - return t.doEncode(&encObjectCopy) + out, err := t.doEncode(&encObjectCopy) + if err != nil { + span.AddEvent("Encoding encrypted object failed") + span.RecordError(err) + return nil, err + } + span.AddEvent("Encoded encrypted object") + + return out, nil } // addTransformerForDecryption inserts a new transformer to the Envelope cache of DEKs for future reads. 
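Before the test diff that follows, here is an illustrative, standalone sketch (not part of the patch) of how the tracing.Start / span.AddEvent / span.End pattern introduced above can be observed with OpenTelemetry's in-memory span recorder, mirroring the approach the test changes below take. The doWork function, the "example-provider" attribute value, and the event names are invented for illustration only.

package main

import (
	"context"
	"fmt"
	"time"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
	"k8s.io/component-base/tracing"
)

// doWork is a hypothetical stand-in for an instrumented call such as
// TransformFromStorage; it uses the same span pattern as the transformer above.
func doWork(ctx context.Context) {
	ctx, span := tracing.Start(ctx, "doWork with envelopeTransformer",
		attribute.String("transformer.provider.name", "example-provider"))
	defer span.End(500 * time.Millisecond)

	span.AddEvent("About to do work")
	_ = ctx // a real implementation would pass the derived ctx to nested calls
	span.AddEvent("Work succeeded")
}

func main() {
	// In-memory recorder, same approach as the tests in the next file.
	recorder := tracetest.NewSpanRecorder()
	tracer := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(recorder)).Tracer("example")

	// component-base tracing picks its tracer up from the span already in the
	// context, so events are only recorded when a parent span is present.
	ctx, parent := tracer.Start(context.Background(), "parent")
	doWork(ctx)
	parent.End()

	for _, s := range recorder.Ended() {
		fmt.Printf("span %q recorded with %d events\n", s.Name(), len(s.Events()))
	}
}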
diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope_test.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope_test.go index f52d4cd9d46d3..57f957a1bb059 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope_test.go @@ -33,6 +33,8 @@ import ( "time" "github.com/gogo/protobuf/proto" + "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" utilrand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/uuid" @@ -1262,6 +1264,165 @@ func TestGenerateTransformer(t *testing.T) { } } +func TestEnvelopeTracing_TransformToStorage(t *testing.T) { + testCases := []struct { + desc string + expected []string + }{ + { + desc: "encrypt", + expected: []string{ + "About to encrypt data using DEK", + "Data encryption succeeded", + "About to encode encrypted object", + "Encoded encrypted object", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + fakeRecorder := tracetest.NewSpanRecorder() + otelTracer := trace.NewTracerProvider(trace.WithSpanProcessor(fakeRecorder)).Tracer("test") + + ctx := testContext(t) + ctx, span := otelTracer.Start(ctx, "parent") + defer span.End() + + envelopeService := newTestEnvelopeService() + fakeClock := testingclock.NewFakeClock(time.Now()) + state, err := testStateFunc(ctx, envelopeService, clock.RealClock{}, randomBool())() + if err != nil { + t.Fatal(err) + } + + transformer := newEnvelopeTransformerWithClock(envelopeService, testProviderName, + func() (State, error) { return state, nil }, testAPIServerID, 1*time.Second, fakeClock) + + dataCtx := value.DefaultContext([]byte(testContextText)) + originalText := []byte(testText) + + if _, err := transformer.TransformToStorage(ctx, originalText, dataCtx); err != nil { + t.Fatalf("envelopeTransformer: error while transforming data to storage: %v", err) + } + + output := fakeRecorder.Ended() + if len(output) != 1 { + t.Fatalf("expected 1 span, got %d", len(output)) + } + out := output[0] + validateTraceSpan(t, out, "TransformToStorage with envelopeTransformer", testProviderName, testAPIServerID, tc.expected) + }) + } +} + +func TestEnvelopeTracing_TransformFromStorage(t *testing.T) { + testCases := []struct { + desc string + cacheTTL time.Duration + simulateKMSPluginFailure bool + expected []string + }{ + { + desc: "decrypt", + cacheTTL: 5 * time.Second, + expected: []string{ + "About to decode encrypted object", + "Decoded encrypted object", + "About to decrypt data using DEK", + "Data decryption succeeded", + }, + }, + { + desc: "decrypt with cache miss", + cacheTTL: 1 * time.Second, + expected: []string{ + "About to decode encrypted object", + "Decoded encrypted object", + "About to decrypt DEK using remote service", + "DEK decryption succeeded", + "About to decrypt data using DEK", + "Data decryption succeeded", + }, + }, + { + desc: "decrypt with cache miss, simulate KMS plugin failure", + cacheTTL: 1 * time.Second, + simulateKMSPluginFailure: true, + expected: []string{ + "About to decode encrypted object", + "Decoded encrypted object", + "About to decrypt DEK using remote service", + "DEK decryption failed", + "exception", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + fakeRecorder := tracetest.NewSpanRecorder() + otelTracer := trace.NewTracerProvider(trace.WithSpanProcessor(fakeRecorder)).Tracer("test") + 
+ ctx := testContext(t) + + envelopeService := newTestEnvelopeService() + fakeClock := testingclock.NewFakeClock(time.Now()) + state, err := testStateFunc(ctx, envelopeService, clock.RealClock{}, randomBool())() + if err != nil { + t.Fatal(err) + } + + transformer := newEnvelopeTransformerWithClock(envelopeService, testProviderName, + func() (State, error) { return state, nil }, testAPIServerID, tc.cacheTTL, fakeClock) + + dataCtx := value.DefaultContext([]byte(testContextText)) + originalText := []byte(testText) + + transformedData, _ := transformer.TransformToStorage(ctx, originalText, dataCtx) + + // advance the clock to allow cache entries to expire depending on TTL + fakeClock.Step(2 * time.Second) + // force GC to run by performing a write + transformer.(*envelopeTransformer).cache.set([]byte("some-other-unrelated-key"), &envelopeTransformer{}) + + envelopeService.SetDisabledStatus(tc.simulateKMSPluginFailure) + + // start recording only for the decrypt call + ctx, span := otelTracer.Start(ctx, "parent") + defer span.End() + + _, _, _ = transformer.TransformFromStorage(ctx, transformedData, dataCtx) + + output := fakeRecorder.Ended() + validateTraceSpan(t, output[0], "TransformFromStorage with envelopeTransformer", testProviderName, testAPIServerID, tc.expected) + }) + } +} + +func validateTraceSpan(t *testing.T, span trace.ReadOnlySpan, spanName, providerName, apiserverID string, expected []string) { + t.Helper() + + if span.Name() != spanName { + t.Fatalf("expected span name %q, got %q", spanName, span.Name()) + } + attrs := span.Attributes() + if len(attrs) != 1 { + t.Fatalf("expected 1 attribute, got %d", len(attrs)) + } + if attrs[0].Key != "transformer.provider.name" || attrs[0].Value.AsString() != providerName { + t.Errorf("expected providerName %q, got %q", providerName, attrs[0].Value.AsString()) + } + if len(span.Events()) != len(expected) { + t.Fatalf("expected %d events, got %d", len(expected), len(span.Events())) + } + for i, event := range span.Events() { + if event.Name != expected[i] { + t.Errorf("expected event %q, got %q", expected[i], event.Name) + } + } +} + func errString(err error) string { if err == nil { return "" diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index f2df57ccc2249..8c90811bf45fc 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -150,9 +150,6 @@ type configController struct { // from server configuration. serverConcurrencyLimit int - // requestWaitLimit comes from server configuration. - requestWaitLimit time.Duration - // watchTracker implements the necessary WatchTracker interface.
WatchTracker @@ -287,13 +284,12 @@ func newTestableController(config TestableConfig) *configController { asFieldManager: config.AsFieldManager, foundToDangling: config.FoundToDangling, serverConcurrencyLimit: config.ServerConcurrencyLimit, - requestWaitLimit: config.RequestWaitLimit, flowcontrolClient: config.FlowcontrolClient, priorityLevelStates: make(map[string]*priorityLevelState), WatchTracker: NewWatchTracker(), MaxSeatsTracker: NewMaxSeatsTracker(), } - klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, requestWaitLimit=%s, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.requestWaitLimit, cfgCtlr.name, cfgCtlr.asFieldManager) + klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager) // Start with longish delay because conflicts will be between // different processes, so take some time to go away. cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") @@ -433,7 +429,7 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta plState := plStates[plName] if setCompleters { qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues, - plState.pl, cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs, + plState.pl, plState.reqsGaugePair, plState.execSeatsObs, metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge)) if err != nil { klog.ErrorS(err, "Inconceivable! Configuration error in existing priority level", "pl", plState.pl) @@ -657,10 +653,10 @@ func (cfgCtlr *configController) lockAndDigestConfigObjects(newPLs []*flowcontro // Supply missing mandatory PriorityLevelConfiguration objects if !meal.haveExemptPL { - meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt, cfgCtlr.requestWaitLimit) + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt) } if !meal.haveCatchAllPL { - meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll, cfgCtlr.requestWaitLimit) + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll) } meal.finishQueueSetReconfigsLocked() @@ -692,7 +688,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi } } qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, - pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs, + pl, state.reqsGaugePair, state.execSeatsObs, metrics.NewUnionGauge(state.seatDemandIntegrator, state.seatDemandRatioedGauge)) if err != nil { klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err) @@ -798,7 +794,7 @@ func (meal *cfgMeal) processOldPLsLocked() { } var err error plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, - plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs, + plState.pl, plState.reqsGaugePair, plState.execSeatsObs, metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge)) if err != nil { // This can not happen because queueSetCompleterForPL already approved this config @@ -880,7 +876,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() { // queueSetCompleterForPL returns an appropriate QueueSetCompleter for the // 
given priority level configuration. Returns nil and an error if the given // object is malformed in a way that is a problem for this package. -func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) { +func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) { if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementLimited) != (pl.Spec.Limited != nil) { return nil, errors.New("broken union structure at the top, for Limited") } @@ -902,7 +898,6 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow DesiredNumQueues: int(qcAPI.Queues), QueueLengthLimit: int(qcAPI.QueueLengthLimit), HandSize: int(qcAPI.HandSize), - RequestWaitLimit: requestWaitLimit, } } } else { @@ -956,16 +951,15 @@ func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangl // imaginePL adds a priority level based on one of the mandatory ones // that does not actually exist (right now) as a real API object. -func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) { +func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration) { klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name) labelValues := []string{proto.Name} reqsGaugePair := metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues) execSeatsObs := meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues) seatDemandIntegrator := fq.NewNamedIntegrator(meal.cfgCtlr.clock, proto.Name) seatDemandRatioedGauge := metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{proto.Name}) - qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, - requestWaitLimit, reqsGaugePair, execSeatsObs, - metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge)) + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, reqsGaugePair, + execSeatsObs, metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge)) if err != nil { // This can not happen because proto is one of the mandatory // objects and these are not erroneous diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go index 76782623a8475..05f4f5e539286 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -90,7 +90,6 @@ func New( informerFactory kubeinformers.SharedInformerFactory, flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface, serverConcurrencyLimit int, - requestWaitLimit time.Duration, ) Interface { clk := eventclock.Real{} return NewTestable(TestableConfig{ @@ -101,7 +100,6 @@ func New( InformerFactory: informerFactory, FlowcontrolClient: flowcontrolClient, ServerConcurrencyLimit: serverConcurrencyLimit, - RequestWaitLimit: requestWaitLimit, ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: fqs.NewQueueSetFactory(clk), @@ -139,9 +137,6 @@ type 
TestableConfig struct { // ServerConcurrencyLimit for the controller to enforce ServerConcurrencyLimit int - // RequestWaitLimit configured on the server - RequestWaitLimit time.Duration - // GaugeVec for metrics about requests, broken down by phase and priority_level ReqsGaugeVec metrics.RatioedGaugeVec diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter_test.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter_test.go index 840fecd75ac11..dcc4de38add85 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter_test.go @@ -109,7 +109,6 @@ func TestQueueWaitTimeLatencyTracker(t *testing.T) { InformerFactory: informerFactory, FlowcontrolClient: flowcontrolClient, ServerConcurrencyLimit: 24, - RequestWaitLimit: time.Minute, ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: fqs.NewQueueSetFactory(clk), diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/borrowing_test.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/borrowing_test.go index 8511730f03ccc..e6ab27bea4813 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/borrowing_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/borrowing_test.go @@ -143,7 +143,6 @@ func TestBorrowing(t *testing.T) { InformerFactory: informerFactory, FlowcontrolClient: flowcontrolClient, ServerConcurrencyLimit: 24, - RequestWaitLimit: time.Minute, ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: fqs.NewQueueSetFactory(clk), diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/controller_test.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/controller_test.go index ec7c58a9b25e1..a50c096802d2b 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/controller_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/controller_test.go @@ -251,8 +251,7 @@ func TestConfigConsumer(t *testing.T) { FoundToDangling: func(found bool) bool { return !found }, InformerFactory: informerFactory, FlowcontrolClient: flowcontrolClient, - ServerConcurrencyLimit: 100, // server concurrency limit - RequestWaitLimit: time.Minute, // request wait limit + ServerConcurrencyLimit: 100, // server concurrency limit ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: cts, @@ -384,7 +383,6 @@ func TestAPFControllerWithGracefulShutdown(t *testing.T) { InformerFactory: informerFactory, FlowcontrolClient: flowcontrolClient, ServerConcurrencyLimit: 100, - RequestWaitLimit: time.Minute, ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: cts, diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go index 013fd41e087f3..3b0ad16387eab 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go @@ -18,7 +18,6 @@ package fairqueuing import ( "context" - "time" "k8s.io/apiserver/pkg/util/flowcontrol/debug" "k8s.io/apiserver/pkg/util/flowcontrol/metrics" @@ -117,7 +116,7 @@ type QueuingConfig struct { // DesiredNumQueues is the number of queues that the 
API says // should exist now. This may be non-positive, in which case - // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored. + // QueueLengthLimit and HandSize are ignored. // A value of zero means to respect the ConcurrencyLimit of the DispatchingConfig. // A negative value means to always dispatch immediately upon arrival // (i.e., the requests are "exempt" from limitation). @@ -129,10 +128,6 @@ type QueuingConfig struct { // HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly // dealing a "hand" of this many queues and then picking one of minimum length. HandSize int - - // RequestWaitLimit is the maximum amount of time that a request may wait in a queue. - // If, by the end of that time, the request has not been dispatched then it is rejected. - RequestWaitLimit time.Duration } // DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet. diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go index 99397ecbaa23f..b675bb5453c80 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -272,7 +272,6 @@ func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig, } else { qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit qCfg.HandSize = qs.qCfg.HandSize - qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit } qs.qCfg = qCfg @@ -300,9 +299,6 @@ const ( // Serve this one decisionExecute requestDecision = iota - // Reject this one due to APF queuing considerations - decisionReject - // This one's context timed out / was canceled decisionCancel ) @@ -337,11 +333,10 @@ func (qs *queueSet) StartRequest(ctx context.Context, workEstimate *fqrequest.Wo // ======================================================================== // Step 1: // 1) Start with shuffle sharding, to pick a queue.
- // 2) Reject old requests that have been waiting too long - // 3) Reject current request if there is not enough concurrency shares and + // 2) Reject current request if there is not enough concurrency shares and // we are at max queue length - // 4) If not rejected, create a request and enqueue - req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) + // 3) If not rejected, create a request and enqueue + req = qs.shuffleShardAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) // req == nil means that the request was rejected - no remaining // concurrency shares and at max queue length already if req == nil { @@ -422,13 +417,7 @@ func (req *request) wait() (bool, bool) { } req.waitStarted = true switch decisionAny { - case decisionReject: - klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2) - qs.totRequestsRejected++ - qs.totRequestsTimedout++ - metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") - return false, qs.isIdleLocked() - case decisionCancel: + case decisionCancel: // handled by the code that follows this switch case decisionExecute: klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) return true, false @@ -438,7 +427,7 @@ func (req *request) wait() (bool, bool) { } // TODO(aaron-prindle) add metrics for this case klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) - // remove the request from the queue as it has timed out + // remove the request from the queue as its allowed queue wait time has been exceeded queue := req.queue if req.removeFromQueueLocked() != nil { defer qs.boundNextDispatchLocked(queue) @@ -446,7 +435,7 @@ func (req *request) wait() (bool, bool) { qs.totSeatsWaiting -= req.MaxSeats() qs.totRequestsRejected++ qs.totRequestsCancelled++ - metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled") + metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) metrics.AddSeatsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -req.MaxSeats()) req.NoteQueued(false) @@ -556,25 +545,19 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 { return math.Min(float64(seatsRequested), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues) } -// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required +// shuffleShardAndRejectOrEnqueueLocked encapsulates the logic required // to validate and enqueue a request for the queueSet/QueueSet: // 1) Start with shuffle sharding, to pick a queue.
-// 2) Reject old requests that have been waiting too long -// 3) Reject current request if there is not enough concurrency shares and +// 2) Reject current request if there is not enough concurrency shares and // we are at max queue length -// 4) If not rejected, create a request and enqueue +// 3) If not rejected, create a request and enqueue // returns the enqueud request on a successful enqueue // returns nil in the case that there is no available concurrency or // the queuelengthlimit has been reached -func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { +func (qs *queueSet) shuffleShardAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { // Start with the shuffle sharding, to pick a queue. queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2) queue := qs.queues[queueIdx] - // The next step is the logic to reject requests that have been waiting too long - qs.removeTimedOutRequestsFromQueueToBoundLocked(queue, fsName) - // NOTE: currently timeout is only checked for each new request. This means that there can be - // requests that are in the queue longer than the timeout if there are no new requests - // We prefer the simplicity over the promptness, at least for now. defer qs.boundNextDispatchLocked(queue) @@ -633,44 +616,6 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac return bestQueueIdx } -// removeTimedOutRequestsFromQueueToBoundLocked rejects old requests that have been enqueued -// past the requestWaitLimit -func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, fsName string) { - timeoutCount := 0 - disqueueSeats := 0 - now := qs.clock.Now() - reqs := queue.requestsWaiting - // reqs are sorted oldest -> newest - // can short circuit loop (break) if oldest requests are not timing out - // as newer requests also will not have timed out - - // now - requestWaitLimit = arrivalLimit - arrivalLimit := now.Add(-qs.qCfg.RequestWaitLimit) - reqs.Walk(func(req *request) bool { - if arrivalLimit.After(req.arrivalTime) { - if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil { - timeoutCount++ - disqueueSeats += req.MaxSeats() - req.NoteQueued(false) - metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) - metrics.AddSeatsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -req.MaxSeats()) - } - // we need to check if the next request has timed out. - return true - } - // since reqs are sorted oldest -> newest, we are done here. - return false - }) - - // remove timed out requests from queue - if timeoutCount > 0 { - qs.totRequestsWaiting -= timeoutCount - qs.totSeatsWaiting -= disqueueSeats - qs.reqsGaugePair.RequestsWaiting.Add(float64(-timeoutCount)) - qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) - } -} - // rejectOrEnqueueToBoundLocked rejects or enqueues the newly arrived // request, which has been assigned to a queue. If up against the // queue length limit and the concurrency limit then returns false. 
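For context on the hunks above: with RequestWaitLimit removed, the only thing that can time a queued request out is the request's own context (for example a per-request handler timeout), and the "time-out" rejection reason is now recorded on that context-cancellation path. Below is a minimal, hypothetical sketch of that pattern; waitForDispatch and the channel wiring are illustrative only and are not part of this change.

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForDispatch mimics the post-change behavior: a queued request waits until
// it is dispatched or until its own context is canceled; there is no separate
// queue-level wait limit.
func waitForDispatch(ctx context.Context, dispatched <-chan struct{}) (string, error) {
	select {
	case <-dispatched:
		return "execute", nil
	case <-ctx.Done():
		// Previously a distinct decisionReject driven by RequestWaitLimit;
		// now context expiry covers it and the rejection reason is "time-out".
		return "reject", ctx.Err()
	}
}

func main() {
	// The caller bounds queue wait purely through the context deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	dispatched := make(chan struct{}) // never signaled: simulates a saturated queue
	outcome, err := waitForDispatch(ctx, dispatched)
	fmt.Println(outcome, err) // reject context deadline exceeded
}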
diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go index ab43b54c45aa6..5e9399605da22 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go @@ -551,7 +551,6 @@ func TestBaseline(t *testing.T) { DesiredNumQueues: 9, QueueLengthLimit: 8, HandSize: 3, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, "seatDemandSubject") qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -590,7 +589,6 @@ func TestExampt(t *testing.T) { DesiredNumQueues: -1, QueueLengthLimit: 2, HandSize: 3, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, "seatDemandSubject") qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -663,7 +661,6 @@ func TestSeparations(t *testing.T) { DesiredNumQueues: 9, QueueLengthLimit: 8, HandSize: 3, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, caseName+" seatDemandSubject") qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -704,7 +701,6 @@ func TestUniformFlowsHandSize1(t *testing.T) { DesiredNumQueues: 9, QueueLengthLimit: 8, HandSize: 1, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, "seatDemandSubject") qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -743,7 +739,6 @@ func TestUniformFlowsHandSize3(t *testing.T) { DesiredNumQueues: 8, QueueLengthLimit: 16, HandSize: 3, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -781,7 +776,6 @@ func TestDifferentFlowsExpectEqual(t *testing.T) { DesiredNumQueues: 9, QueueLengthLimit: 8, HandSize: 1, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -823,7 +817,6 @@ func TestSeatSecondsRollover(t *testing.T) { DesiredNumQueues: 9, QueueLengthLimit: 8, HandSize: 1, - RequestWaitLimit: 40 * Quarter, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -863,7 +856,6 @@ func TestDifferentFlowsExpectUnequal(t *testing.T) { DesiredNumQueues: 9, QueueLengthLimit: 6, HandSize: 1, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -902,7 +894,6 @@ func TestDifferentWidths(t *testing.T) { DesiredNumQueues: 64, QueueLengthLimit: 13, HandSize: 7, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -940,7 +931,6 @@ func 
TestTooWide(t *testing.T) { DesiredNumQueues: 64, QueueLengthLimit: 35, HandSize: 7, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -1003,7 +993,6 @@ func TestWindup(t *testing.T) { DesiredNumQueues: 9, QueueLengthLimit: 6, HandSize: 1, - RequestWaitLimit: 10 * time.Minute, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -1067,44 +1056,6 @@ func TestDifferentFlowsWithoutQueuing(t *testing.T) { }.exercise(t) } -func TestTimeout(t *testing.T) { - metrics.Register() - now := time.Now() - - clk, counter := testeventclock.NewFake(now, 0, nil) - qsf := newTestableQueueSetFactory(clk, countingPromiseFactoryFactory(counter)) - qCfg := fq.QueuingConfig{ - Name: "TestTimeout", - DesiredNumQueues: 128, - QueueLengthLimit: 128, - HandSize: 1, - RequestWaitLimit: 0, - } - seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) - qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) - if err != nil { - t.Fatal(err) - } - qs := qsComplete(qsc, 1) - - uniformScenario{name: qCfg.Name, - qs: qs, - clients: []uniformClient{ - newUniformClient(1001001001, 5, 100, time.Second, time.Second), - }, - concurrencyLimit: 1, - evalDuration: time.Second * 10, - expectedFair: []bool{true}, - expectedFairnessMargin: []float64{0.01}, - evalInqueueMetrics: true, - evalExecutingMetrics: true, - rejectReason: "time-out", - clk: clk, - counter: counter, - seatDemandIntegratorSubject: seatDemandIntegratorSubject, - }.exercise(t) -} - // TestContextCancel tests cancellation of a request's context. // The outline is: // 1. Use a concurrency limit of 1. 
@@ -1131,7 +1082,6 @@ func TestContextCancel(t *testing.T) { DesiredNumQueues: 11, QueueLengthLimit: 11, HandSize: 1, - RequestWaitLimit: 15 * time.Second, } seatDemandIntegratorSubject := fq.NewNamedIntegrator(clk, qCfg.Name) qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), seatDemandIntegratorSubject) @@ -1238,7 +1188,6 @@ func TestTotalRequestsExecutingWithPanic(t *testing.T) { qCfg := fq.QueuingConfig{ Name: "TestTotalRequestsExecutingWithPanic", DesiredNumQueues: 0, - RequestWaitLimit: 15 * time.Second, } qsc, err := qsf.BeginConstruction(qCfg, newGaugePair(clk), newExecSeatsGauge(clk), fq.NewNamedIntegrator(clk, qCfg.Name)) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/gen_test.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/gen_test.go index 34e67386c0efd..4b0d7dba4d8d6 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/gen_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/gen_test.go @@ -21,7 +21,6 @@ import ( "math/rand" "sync/atomic" "testing" - "time" "k8s.io/utils/clock" @@ -60,7 +59,7 @@ func genPL(rng *rand.Rand, name string) *flowcontrol.PriorityLevelConfiguration QueueLengthLimit: 5} } labelVals := []string{"test"} - _, err := queueSetCompleterForPL(noRestraintQSF, nil, plc, time.Minute, metrics.RatioedGaugeVecPhasedElementPair(metrics.PriorityLevelConcurrencyGaugeVec, 1, 1, labelVals), metrics.PriorityLevelExecutionSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelVals), fq.NewNamedIntegrator(clock.RealClock{}, name)) + _, err := queueSetCompleterForPL(noRestraintQSF, nil, plc, metrics.RatioedGaugeVecPhasedElementPair(metrics.PriorityLevelConcurrencyGaugeVec, 1, 1, labelVals), metrics.PriorityLevelExecutionSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelVals), fq.NewNamedIntegrator(clock.RealClock{}, name)) if err != nil { panic(err) } diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/max_seats_test.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/max_seats_test.go index 23697acfaa132..92c0367f27264 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/max_seats_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/max_seats_test.go @@ -108,7 +108,6 @@ func Test_GetMaxSeats(t *testing.T) { // for the purposes of this test, serverCL ~= nominalCL since there is // only 1 PL with large concurrency shares, making mandatory PLs negligible. ServerConcurrencyLimit: testcase.nominalCL, - RequestWaitLimit: time.Minute, ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: fqs.NewQueueSetFactory(clk), diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/streamtranslator.go b/staging/src/k8s.io/apiserver/pkg/util/proxy/streamtranslator.go new file mode 100644 index 0000000000000..94ea13dff5ba9 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/util/proxy/streamtranslator.go @@ -0,0 +1,167 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package proxy + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/mxk/go-flowrate/flowrate" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + constants "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/util/exec" +) + +// StreamTranslatorHandler is a handler which translates WebSocket stream data +// to SPDY to proxy to kubelet (and ContainerRuntime). +type StreamTranslatorHandler struct { + // Location is the location of the upstream proxy. It is used as the location to Dial on the upstream server + // for upgrade requests. + Location *url.URL + // Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used + Transport http.RoundTripper + // MaxBytesPerSec throttles stream Reader/Writer if necessary + MaxBytesPerSec int64 + // Options define the requested streams (e.g. stdin, stdout). + Options Options +} + +// NewStreamTranslatorHandler creates a new proxy handler for the given upstream +// location, transport, throttle (maxBytesPerSec), and requested stream options. +func NewStreamTranslatorHandler(location *url.URL, transport http.RoundTripper, maxBytesPerSec int64, opts Options) *StreamTranslatorHandler { + return &StreamTranslatorHandler{ + Location: location, + Transport: transport, + MaxBytesPerSec: maxBytesPerSec, + Options: opts, + } +} + +func (h *StreamTranslatorHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Create WebSocket server, including particular streams requested. If this websocket + // endpoint is not able to be upgraded, the websocket library will return errors + // to the client. + websocketStreams, err := webSocketServerStreams(req, w, h.Options) + if err != nil { + return + } + defer websocketStreams.conn.Close() + + // Creating SPDY executor, ensuring redirects are not followed. + spdyRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{UpgradeTransport: h.Transport}) + if err != nil { + websocketStreams.writeStatus(apierrors.NewInternalError(err)) //nolint:errcheck + return + } + spdyExecutor, err := remotecommand.NewSPDYExecutorRejectRedirects(spdyRoundTripper, spdyRoundTripper, "POST", h.Location) + if err != nil { + websocketStreams.writeStatus(apierrors.NewInternalError(err)) //nolint:errcheck + return + } + + // Wire the WebSocket server streams output to the SPDY client input. The stdin/stdout/stderr streams + // can be throttled if the transfer rate exceeds the "MaxBytesPerSec" (zero means unset). Throttling + // the streams instead of the underlying connection *may* not perform the same if two streams + // traveling the same direction (e.g. stdout, stderr) are being maxed out. + opts := remotecommand.StreamOptions{} + if h.Options.Stdin { + stdin := websocketStreams.stdinStream + if h.MaxBytesPerSec > 0 { + stdin = flowrate.NewReader(stdin, h.MaxBytesPerSec) + } + opts.Stdin = stdin + } + if h.Options.Stdout { + stdout := websocketStreams.stdoutStream + if h.MaxBytesPerSec > 0 { + stdout = flowrate.NewWriter(stdout, h.MaxBytesPerSec) + } + opts.Stdout = stdout + } + if h.Options.Stderr { + stderr := websocketStreams.stderrStream + if h.MaxBytesPerSec > 0 { + stderr = flowrate.NewWriter(stderr, h.MaxBytesPerSec) + } + opts.Stderr = stderr + } + if h.Options.Tty { + opts.Tty = true + opts.TerminalSizeQueue = &translatorSizeQueue{resizeChan: websocketStreams.resizeChan} + } + // Start the SPDY client with connected streams.
Output from the WebSocket server + // streams will be forwarded into the SPDY client. Report SPDY execution errors + // through the websocket error stream. + err = spdyExecutor.StreamWithContext(req.Context(), opts) + if err != nil { + //nolint:errcheck // Ignore writeStatus returned error + if statusErr, ok := err.(*apierrors.StatusError); ok { + websocketStreams.writeStatus(statusErr) + } else if exitErr, ok := err.(exec.CodeExitError); ok && exitErr.Exited() { + websocketStreams.writeStatus(codeExitToStatusError(exitErr)) + } else { + websocketStreams.writeStatus(apierrors.NewInternalError(err)) + } + return + } + + // Write the success status back to the WebSocket client. + //nolint:errcheck + websocketStreams.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusSuccess, + }}) +} + +// translatorSizeQueue feeds the size events from the WebSocket +// resizeChan into the SPDY client input. Implements TerminalSizeQueue +// interface. +type translatorSizeQueue struct { + resizeChan chan remotecommand.TerminalSize +} + +func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize { + size, ok := <-t.resizeChan + if !ok { + return nil + } + return &size +} + +// codeExitToStatusError converts a passed CodeExitError to the type necessary +// to send through an error stream using "writeStatus". +func codeExitToStatusError(exitErr exec.CodeExitError) *apierrors.StatusError { + rc := exitErr.ExitStatus() + return &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: constants.NonZeroExitCodeReason, + Details: &metav1.StatusDetails{ + Causes: []metav1.StatusCause{ + { + Type: constants.ExitCodeCauseType, + Message: fmt.Sprintf("%d", rc), + }, + }, + }, + Message: fmt.Sprintf("command terminated with non-zero exit code: %v", exitErr), + }, + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/streamtranslator_test.go b/staging/src/k8s.io/apiserver/pkg/util/proxy/streamtranslator_test.go new file mode 100644 index 0000000000000..6246c35d49c7b --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/util/proxy/streamtranslator_test.go @@ -0,0 +1,872 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + "math" + mrand "math/rand" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strings" + "testing" + "time" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + rcconstants "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/transport" +) + +// TestStreamTranslator_LoopbackStdinToStdout returns random data sent on the client's +// STDIN channel back onto the client's STDOUT channel. 
There are two servers in this test: the +// upstream fake SPDY server, and the StreamTranslator server. The StreamTranslator proxies the +// data received from the websocket client upstream to the SPDY server (by translating the +// websocket data into spdy). The returned data read on the websocket client STDOUT is then +// compared to the random data sent on STDIN to ensure they are the same. +func TestStreamTranslator_LoopbackStdinToStdout(t *testing.T) { + // Create upstream fake SPDY server which copies STDIN back onto STDOUT stream. + spdyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx, err := createSPDYServerStreams(w, req, Options{ + Stdin: true, + Stdout: true, + }) + if err != nil { + t.Errorf("error on createHTTPStreams: %v", err) + return + } + defer ctx.conn.Close() + // Loopback STDIN data onto STDOUT stream. + _, err = io.Copy(ctx.stdoutStream, ctx.stdinStream) + if err != nil { + t.Fatalf("error copying STDIN to STDOUT: %v", err) + } + + })) + defer spdyServer.Close() + // Create StreamTranslatorHandler, which points upstream to fake SPDY server with + // streams STDIN and STDOUT. Create test server from StreamTranslatorHandler. + spdyLocation, err := url.Parse(spdyServer.URL) + if err != nil { + t.Fatalf("Unable to parse spdy server URL: %s", spdyServer.URL) + } + spdyTransport, err := fakeTransport() + if err != nil { + t.Fatalf("Unexpected error creating transport: %v", err) + } + streams := Options{Stdin: true, Stdout: true} + streamTranslator := NewStreamTranslatorHandler(spdyLocation, spdyTransport, 0, streams) + streamTranslatorServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + streamTranslator.ServeHTTP(w, req) + })) + defer streamTranslatorServer.Close() + // Now create the websocket client (executor), and point it to the "streamTranslatorServer". + streamTranslatorLocation, err := url.Parse(streamTranslatorServer.URL) + if err != nil { + t.Fatalf("Unable to parse StreamTranslator server URL: %s", streamTranslatorServer.URL) + } + exec, err := remotecommand.NewWebSocketExecutor(&rest.Config{Host: streamTranslatorLocation.Host}, "GET", streamTranslatorServer.URL) + if err != nil { + t.Errorf("unexpected error creating websocket executor: %v", err) + } + // Generate random data, and set it up to stream on STDIN. The data will be + // returned on the STDOUT buffer. + randomSize := 1024 * 1024 + randomData := make([]byte, randomSize) + if _, err := rand.Read(randomData); err != nil { + t.Errorf("unexpected error reading random data: %v", err) + } + var stdout bytes.Buffer + options := &remotecommand.StreamOptions{ + Stdin: bytes.NewReader(randomData), + Stdout: &stdout, + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client. + errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } + data, err := io.ReadAll(bytes.NewReader(stdout.Bytes())) + if err != nil { + t.Errorf("error reading the stream: %v", err) + return + } + // Check the random data sent on STDIN was the same returned on STDOUT.
+ if !bytes.Equal(randomData, data) { + t.Errorf("unexpected data received: %d sent: %d", len(data), len(randomData)) + } +} + +// TestStreamTranslator_LoopbackStdinToStderr returns random data sent on the client's +// STDIN channel back onto the client's STDERR channel. There are two servers in this test: the +// upstream fake SPDY server, and the StreamTranslator server. The StreamTranslator proxies the +// data received from the websocket client upstream to the SPDY server (by translating the +// websocket data into spdy). The returned data read on the websocket client STDERR is then +// compared to the random data sent on STDIN to ensure they are the same. +func TestStreamTranslator_LoopbackStdinToStderr(t *testing.T) { + // Create upstream fake SPDY server which copies STDIN back onto STDERR stream. + spdyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx, err := createSPDYServerStreams(w, req, Options{ + Stdin: true, + Stderr: true, + }) + if err != nil { + t.Errorf("error on createHTTPStreams: %v", err) + return + } + defer ctx.conn.Close() + // Loopback STDIN data onto STDERR stream. + _, err = io.Copy(ctx.stderrStream, ctx.stdinStream) + if err != nil { + t.Fatalf("error copying STDIN to STDERR: %v", err) + } + })) + defer spdyServer.Close() + // Create StreamTranslatorHandler, which points upstream to fake SPDY server with + // streams STDIN and STDERR. Create test server from StreamTranslatorHandler. + spdyLocation, err := url.Parse(spdyServer.URL) + if err != nil { + t.Fatalf("Unable to parse spdy server URL: %s", spdyServer.URL) + } + spdyTransport, err := fakeTransport() + if err != nil { + t.Fatalf("Unexpected error creating transport: %v", err) + } + streams := Options{Stdin: true, Stderr: true} + streamTranslator := NewStreamTranslatorHandler(spdyLocation, spdyTransport, 0, streams) + streamTranslatorServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + streamTranslator.ServeHTTP(w, req) + })) + defer streamTranslatorServer.Close() + // Now create the websocket client (executor), and point it to the "streamTranslatorServer". + streamTranslatorLocation, err := url.Parse(streamTranslatorServer.URL) + if err != nil { + t.Fatalf("Unable to parse StreamTranslator server URL: %s", streamTranslatorServer.URL) + } + exec, err := remotecommand.NewWebSocketExecutor(&rest.Config{Host: streamTranslatorLocation.Host}, "GET", streamTranslatorServer.URL) + if err != nil { + t.Errorf("unexpected error creating websocket executor: %v", err) + } + // Generate random data, and set it up to stream on STDIN. The data will be + // returned on the STDERR buffer. + randomSize := 1024 * 1024 + randomData := make([]byte, randomSize) + if _, err := rand.Read(randomData); err != nil { + t.Errorf("unexpected error reading random data: %v", err) + } + var stderr bytes.Buffer + options := &remotecommand.StreamOptions{ + Stdin: bytes.NewReader(randomData), + Stderr: &stderr, + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client.
+ errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } + data, err := io.ReadAll(bytes.NewReader(stderr.Bytes())) + if err != nil { + t.Errorf("error reading the stream: %v", err) + return + } + // Check the random data sent on STDIN was the same returned on STDERR. + if !bytes.Equal(randomData, data) { + t.Errorf("unexpected data received: %d sent: %d", len(data), len(randomData)) + } +} + +// Returns a random exit code in the range(1-127). +func randomExitCode() int { + errorCode := mrand.Intn(127) // Range: (0 - 126) + errorCode += 1 // Range: (1 - 127) + return errorCode +} + +// TestStreamTranslator_ErrorStream tests the error stream by sending an error with a random +// exit code, then validating the error arrives on the error stream. +func TestStreamTranslator_ErrorStream(t *testing.T) { + expectedExitCode := randomExitCode() + // Create upstream fake SPDY server, returning a non-zero exit code + // on the error stream within the structured error. + spdyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx, err := createSPDYServerStreams(w, req, Options{ + Stdout: true, + }) + if err != nil { + t.Errorf("error on createHTTPStreams: %v", err) + return + } + defer ctx.conn.Close() + // Read/discard STDIN data before returning error on error stream. + _, err = io.Copy(io.Discard, ctx.stdinStream) + if err != nil { + t.Fatalf("error copying STDIN to DISCARD: %v", err) + } + // Force a non-zero exit code error returned on the error stream. + err = ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: rcconstants.NonZeroExitCodeReason, + Details: &metav1.StatusDetails{ + Causes: []metav1.StatusCause{ + { + Type: rcconstants.ExitCodeCauseType, + Message: fmt.Sprintf("%d", expectedExitCode), + }, + }, + }, + }}) + if err != nil { + t.Fatalf("error writing status: %v", err) + } + })) + defer spdyServer.Close() + // Create StreamTranslatorHandler, which points upstream to fake SPDY server, and + // create a test server using the StreamTranslatorHandler. + spdyLocation, err := url.Parse(spdyServer.URL) + if err != nil { + t.Fatalf("Unable to parse spdy server URL: %s", spdyServer.URL) + } + spdyTransport, err := fakeTransport() + if err != nil { + t.Fatalf("Unexpected error creating transport: %v", err) + } + streams := Options{Stdin: true} + streamTranslator := NewStreamTranslatorHandler(spdyLocation, spdyTransport, 0, streams) + streamTranslatorServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + streamTranslator.ServeHTTP(w, req) + })) + defer streamTranslatorServer.Close() + // Now create the websocket client (executor), and point it to the "streamTranslatorServer". + streamTranslatorLocation, err := url.Parse(streamTranslatorServer.URL) + if err != nil { + t.Fatalf("Unable to parse StreamTranslator server URL: %s", streamTranslatorServer.URL) + } + exec, err := remotecommand.NewWebSocketExecutor(&rest.Config{Host: streamTranslatorLocation.Host}, "GET", streamTranslatorServer.URL) + if err != nil { + t.Errorf("unexpected error creating websocket executor: %v", err) + } + // Generate random data, and set it up to stream on STDIN. The data will be discarded at + // the upstream SPDY server.
+ randomSize := 1024 * 1024 + randomData := make([]byte, randomSize) + if _, err := rand.Read(randomData); err != nil { + t.Errorf("unexpected error reading random data: %v", err) + } + options := &remotecommand.StreamOptions{ + Stdin: bytes.NewReader(randomData), + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client. + errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + // Expect exit code error on error stream. + if err == nil { + t.Errorf("expected error, but received none") + } + expectedError := fmt.Sprintf("command terminated with exit code %d", expectedExitCode) + // Compare expected error with exit code to actual error. + if expectedError != err.Error() { + t.Errorf("expected error (%s), got (%s)", expectedError, err) + } + } +} + +// TestStreamTranslator_MultipleReadChannels tests two streams (STDOUT, STDERR) reading from +// the connections at the same time. +func TestStreamTranslator_MultipleReadChannels(t *testing.T) { + // Create upstream fake SPDY server which copies STDIN back onto STDOUT and STDERR stream. + spdyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx, err := createSPDYServerStreams(w, req, Options{ + Stdin: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + t.Errorf("error on createHTTPStreams: %v", err) + return + } + defer ctx.conn.Close() + // TeeReader copies data read on STDIN onto STDERR. + stdinReader := io.TeeReader(ctx.stdinStream, ctx.stderrStream) + // Also copy STDIN to STDOUT. + _, err = io.Copy(ctx.stdoutStream, stdinReader) + if err != nil { + t.Errorf("error copying STDIN to STDOUT: %v", err) + } + })) + defer spdyServer.Close() + // Create StreamTranslatorHandler, which points upstream to fake SPDY server with + // streams STDIN, STDOUT, and STDERR. Create test server from StreamTranslatorHandler. + spdyLocation, err := url.Parse(spdyServer.URL) + if err != nil { + t.Fatalf("Unable to parse spdy server URL: %s", spdyServer.URL) + } + spdyTransport, err := fakeTransport() + if err != nil { + t.Fatalf("Unexpected error creating transport: %v", err) + } + streams := Options{Stdin: true, Stdout: true, Stderr: true} + streamTranslator := NewStreamTranslatorHandler(spdyLocation, spdyTransport, 0, streams) + streamTranslatorServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + streamTranslator.ServeHTTP(w, req) + })) + defer streamTranslatorServer.Close() + // Now create the websocket client (executor), and point it to the "streamTranslatorServer". + streamTranslatorLocation, err := url.Parse(streamTranslatorServer.URL) + if err != nil { + t.Fatalf("Unable to parse StreamTranslator server URL: %s", streamTranslatorServer.URL) + } + exec, err := remotecommand.NewWebSocketExecutor(&rest.Config{Host: streamTranslatorLocation.Host}, "GET", streamTranslatorServer.URL) + if err != nil { + t.Errorf("unexpected error creating websocket executor: %v", err) + } + // Generate random data, and set it up to stream on STDIN. The data will be + // returned on the STDOUT and STDERR buffer. 
+ randomSize := 1024 * 1024 + randomData := make([]byte, randomSize) + if _, err := rand.Read(randomData); err != nil { + t.Errorf("unexpected error reading random data: %v", err) + } + var stdout, stderr bytes.Buffer + options := &remotecommand.StreamOptions{ + Stdin: bytes.NewReader(randomData), + Stdout: &stdout, + Stderr: &stderr, + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client. + errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } + stdoutBytes, err := io.ReadAll(bytes.NewReader(stdout.Bytes())) + if err != nil { + t.Errorf("error reading the stream: %v", err) + return + } + // Check the random data sent on STDIN was the same returned on STDOUT. + if !bytes.Equal(stdoutBytes, randomData) { + t.Errorf("unexpected data received: %d sent: %d", len(stdoutBytes), len(randomData)) + } + stderrBytes, err := io.ReadAll(bytes.NewReader(stderr.Bytes())) + if err != nil { + t.Errorf("error reading the stream: %v", err) + return + } + // Check the random data sent on STDIN was the same returned on STDERR. + if !bytes.Equal(stderrBytes, randomData) { + t.Errorf("unexpected data received: %d sent: %d", len(stderrBytes), len(randomData)) + } +} + +// TestStreamTranslator_ThrottleReadChannels tests two streams (STDOUT, STDERR) using rate limited streams. +func TestStreamTranslator_ThrottleReadChannels(t *testing.T) { + // Create upstream fake SPDY server which copies STDIN back onto STDOUT and STDERR stream. + spdyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx, err := createSPDYServerStreams(w, req, Options{ + Stdin: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + t.Errorf("error on createHTTPStreams: %v", err) + return + } + defer ctx.conn.Close() + // TeeReader copies data read on STDIN onto STDERR. + stdinReader := io.TeeReader(ctx.stdinStream, ctx.stderrStream) + // Also copy STDIN to STDOUT. + _, err = io.Copy(ctx.stdoutStream, stdinReader) + if err != nil { + t.Errorf("error copying STDIN to STDOUT: %v", err) + } + })) + defer spdyServer.Close() + // Create StreamTranslatorHandler, which points upstream to fake SPDY server with + // streams STDIN, STDOUT, and STDERR. Create test server from StreamTranslatorHandler. + spdyLocation, err := url.Parse(spdyServer.URL) + if err != nil { + t.Fatalf("Unable to parse spdy server URL: %s", spdyServer.URL) + } + spdyTransport, err := fakeTransport() + if err != nil { + t.Fatalf("Unexpected error creating transport: %v", err) + } + streams := Options{Stdin: true, Stdout: true, Stderr: true} + maxBytesPerSec := 900 * 1024 // slightly less than the 1MB that is being transferred to exercise throttling. + streamTranslator := NewStreamTranslatorHandler(spdyLocation, spdyTransport, int64(maxBytesPerSec), streams) + streamTranslatorServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + streamTranslator.ServeHTTP(w, req) + })) + defer streamTranslatorServer.Close() + // Now create the websocket client (executor), and point it to the "streamTranslatorServer". 
+ streamTranslatorLocation, err := url.Parse(streamTranslatorServer.URL) + if err != nil { + t.Fatalf("Unable to parse StreamTranslator server URL: %s", streamTranslatorServer.URL) + } + exec, err := remotecommand.NewWebSocketExecutor(&rest.Config{Host: streamTranslatorLocation.Host}, "GET", streamTranslatorServer.URL) + if err != nil { + t.Errorf("unexpected error creating websocket executor: %v", err) + } + // Generate random data, and set it up to stream on STDIN. The data will be + // returned on the STDOUT and STDERR buffer. + randomSize := 1024 * 1024 + randomData := make([]byte, randomSize) + if _, err := rand.Read(randomData); err != nil { + t.Errorf("unexpected error reading random data: %v", err) + } + var stdout, stderr bytes.Buffer + options := &remotecommand.StreamOptions{ + Stdin: bytes.NewReader(randomData), + Stdout: &stdout, + Stderr: &stderr, + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client. + errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } + stdoutBytes, err := io.ReadAll(bytes.NewReader(stdout.Bytes())) + if err != nil { + t.Errorf("error reading the stream: %v", err) + return + } + // Check the random data sent on STDIN was the same returned on STDOUT. + if !bytes.Equal(stdoutBytes, randomData) { + t.Errorf("unexpected data received: %d sent: %d", len(stdoutBytes), len(randomData)) + } + stderrBytes, err := io.ReadAll(bytes.NewReader(stderr.Bytes())) + if err != nil { + t.Errorf("error reading the stream: %v", err) + return + } + // Check the random data sent on STDIN was the same returned on STDERR. + if !bytes.Equal(stderrBytes, randomData) { + t.Errorf("unexpected data received: %d sent: %d", len(stderrBytes), len(randomData)) + } +} + +// fakeTerminalSizeQueue implements TerminalSizeQueue, returning a random set of +// "maxSizes" number of TerminalSizes, storing the TerminalSizes in "sizes" slice. +type fakeTerminalSizeQueue struct { + maxSizes int + terminalSizes []remotecommand.TerminalSize +} + +// newTerminalSizeQueue returns a pointer to a fakeTerminalSizeQueue passing +// "max" number of random TerminalSizes created. +func newTerminalSizeQueue(max int) *fakeTerminalSizeQueue { + return &fakeTerminalSizeQueue{ + maxSizes: max, + terminalSizes: make([]remotecommand.TerminalSize, 0, max), + } +} + +// Next returns a pointer to the next random TerminalSize, or nil if we have +// already returned "maxSizes" TerminalSizes already. Stores the randomly +// created TerminalSize in "terminalSizes" field for later validation. +func (f *fakeTerminalSizeQueue) Next() *remotecommand.TerminalSize { + if len(f.terminalSizes) >= f.maxSizes { + return nil + } + size := randomTerminalSize() + f.terminalSizes = append(f.terminalSizes, size) + return &size +} + +// randomTerminalSize returns a TerminalSize with random values in the +// range (0-65535) for the fields Width and Height. 
+func randomTerminalSize() remotecommand.TerminalSize { + randWidth := uint16(mrand.Intn(int(math.Pow(2, 16)))) + randHeight := uint16(mrand.Intn(int(math.Pow(2, 16)))) + return remotecommand.TerminalSize{ + Width: randWidth, + Height: randHeight, + } +} + +// TestStreamTranslator_TTYResizeChannel validates that terminal resize events sent on the +// websocket resize stream are forwarded to the upstream SPDY server. +func TestStreamTranslator_TTYResizeChannel(t *testing.T) { + // Create the fake terminal size queue and the actualTerminalSizes which + // will be received at the opposite websocket endpoint. + numSizeQueue := 10000 + sizeQueue := newTerminalSizeQueue(numSizeQueue) + actualTerminalSizes := make([]remotecommand.TerminalSize, 0, numSizeQueue) + // Create upstream fake SPDY server which reads the terminal resize requests from the resize stream. + spdyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx, err := createSPDYServerStreams(w, req, Options{ + Tty: true, + }) + if err != nil { + t.Errorf("error on createHTTPStreams: %v", err) + return + } + defer ctx.conn.Close() + // Read the terminal resize requests, storing them in actualTerminalSizes + for i := 0; i < numSizeQueue; i++ { + actualTerminalSize := <-ctx.resizeChan + actualTerminalSizes = append(actualTerminalSizes, actualTerminalSize) + } + })) + defer spdyServer.Close() + // Create StreamTranslatorHandler, which points upstream to fake SPDY server with + // resize (TTY resize) stream. Create test server from StreamTranslatorHandler. + spdyLocation, err := url.Parse(spdyServer.URL) + if err != nil { + t.Fatalf("Unable to parse spdy server URL: %s", spdyServer.URL) + } + spdyTransport, err := fakeTransport() + if err != nil { + t.Fatalf("Unexpected error creating transport: %v", err) + } + streams := Options{Tty: true} + streamTranslator := NewStreamTranslatorHandler(spdyLocation, spdyTransport, 0, streams) + streamTranslatorServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + streamTranslator.ServeHTTP(w, req) + })) + defer streamTranslatorServer.Close() + // Now create the websocket client (executor), and point it to the "streamTranslatorServer". + streamTranslatorLocation, err := url.Parse(streamTranslatorServer.URL) + if err != nil { + t.Fatalf("Unable to parse StreamTranslator server URL: %s", streamTranslatorServer.URL) + } + exec, err := remotecommand.NewWebSocketExecutor(&rest.Config{Host: streamTranslatorLocation.Host}, "GET", streamTranslatorServer.URL) + if err != nil { + t.Errorf("unexpected error creating websocket executor: %v", err) + } + options := &remotecommand.StreamOptions{ + Tty: true, + TerminalSizeQueue: sizeQueue, + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client. + errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } + // Validate the random TerminalSizes sent on the resize stream are the same + // as the actual TerminalSizes received at the websocket server.
+ if len(actualTerminalSizes) != numSizeQueue { + t.Fatalf("expected to receive num terminal resizes (%d), got (%d)", + numSizeQueue, len(actualTerminalSizes)) + } + for i, actual := range actualTerminalSizes { + expected := sizeQueue.terminalSizes[i] + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected terminal resize window %v, got %v", expected, actual) + } + } +} + +// TestStreamTranslator_WebSocketServerErrors validates that when there is a problem creating +// the websocket server as the first step of the StreamTranslator, an error is properly returned. +func TestStreamTranslator_WebSocketServerErrors(t *testing.T) { + spdyLocation, err := url.Parse("http://127.0.0.1") + if err != nil { + t.Fatalf("Unable to parse spdy server URL") + } + spdyTransport, err := fakeTransport() + if err != nil { + t.Fatalf("Unexpected error creating transport: %v", err) + } + streamTranslator := NewStreamTranslatorHandler(spdyLocation, spdyTransport, 0, Options{}) + streamTranslatorServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + streamTranslator.ServeHTTP(w, req) + })) + defer streamTranslatorServer.Close() + // Now create the websocket client (executor), and point it to the "streamTranslatorServer". + streamTranslatorLocation, err := url.Parse(streamTranslatorServer.URL) + if err != nil { + t.Fatalf("Unable to parse StreamTranslator server URL: %s", streamTranslatorServer.URL) + } + exec, err := remotecommand.NewWebSocketExecutorForProtocols( + &rest.Config{Host: streamTranslatorLocation.Host}, + "GET", + streamTranslatorServer.URL, + rcconstants.StreamProtocolV4Name, // RemoteCommand V4 protocol is unsupported + ) + if err != nil { + t.Errorf("unexpected error creating websocket executor: %v", err) + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client. The WebSocket server within the + // StreamTranslator propagates an error here because the V4 protocol is not supported. + errorChan <- exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{}) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + // Must return "websocket unable to upgrade" (bad handshake) error. + if err == nil { + t.Fatalf("expected error, but received none") + } + if !strings.Contains(err.Error(), "unable to upgrade streaming request") { + t.Errorf("expected websocket bad handshake error, got (%s)", err) + } + } +} + +// TestStreamTranslator_BlockRedirects verifies that the StreamTranslator will *not* follow +// redirects; it will throw an error instead. +func TestStreamTranslator_BlockRedirects(t *testing.T) { + for _, statusCode := range []int{ + http.StatusMovedPermanently, // 301 + http.StatusFound, // 302 + http.StatusSeeOther, // 303 + http.StatusTemporaryRedirect, // 307 + http.StatusPermanentRedirect, // 308 + } { + // Create upstream fake SPDY server which returns a redirect.
+ spdyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Location", "/") + w.WriteHeader(statusCode) + })) + defer spdyServer.Close() + spdyLocation, err := url.Parse(spdyServer.URL) + if err != nil { + t.Fatalf("Unable to parse spdy server URL: %s", spdyServer.URL) + } + spdyTransport, err := fakeTransport() + if err != nil { + t.Fatalf("Unexpected error creating transport: %v", err) + } + streams := Options{Stdout: true} + streamTranslator := NewStreamTranslatorHandler(spdyLocation, spdyTransport, 0, streams) + streamTranslatorServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + streamTranslator.ServeHTTP(w, req) + })) + defer streamTranslatorServer.Close() + // Now create the websocket client (executor), and point it to the "streamTranslatorServer". + streamTranslatorLocation, err := url.Parse(streamTranslatorServer.URL) + if err != nil { + t.Fatalf("Unable to parse StreamTranslator server URL: %s", streamTranslatorServer.URL) + } + exec, err := remotecommand.NewWebSocketExecutor(&rest.Config{Host: streamTranslatorLocation.Host}, "GET", streamTranslatorServer.URL) + if err != nil { + t.Errorf("unexpected error creating websocket executor: %v", err) + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client. + // Should return "redirect not allowed" error. + errorChan <- exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{}) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + // Must return "redirect not allowed" error. + if err == nil { + t.Fatalf("expected error, but received none") + } + if !strings.Contains(err.Error(), "redirect not allowed") { + t.Errorf("expected redirect not allowed error, got (%s)", err) + } + } + } +} + +// streamContext encapsulates the structures necessary to communicate through +// a SPDY connection, including the Reader/Writer streams. +type streamContext struct { + conn io.Closer + stdinStream io.ReadCloser + stdoutStream io.WriteCloser + stderrStream io.WriteCloser + resizeStream io.ReadCloser + resizeChan chan remotecommand.TerminalSize + writeStatus func(status *apierrors.StatusError) error +} + +type streamAndReply struct { + httpstream.Stream + replySent <-chan struct{} +} + +// createSPDYServerStreams upgrades the passed HTTP request to a SPDY bi-directional streaming +// connection with remote command streams defined in the passed options. Returns a streamContext +// structure containing the Reader/Writer streams to communicate through the SPDY connection. +// Returns an error if unable to upgrade the HTTP connection to a SPDY connection.
+func createSPDYServerStreams(w http.ResponseWriter, req *http.Request, opts Options) (*streamContext, error) { + _, err := httpstream.Handshake(req, w, []string{rcconstants.StreamProtocolV4Name}) + if err != nil { + return nil, err + } + + upgrader := spdy.NewResponseUpgrader() + streamCh := make(chan streamAndReply) + conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream, replySent <-chan struct{}) error { + streamCh <- streamAndReply{Stream: stream, replySent: replySent} + return nil + }) + ctx := &streamContext{ + conn: conn, + } + + // wait for stream + replyChan := make(chan struct{}, 5) + defer close(replyChan) + receivedStreams := 0 + expectedStreams := 1 // expect at least the error stream + if opts.Stdout { + expectedStreams++ + } + if opts.Stdin { + expectedStreams++ + } + if opts.Stderr { + expectedStreams++ + } + if opts.Tty { + expectedStreams++ + } +WaitForStreams: + for { + select { + case stream := <-streamCh: + streamType := stream.Headers().Get(v1.StreamType) + switch streamType { + case v1.StreamTypeError: + replyChan <- struct{}{} + ctx.writeStatus = v4WriteStatusFunc(stream) + case v1.StreamTypeStdout: + replyChan <- struct{}{} + ctx.stdoutStream = stream + case v1.StreamTypeStdin: + replyChan <- struct{}{} + ctx.stdinStream = stream + case v1.StreamTypeStderr: + replyChan <- struct{}{} + ctx.stderrStream = stream + case v1.StreamTypeResize: + replyChan <- struct{}{} + ctx.resizeStream = stream + default: + // add other stream ... + return nil, errors.New("unimplemented stream type") + } + case <-replyChan: + receivedStreams++ + if receivedStreams == expectedStreams { + break WaitForStreams + } + } + } + + if ctx.resizeStream != nil { + ctx.resizeChan = make(chan remotecommand.TerminalSize) + go handleResizeEvents(req.Context(), ctx.resizeStream, ctx.resizeChan) + } + + return ctx, nil +} + +func v4WriteStatusFunc(stream io.Writer) func(status *apierrors.StatusError) error { + return func(status *apierrors.StatusError) error { + bs, err := json.Marshal(status.Status()) + if err != nil { + return err + } + _, err = stream.Write(bs) + return err + } +} + +func fakeTransport() (*http.Transport, error) { + cfg := &transport.Config{ + TLS: transport.TLSConfig{ + Insecure: true, + CAFile: "", + }, + } + rt, err := transport.New(cfg) + if err != nil { + return nil, err + } + t, ok := rt.(*http.Transport) + if !ok { + return nil, fmt.Errorf("unknown transport type: %T", rt) + } + return t, nil +} diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/translatinghandler.go b/staging/src/k8s.io/apiserver/pkg/util/proxy/translatinghandler.go new file mode 100644 index 0000000000000..6f6c008824198 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/util/proxy/translatinghandler.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "net/http" + + "k8s.io/klog/v2" +) + +// translatingHandler wraps the delegate handler, implementing the +// http.Handler interface. 
The delegate handles all requests unless +// the request satisfies the passed "shouldTranslate" function +// (currently only for WebSocket/V5 requests), in which case the translator +// handles the request. +type translatingHandler struct { + delegate http.Handler + translator http.Handler + shouldTranslate func(*http.Request) bool +} + +func NewTranslatingHandler(delegate http.Handler, translator http.Handler, shouldTranslate func(*http.Request) bool) http.Handler { + return &translatingHandler{ + delegate: delegate, + translator: translator, + shouldTranslate: shouldTranslate, + } +} + +func (t *translatingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if t.shouldTranslate(req) { + klog.V(4).Infof("request handled by translator proxy") + t.translator.ServeHTTP(w, req) + return + } + t.delegate.ServeHTTP(w, req) +} diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/translatinghandler_test.go b/staging/src/k8s.io/apiserver/pkg/util/proxy/translatinghandler_test.go new file mode 100644 index 0000000000000..ee5a53ed88a39 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/util/proxy/translatinghandler_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/httpstream/wsstream" +) + +// fakeHandler implements the http.Handler interface. +type fakeHandler struct { + served bool +} + +// ServeHTTP stores the fact that this fake handler was called.
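// Illustrative sketch (not part of this patch): a server with an existing SPDY-based
// exec/attach handler could mount the translator so that only WebSocket/V5 upgrade
// requests are translated, using the same predicate the test below exercises. The
// spdyHandler and translator values here are assumed placeholders.
//
//	var spdyHandler, translator http.Handler // assumed to already exist
//	handler := NewTranslatingHandler(spdyHandler, translator,
//		wsstream.IsWebSocketRequestWithStreamCloseProtocol)
//	_ = http.ListenAndServe(":8443", handler)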
+func (fh *fakeHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + fh.served = true +} + +func TestTranslatingHandler(t *testing.T) { + tests := map[string]struct { + upgrade string + version string + expectTranslator bool + }{ + "websocket/v5 upgrade, serves translator": { + upgrade: "websocket", + version: "v5.channel.k8s.io", + expectTranslator: true, + }, + "websocket/v5 upgrade with multiple other versions, serves translator": { + upgrade: "websocket", + version: "v5.channel.k8s.io, v4.channel.k8s.io, v3.channel.k8s.io", + expectTranslator: true, + }, + "websocket/v5 upgrade with multiple other versions out of order, serves translator": { + upgrade: "websocket", + version: "v4.channel.k8s.io, v3.channel.k8s.io, v5.channel.k8s.io", + expectTranslator: true, + }, + "no upgrade, serves delegate": { + upgrade: "", + version: "", + expectTranslator: false, + }, + "no upgrade with v5, serves delegate": { + upgrade: "", + version: "v5.channel.k8s.io", + expectTranslator: false, + }, + "websocket/v5 wrong case upgrade, serves delegate": { + upgrade: "websocket", + version: "v5.CHANNEL.k8s.io", + expectTranslator: false, + }, + "spdy/v5 upgrade, serves delegate": { + upgrade: "spdy", + version: "v5.channel.k8s.io", + expectTranslator: false, + }, + "spdy/v4 upgrade, serves delegate": { + upgrade: "spdy", + version: "v4.channel.k8s.io", + expectTranslator: false, + }, + "websocket/v4 upgrade, serves delegate": { + upgrade: "websocket", + version: "v4.channel.k8s.io", + expectTranslator: false, + }, + "websocket without version upgrade, serves delegate": { + upgrade: "websocket", + version: "", + expectTranslator: false, + }, + } + for name, test := range tests { + req, err := http.NewRequest("GET", "http://www.example.com/", nil) + require.NoError(t, err) + if test.upgrade != "" { + req.Header.Add("Connection", "Upgrade") + req.Header.Add("Upgrade", test.upgrade) + } + if len(test.version) > 0 { + req.Header.Add(wsstream.WebSocketProtocolHeader, test.version) + } + delegate := fakeHandler{} + translator := fakeHandler{} + translatingHandler := NewTranslatingHandler(&delegate, &translator, + wsstream.IsWebSocketRequestWithStreamCloseProtocol) + translatingHandler.ServeHTTP(nil, req) + if !delegate.served && !translator.served { + t.Errorf("%s: expected either translator or delegate to serve the request, but neither did", name) + continue + } + if test.expectTranslator { + if !translator.served { + t.Errorf("%s: expected translator served, got delegate served", name) + } + } else if !delegate.served { + t.Errorf("%s: expected delegate served, got translator served", name) + } + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/util/proxy/websocket.go b/staging/src/k8s.io/apiserver/pkg/util/proxy/websocket.go new file mode 100644 index 0000000000000..3b9746b3b2f0e --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/util/proxy/websocket.go @@ -0,0 +1,200 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package proxy + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/httpstream/wsstream" + constants "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +const ( + // idleTimeout is the read/write deadline set for websocket server connection. Reading + // or writing the connection will return an i/o timeout if this deadline is exceeded. + // Currently, we use the same value as the kubelet websocket server. + defaultIdleConnectionTimeout = 4 * time.Hour + + // Deadline for writing errors to the websocket connection before io/timeout. + writeErrorDeadline = 10 * time.Second +) + +// Options contains details about which streams are required for +// remote command execution. +type Options struct { + Stdin bool + Stdout bool + Stderr bool + Tty bool +} + +// conns contains the connection and streams used when +// forwarding an attach or execute session into a container. +type conns struct { + conn io.Closer + stdinStream io.ReadCloser + stdoutStream io.WriteCloser + stderrStream io.WriteCloser + writeStatus func(status *apierrors.StatusError) error + resizeStream io.ReadCloser + resizeChan chan remotecommand.TerminalSize + tty bool +} + +// Create WebSocket server streams to respond to a WebSocket client. Creates the streams passed +// in the stream options. +func webSocketServerStreams(req *http.Request, w http.ResponseWriter, opts Options) (*conns, error) { + ctx, err := createWebSocketStreams(req, w, opts) + if err != nil { + return nil, err + } + + if ctx.resizeStream != nil { + ctx.resizeChan = make(chan remotecommand.TerminalSize) + go func() { + // Resize channel closes in panic case, and panic does not take down caller. + defer func() { + if p := recover(); p != nil { + // Standard panic logging. + for _, fn := range runtime.PanicHandlers { + fn(p) + } + } + }() + handleResizeEvents(req.Context(), ctx.resizeStream, ctx.resizeChan) + }() + } + + return ctx, nil +} + +// Read terminal resize events off of passed stream and queue into passed channel. +func handleResizeEvents(ctx context.Context, stream io.Reader, channel chan<- remotecommand.TerminalSize) { + defer close(channel) + + decoder := json.NewDecoder(stream) + for { + size := remotecommand.TerminalSize{} + if err := decoder.Decode(&size); err != nil { + break + } + + select { + case channel <- size: + case <-ctx.Done(): + // To avoid leaking this routine, exit if the http request finishes. This path + // would generally be hit if starting the process fails and nothing is started to + // ingest these resize events. + return + } + } +} + +// createChannels returns the standard channel types for a shell connection (STDIN 0, STDOUT 1, STDERR 2) +// along with the approximate duplex value. It also creates the error (3) and resize (4) channels. +func createChannels(opts Options) []wsstream.ChannelType { + // open the requested channels, and always open the error channel + channels := make([]wsstream.ChannelType, 5) + channels[constants.StreamStdIn] = readChannel(opts.Stdin) + channels[constants.StreamStdOut] = writeChannel(opts.Stdout) + channels[constants.StreamStdErr] = writeChannel(opts.Stderr) + channels[constants.StreamErr] = wsstream.WriteChannel + channels[constants.StreamResize] = wsstream.ReadChannel + return channels +} + +// readChannel returns wsstream.ReadChannel if real is true, or wsstream.IgnoreChannel. 
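// Illustrative sketch (not part of this patch): the channel layout produced by
// createChannels above multiplexes five channels over a single WebSocket connection,
// indexed stdin=0, stdout=1, stderr=2, error=3, resize=4. A client-side counterpart
// using the client-go executor might look roughly like this; the host, execURL, and
// stream destinations are assumed values for the sketch.
//
//	exec, err := remotecommand.NewWebSocketExecutor(&rest.Config{Host: "https://host:6443"}, "GET", execURL)
//	if err != nil {
//		return err
//	}
//	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
//		Stdout: os.Stdout,
//		Stderr: os.Stderr,
//	})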
+func readChannel(real bool) wsstream.ChannelType { + if real { + return wsstream.ReadChannel + } + return wsstream.IgnoreChannel +} + +// writeChannel returns wsstream.WriteChannel if real is true, or wsstream.IgnoreChannel. +func writeChannel(real bool) wsstream.ChannelType { + if real { + return wsstream.WriteChannel + } + return wsstream.IgnoreChannel +} + +// createWebSocketStreams returns a "conns" struct containing the websocket connection and +// streams needed to perform an exec or an attach. +func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts Options) (*conns, error) { + channels := createChannels(opts) + conn := wsstream.NewConn(map[string]wsstream.ChannelProtocolConfig{ + // WebSocket server only supports remote command version 5. + constants.StreamProtocolV5Name: { + Binary: true, + Channels: channels, + }, + }) + conn.SetIdleTimeout(defaultIdleConnectionTimeout) + // Opening the connection responds to WebSocket client, negotiating + // the WebSocket upgrade connection and the subprotocol. + _, streams, err := conn.Open(w, req) + if err != nil { + return nil, err + } + + // Send an empty message to the lowest writable channel to notify the client the connection is established + switch { + case opts.Stdout: + _, err = streams[constants.StreamStdOut].Write([]byte{}) + case opts.Stderr: + _, err = streams[constants.StreamStdErr].Write([]byte{}) + default: + _, err = streams[constants.StreamErr].Write([]byte{}) + } + if err != nil { + conn.Close() + return nil, fmt.Errorf("write error during websocket server creation: %v", err) + } + + ctx := &conns{ + conn: conn, + stdinStream: streams[constants.StreamStdIn], + stdoutStream: streams[constants.StreamStdOut], + stderrStream: streams[constants.StreamStdErr], + tty: opts.Tty, + resizeStream: streams[constants.StreamResize], + } + + // writeStatus returns a WriteStatusFunc that marshals a given api Status + // as json in the error channel. + ctx.writeStatus = func(status *apierrors.StatusError) error { + bs, err := json.Marshal(status.Status()) + if err != nil { + return err + } + // Write status error to error stream with deadline. + conn.SetWriteDeadline(writeErrorDeadline) + _, err = streams[constants.StreamErr].Write(bs) + return err + } + + return ctx, nil +} diff --git a/staging/src/k8s.io/cli-runtime/artifacts/openapi/swagger-with-shared-parameters.json b/staging/src/k8s.io/cli-runtime/artifacts/openapi/swagger-with-shared-parameters.json index 6ebb420fc0ccb..8e33f7d8bffa8 100644 --- a/staging/src/k8s.io/cli-runtime/artifacts/openapi/swagger-with-shared-parameters.json +++ b/staging/src/k8s.io/cli-runtime/artifacts/openapi/swagger-with-shared-parameters.json @@ -10558,7 +10558,7 @@ "type": "string" }, "name": { - "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. 
* must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "type": "string" }, "port": { @@ -11022,7 +11022,7 @@ "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { - "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", + "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", "format": "int32", "type": "integer" }, @@ -14104,7 +14104,7 @@ "type": "string" }, "podInfoOnMount": { - "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "type": "boolean" }, "requiresRepublish": { @@ -16736,7 +16736,7 @@ "type": "string" }, "groupPriorityMinimum": { - "description": "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. 
(v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", "format": "int32", "type": "integer" }, diff --git a/staging/src/k8s.io/cli-runtime/artifacts/openapi/swagger.json b/staging/src/k8s.io/cli-runtime/artifacts/openapi/swagger.json index cc55f91c0c0d7..c1637d9bc08f1 100644 --- a/staging/src/k8s.io/cli-runtime/artifacts/openapi/swagger.json +++ b/staging/src/k8s.io/cli-runtime/artifacts/openapi/swagger.json @@ -9016,7 +9016,7 @@ }, "grpc": { "$ref": "#/definitions/io.k8s.api.core.v1.GRPCAction", - "description": "GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate." + "description": "GRPC specifies an action involving a GRPC port." }, "httpGet": { "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction", @@ -10938,7 +10938,7 @@ "type": "string" }, "name": { - "description": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "description": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "type": "string" }, "port": { @@ -11140,7 +11140,7 @@ "type": "string" }, "name": { - "description": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "description": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "type": "string" }, "port": { @@ -11726,7 +11726,7 @@ "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { - "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. 
This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", + "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", "format": "int32", "type": "integer" }, @@ -12275,7 +12275,7 @@ "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { - "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", + "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", "format": "int32", "type": "integer" }, @@ -14264,7 +14264,7 @@ "type": "string" }, "podInfoOnMount": { - "description": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. 
The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "description": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "type": "boolean" }, "requiresRepublish": { @@ -16912,7 +16912,7 @@ "type": "string" }, "groupPriorityMinimum": { - "description": "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). 
The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", "format": "int32", "type": "integer" }, diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod index 1965005dca08e..7fd6d962ff5a2 100644 --- a/staging/src/k8s.io/cli-runtime/go.mod +++ b/staging/src/k8s.io/cli-runtime/go.mod @@ -2,7 +2,7 @@ module k8s.io/cli-runtime -go 1.20 +go 1.21.3 require ( github.com/evanphx/json-patch v4.12.0+incompatible @@ -10,17 +10,18 @@ require ( github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 - golang.org/x/sync v0.2.0 - golang.org/x/text v0.11.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/sync v0.3.0 + golang.org/x/text v0.13.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 k8s.io/klog/v2 v2.100.1 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 @@ -28,8 +29,9 @@ require ( ) require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -55,10 +57,10 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum index 3d135d2476aac..1a35e40e32dab 100644 --- a/staging/src/k8s.io/cli-runtime/go.sum +++ b/staging/src/k8s.io/cli-runtime/go.sum @@ -1,5 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler 
v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -11,11 +14,13 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -97,6 +102,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -107,10 +114,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 
v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -136,8 +143,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -147,7 +154,7 @@ go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -164,34 +171,35 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod 
h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -202,8 +210,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -244,11 +252,11 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go index 03f4b5332bb9f..9c16835096016 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go @@ -27,6 +27,8 @@ import ( "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" "k8s.io/client-go/discovery" diskcached "k8s.io/client-go/discovery/cached/disk" "k8s.io/client-go/rest" @@ -122,6 +124,9 @@ type ConfigFlags struct { // Allows increasing qps used for discovery, this is useful // in clusters with many registered resources discoveryQPS float32 + // Allows all possible warnings are printed in a standardized + // format. + warningPrinter *printers.WarningPrinter } // ToRESTConfig implements RESTClientGetter. 
@@ -332,7 +337,11 @@ func (f *ConfigFlags) toRESTMapper() (meta.RESTMapper, error) { } mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) - expander := restmapper.NewShortcutExpander(mapper, discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, discoveryClient, func(a string) { + if f.warningPrinter != nil { + f.warningPrinter.Print(a) + } + }) return expander, nil } @@ -428,6 +437,12 @@ func (f *ConfigFlags) WithWrapConfigFn(wrapConfigFn func(*rest.Config) *rest.Con return f } +// WithWarningPrinter initializes WarningPrinter with the given IOStreams +func (f *ConfigFlags) WithWarningPrinter(ioStreams genericiooptions.IOStreams) *ConfigFlags { + f.warningPrinter = printers.NewWarningPrinter(ioStreams.ErrOut, printers.WarningPrinterOptions{Color: printers.AllowsColorOutput(ioStreams.ErrOut)}) + return f +} + // NewConfigFlags returns ConfigFlags with default values set func NewConfigFlags(usePersistentConfig bool) *ConfigFlags { impersonateGroup := []string{} diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go index 7a96481552d82..9bb5e28c94e85 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go @@ -66,7 +66,7 @@ func (f *TestConfigFlags) ToRESTMapper() (meta.RESTMapper, error) { } if f.discoveryClient != nil { mapper := restmapper.NewDeferredDiscoveryRESTMapper(f.discoveryClient) - expander := restmapper.NewShortcutExpander(mapper, f.discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, f.discoveryClient, nil) return expander, nil } return nil, fmt.Errorf("no restmapper") diff --git a/staging/src/k8s.io/cli-runtime/pkg/printers/terminal.go b/staging/src/k8s.io/cli-runtime/pkg/printers/terminal.go index 5a59491e492be..9dc904e59c422 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/printers/terminal.go +++ b/staging/src/k8s.io/cli-runtime/pkg/printers/terminal.go @@ -18,7 +18,11 @@ package printers import ( "io" + "os" + "runtime" "strings" + + "github.com/moby/term" ) // terminalEscaper replaces ANSI escape sequences and other terminal special @@ -37,3 +41,35 @@ func WriteEscaped(writer io.Writer, output string) error { func EscapeTerminal(in string) string { return terminalEscaper.Replace(in) } + +// IsTerminal returns whether the passed object is a terminal or not +func IsTerminal(i interface{}) bool { + _, terminal := term.GetFdInfo(i) + return terminal +} + +// AllowsColorOutput returns true if the specified writer is a terminal and +// the process environment indicates color output is supported and desired. +func AllowsColorOutput(w io.Writer) bool { + if !IsTerminal(w) { + return false + } + + // https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals + if os.Getenv("TERM") == "dumb" { + return false + } + + // https://no-color.org/ + if _, nocolor := os.LookupEnv("NO_COLOR"); nocolor { + return false + } + + // On Windows WT_SESSION is set by the modern terminal component. + // Older terminals have poor support for UTF-8, VT escape codes, etc. 
+ if runtime.GOOS == "windows" && os.Getenv("WT_SESSION") == "" { + return false + } + + return true +} diff --git a/staging/src/k8s.io/cli-runtime/pkg/resource/crd_finder_test.go b/staging/src/k8s.io/cli-runtime/pkg/resource/crd_finder_test.go index 77f33007e43be..6713f81e83c6b 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/resource/crd_finder_test.go +++ b/staging/src/k8s.io/cli-runtime/pkg/resource/crd_finder_test.go @@ -56,7 +56,7 @@ func TestCRDFinderErrors(t *testing.T) { } finder := NewCRDFinder(getter) found, err := finder.HasCRD(schema.GroupKind{Group: "", Kind: "Pod"}) - if found == true { + if found { t.Fatalf("Found the CRD with non-working getter function") } if err == nil { diff --git a/staging/src/k8s.io/client-go/OWNERS b/staging/src/k8s.io/client-go/OWNERS index 6426253aed659..97d0ffecea5a5 100644 --- a/staging/src/k8s.io/client-go/OWNERS +++ b/staging/src/k8s.io/client-go/OWNERS @@ -1,6 +1,7 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: + - aojea - caesarxuchao - deads2k - liggitt diff --git a/staging/src/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/staging/src/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index 6e373dd4ed17f..e4ae9c49f7935 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -24,6 +24,7 @@ type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` + Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"` } // LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with @@ -55,3 +56,11 @@ func (b *LifecycleHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActio b.TCPSocket = value return b } + +// WithSleep sets the Sleep field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Sleep field is set to the value of the last call. 
+func (b *LifecycleHandlerApplyConfiguration) WithSleep(value *SleepActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { + b.Sleep = value + return b +} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/staging/src/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index 7d2492203eed3..ac1eab3d8c722 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -29,6 +29,8 @@ type PodAffinityTermApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` TopologyKey *string `json:"topologyKey,omitempty"` NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } // PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with @@ -70,3 +72,23 @@ func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *v1.Labe b.NamespaceSelector = value return b } + +// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. +func (b *PodAffinityTermApplyConfiguration) WithMatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { + for i := range values { + b.MatchLabelKeys = append(b.MatchLabelKeys, values[i]) + } + return b +} + +// WithMismatchLabelKeys adds the given value to the MismatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MismatchLabelKeys field. +func (b *PodAffinityTermApplyConfiguration) WithMismatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { + for i := range values { + b.MismatchLabelKeys = append(b.MismatchLabelKeys, values[i]) + } + return b +} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go b/staging/src/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go new file mode 100644 index 0000000000000..8b3284536ad41 --- /dev/null +++ b/staging/src/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SleepActionApplyConfiguration represents an declarative configuration of the SleepAction type for use +// with apply. 
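// Illustrative sketch (not part of this patch): the new Sleep field can be set through
// the generated builders when constructing a lifecycle handler for server-side apply.
// The corev1 package alias and the Lifecycle()/WithPreStop chaining are assumptions
// for this example; WithSleep, SleepAction, and WithSeconds are defined in this change.
//
//	handler := corev1.LifecycleHandler().WithSleep(corev1.SleepAction().WithSeconds(5))
//	lifecycle := corev1.Lifecycle().WithPreStop(handler)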
+type SleepActionApplyConfiguration struct { + Seconds *int64 `json:"seconds,omitempty"` +} + +// SleepActionApplyConfiguration constructs an declarative configuration of the SleepAction type for use with +// apply. +func SleepAction() *SleepActionApplyConfiguration { + return &SleepActionApplyConfiguration{} +} + +// WithSeconds sets the Seconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Seconds field is set to the value of the last call. +func (b *SleepActionApplyConfiguration) WithSeconds(value int64) *SleepActionApplyConfiguration { + b.Seconds = &value + return b +} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go index b05d08a4b431a..1cfd30a6f66fd 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -1013,7 +1013,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1075,7 +1074,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1114,7 +1112,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1227,11 +1224,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1343,7 +1338,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1455,7 +1449,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1586,7 +1579,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1625,11 +1617,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1768,7 +1758,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1899,7 +1888,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1938,7 +1926,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2051,11 +2038,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: 
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2167,7 +2152,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2279,7 +2263,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2606,7 +2589,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2860,7 +2842,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: name type: scalar: string @@ -2904,7 +2885,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -2939,7 +2919,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3067,7 +3046,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus map: fields: @@ -3077,7 +3055,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -3102,14 +3079,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus map: fields: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -3139,7 +3114,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: name type: scalar: string @@ -3276,7 +3250,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3591,11 +3564,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3876,11 +3847,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4002,11 +3971,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4716,7 +4683,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.core.v1.ContainerStateTerminated map: fields: @@ -4730,7 +4696,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: finishedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4743,7 +4708,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.core.v1.ContainerStateWaiting map: fields: @@ -5099,11 +5063,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: firstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: involvedObject type: namedType: io.k8s.api.core.v1.ObjectReference @@ -5114,7 +5076,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5155,7 +5116,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.core.v1.EventSource map: fields: @@ -5338,7 +5298,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: scheme type: scalar: string @@ -5497,6 +5456,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: httpGet type: namedType: io.k8s.api.core.v1.HTTPGetAction + - name: sleep + type: + namedType: io.k8s.api.core.v1.SleepAction - name: tcpSocket type: namedType: io.k8s.api.core.v1.TCPSocketAction @@ -5643,7 +5605,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5731,11 +5692,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastHeartbeatTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6039,11 +5998,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6315,6 +6272,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: labelSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: mismatchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: namespaceSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector @@ -6349,11 +6318,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6963,7 +6930,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -7039,7 +7005,6 @@ var schemaYAML = 
typed.YAMLObject(`types: - name: divisor type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: resource type: scalar: string @@ -7462,7 +7427,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetPort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.core.v1.ServiceSpec map: fields: @@ -7565,6 +7529,13 @@ var schemaYAML = typed.YAMLObject(`types: - name: clientIP type: namedType: io.k8s.api.core.v1.ClientIPConfig +- name: io.k8s.api.core.v1.SleepAction + map: + fields: + - name: seconds + type: + scalar: numeric + default: 0 - name: io.k8s.api.core.v1.StorageOSPersistentVolumeSource map: fields: @@ -7621,7 +7592,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.core.v1.Taint map: fields: @@ -8172,11 +8142,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -8184,7 +8152,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: kind type: scalar: string @@ -8227,7 +8194,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.events.v1beta1.Event map: fields: @@ -8243,11 +8209,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -8255,7 +8219,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: kind type: scalar: string @@ -8298,7 +8261,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -8326,7 +8288,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8442,11 +8403,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8602,7 +8561,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress map: fields: @@ -8813,7 +8771,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8938,7 +8895,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: 
scalar: string @@ -9084,7 +9040,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9266,7 +9221,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9412,7 +9366,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9594,7 +9547,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9740,7 +9692,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -10252,41 +10203,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric -- name: io.k8s.api.networking.v1alpha1.ClusterCIDR - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec - default: {} -- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec - map: - fields: - - name: ipv4 - type: - scalar: string - default: "" - - name: ipv6 - type: - scalar: string - default: "" - - name: nodeSelector - type: - namedType: io.k8s.api.core.v1.NodeSelector - - name: perNodeHostBits - type: - scalar: numeric - default: 0 - name: io.k8s.api.networking.v1alpha1.IPAddress map: fields: @@ -10325,9 +10241,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: resource type: scalar: string - - name: uid - type: - scalar: string - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ -10383,7 +10296,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.networking.v1beta1.IngressClass map: fields: @@ -11882,7 +11794,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1.VolumeNodeResources map: fields: @@ -11987,7 +11898,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1beta1.CSIDriver map: fields: @@ -12237,7 +12147,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1beta1.VolumeNodeResources map: fields: @@ -12252,7 +12161,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -12380,7 +12288,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: creationTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deletionGracePeriodSeconds type: scalar: numeric diff --git a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go 
deleted file mode 100644 index ad0eae9198e35..0000000000000 --- a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,247 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use -// with apply. -type ClusterCIDRApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"` -} - -// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with -// apply. -func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration { - b := &ClusterCIDRApplyConfiguration{} - b.WithName(name) - b.WithKind("ClusterCIDR") - b.WithAPIVersion("networking.k8s.io/v1alpha1") - return b -} - -// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from -// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a -// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// clusterCIDR must be a unmodified ClusterCIDR API object that was retrieved from the Kubernetes API. -// ExtractClusterCIDR provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { - return extractClusterCIDR(clusterCIDR, fieldManager, "") -} - -// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except -// that it extracts the status subresource applied configuration. -// Experimental! 
-func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { - return extractClusterCIDR(clusterCIDR, fieldManager, "status") -} - -func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) { - b := &ClusterCIDRApplyConfiguration{} - err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(clusterCIDR.Name) - - b.WithKind("ClusterCIDR") - b.WithAPIVersion("networking.k8s.io/v1alpha1") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. 
-func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. 
-func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration { - b.Spec = value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go deleted file mode 100644 index 8d5fa406b094c..0000000000000 --- a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use -// with apply. -type ClusterCIDRSpecApplyConfiguration struct { - NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` - PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"` - IPv4 *string `json:"ipv4,omitempty"` - IPv6 *string `json:"ipv6,omitempty"` -} - -// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with -// apply. -func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration { - return &ClusterCIDRSpecApplyConfiguration{} -} - -// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeSelector field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration { - b.NodeSelector = value - return b -} - -// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PerNodeHostBits field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration { - b.PerNodeHostBits = &value - return b -} - -// WithIPv4 sets the IPv4 field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPv4 field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration { - b.IPv4 = &value - return b -} - -// WithIPv6 sets the IPv6 field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPv6 field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration { - b.IPv6 = &value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go index 14b10b19ff699..ce1049709a09d 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -18,18 +18,13 @@ limitations under the License. 
package v1alpha1 -import ( - types "k8s.io/apimachinery/pkg/types" -) - // ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use // with apply. type ParentReferenceApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Resource *string `json:"resource,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Name *string `json:"name,omitempty"` - UID *types.UID `json:"uid,omitempty"` + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` } // ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with @@ -69,11 +64,3 @@ func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentRefere b.Name = &value return b } - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ParentReferenceApplyConfiguration) WithUID(value types.UID) *ParentReferenceApplyConfiguration { - b.UID = &value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/utils.go b/staging/src/k8s.io/client-go/applyconfigurations/utils.go index 3e01cb9ccaa63..50a69de67a726 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/utils.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/utils.go @@ -911,6 +911,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &applyconfigurationscorev1.ServiceStatusApplyConfiguration{} case corev1.SchemeGroupVersion.WithKind("SessionAffinityConfig"): return &applyconfigurationscorev1.SessionAffinityConfigApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SleepAction"): + return &applyconfigurationscorev1.SleepActionApplyConfiguration{} case corev1.SchemeGroupVersion.WithKind("StorageOSPersistentVolumeSource"): return &applyconfigurationscorev1.StorageOSPersistentVolumeSourceApplyConfiguration{} case corev1.SchemeGroupVersion.WithKind("StorageOSVolumeSource"): @@ -1293,10 +1295,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &applyconfigurationsnetworkingv1.ServiceBackendPortApplyConfiguration{} // Group=networking.k8s.io, Version=v1alpha1 - case networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR"): - return &applyconfigurationsnetworkingv1alpha1.ClusterCIDRApplyConfiguration{} - case networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDRSpec"): - return &applyconfigurationsnetworkingv1alpha1.ClusterCIDRSpecApplyConfiguration{} case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddress"): return &applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration{} case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddressSpec"): diff --git a/staging/src/k8s.io/client-go/discovery/discovery_client.go b/staging/src/k8s.io/client-go/discovery/discovery_client.go index de10158c41d58..df0e0f9974ee9 100644 --- a/staging/src/k8s.io/client-go/discovery/discovery_client.go +++ b/staging/src/k8s.io/client-go/discovery/discovery_client.go @@ -420,6 +420,12 @@ func (e *ErrGroupDiscoveryFailed) Error() string { return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) } +// Is makes it possible for the callers to use `errors.Is(` helper on errors wrapped with 
ErrGroupDiscoveryFailed error. +func (e *ErrGroupDiscoveryFailed) Is(target error) bool { + _, ok := target.(*ErrGroupDiscoveryFailed) + return ok +} + // IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover // a complete list of APIs for the client to use. func IsGroupDiscoveryFailedError(err error) bool { diff --git a/staging/src/k8s.io/client-go/discovery/testdata/apis/batch/v1.json b/staging/src/k8s.io/client-go/discovery/testdata/apis/batch/v1.json index 3913e386074c4..14db75abc6013 100644 --- a/staging/src/k8s.io/client-go/discovery/testdata/apis/batch/v1.json +++ b/staging/src/k8s.io/client-go/discovery/testdata/apis/batch/v1.json @@ -1 +1 @@ -{"openapi":"3.0.0","info":{"title":"Kubernetes","version":"v1.24.0"},"paths":{"/apis/batch/v1/":{"get":{"tags":["batch_v1"],"description":"get available resources","operationId":"getBatchV1APIResources","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}}}}}}},"/apis/batch/v1/cronjobs":{"get":{"tags":["batch_v1"],"description":"list or watch objects of kind CronJob","operationId":"listBatchV1CronJobForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
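A minimal sketch (not part of the patch) of what the new Is method on ErrGroupDiscoveryFailed enables: callers can match a wrapped discovery failure with the standard errors.Is helper rather than relying solely on IsGroupDiscoveryFailedError. The group/version entry and wrapping message below are illustrative only.

package main

import (
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
)

func main() {
	discoveryErr := &discovery.ErrGroupDiscoveryFailed{
		Groups: map[schema.GroupVersion]error{
			{Group: "metrics.k8s.io", Version: "v1beta1"}: errors.New("the server is currently unable to handle the request"),
		},
	}
	// Callers often wrap the discovery error before returning it.
	wrapped := fmt.Errorf("listing API resources: %w", discoveryErr)

	// With the Is method above, errors.Is unwraps and matches on the error type.
	fmt.Println(errors.Is(wrapped, &discovery.ErrGroupDiscoveryFailed{})) // true
}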
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/jobs":{"get":{"tags":["batch_v1"],"description":"list or watch objects of kind Job","operationId":"listBatchV1JobForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/cronjobs":{"get":{"tags":["batch_v1"],"description":"list or watch objects of kind CronJob","operationId":"listBatchV1NamespacedCronJob","parameters":[{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}},{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}}}}}},"post":{"tags":["batch_v1"],"description":"create a CronJob","operationId":"createBatchV1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"delete":{"tags":["batch_v1"],"description":"delete collection of CronJob","operationId":"deleteBatchV1CollectionNamespacedCronJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. 
The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"parameters":[{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}":{"get":{"tags":["batch_v1"],"description":"read the specified CronJob","operationId":"readBatchV1NamespacedCronJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"put":{"tags":["batch_v1"],"description":"replace the specified CronJob","operationId":"replaceBatchV1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"delete":{"tags":["batch_v1"],"description":"delete a CronJob","operationId":"deleteBatchV1NamespacedCronJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. 
Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"patch":{"tags":["batch_v1"],"description":"partially update the specified CronJob","operationId":"patchBatchV1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status":{"get":{"tags":["batch_v1"],"description":"read status of the specified CronJob","operationId":"readBatchV1NamespacedCronJobStatus","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"put":{"tags":["batch_v1"],"description":"replace status of the specified CronJob","operationId":"replaceBatchV1NamespacedCronJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"patch":{"tags":["batch_v1"],"description":"partially update status of the specified CronJob","operationId":"patchBatchV1NamespacedCronJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/jobs":{"get":{"tags":["batch_v1"],"description":"list or watch objects of kind Job","operationId":"listBatchV1NamespacedJob","parameters":[{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}},{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}}}}}},"post":{"tags":["batch_v1"],"description":"create a Job","operationId":"createBatchV1NamespacedJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"delete":{"tags":["batch_v1"],"description":"delete collection of Job","operationId":"deleteBatchV1CollectionNamespacedJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"parameters":[{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/jobs/{name}":{"get":{"tags":["batch_v1"],"description":"read the specified Job","operationId":"readBatchV1NamespacedJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"put":{"tags":["batch_v1"],"description":"replace the specified Job","operationId":"replaceBatchV1NamespacedJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"delete":{"tags":["batch_v1"],"description":"delete a Job","operationId":"deleteBatchV1NamespacedJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"patch":{"tags":["batch_v1"],"description":"partially update the specified Job","operationId":"patchBatchV1NamespacedJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the Job","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status":{"get":{"tags":["batch_v1"],"description":"read status of the specified Job","operationId":"readBatchV1NamespacedJobStatus","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"put":{"tags":["batch_v1"],"description":"replace status of the specified Job","operationId":"replaceBatchV1NamespacedJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"patch":{"tags":["batch_v1"],"description":"partially update status of the specified Job","operationId":"patchBatchV1NamespacedJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the Job","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/watch/cronjobs":{"get":{"tags":["batch_v1"],"description":"watch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1CronJobListForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/jobs":{"get":{"tags":["batch_v1"],"description":"watch individual changes to a list of Job. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1JobListForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/namespaces/{namespace}/cronjobs":{"get":{"tags":["batch_v1"],"description":"watch individual changes to a list of CronJob. 
deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1NamespacedCronJobList","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/namespaces/{namespace}/cronjobs/{name}":{"get":{"tags":["batch_v1"],"description":"watch changes to an object of kind CronJob. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.","operationId":"watchBatchV1NamespacedCronJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/namespaces/{namespace}/jobs":{"get":{"tags":["batch_v1"],"description":"watch individual changes to a list of Job. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1NamespacedJobList","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/namespaces/{namespace}/jobs/{name}":{"get":{"tags":["batch_v1"],"description":"watch changes to an object of kind Job. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.","operationId":"watchBatchV1NamespacedJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"name","in":"path","description":"name of the Job","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]}},"components":{"schemas":{"io.k8s.api.batch.v1.CronJob":{"description":"CronJob represents the configuration of a single cron job.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobSpec"},"status":{"description":"Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobStatus"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"CronJob","version":"v1"}]},"io.k8s.api.batch.v1.CronJobList":{"description":"CronJobList is a collection of cron jobs.","type":"object","required":["items"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"items":{"description":"items is the list of CronJobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"CronJobList","version":"v1"}]},"io.k8s.api.batch.v1.CronJobSpec":{"description":"CronJobSpec describes how the job execution will look like and when it will actually run.","type":"object","required":["schedule","jobTemplate"],"properties":{"concurrencyPolicy":{"description":"Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one\n\n","type":"string"},"failedJobsHistoryLimit":{"description":"The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1.","type":"integer","format":"int32"},"jobTemplate":{"description":"Specifies the job that will be created when executing a CronJob.","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobTemplateSpec"},"schedule":{"description":"The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.","type":"string","default":""},"startingDeadlineSeconds":{"description":"Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.","type":"integer","format":"int64"},"successfulJobsHistoryLimit":{"description":"The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.","type":"integer","format":"int32"},"suspend":{"description":"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.","type":"boolean"}}},"io.k8s.api.batch.v1.CronJobStatus":{"description":"CronJobStatus represents the current state of a cron job.","type":"object","properties":{"active":{"description":"A list of pointers to currently running jobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectReference"},"x-kubernetes-list-type":"atomic"},"lastScheduleTime":{"description":"Information when was the last time the job was successfully scheduled.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"lastSuccessfulTime":{"description":"Information when was the last time the job successfully completed.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"}}},"io.k8s.api.batch.v1.Job":{"description":"Job represents the configuration of a single job.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobSpec"},"status":{"description":"Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobStatus"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"Job","version":"v1"}]},"io.k8s.api.batch.v1.JobCondition":{"description":"JobCondition describes current state of a job.","type":"object","required":["type","status"],"properties":{"lastProbeTime":{"description":"Last time the condition was checked.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"lastTransitionTime":{"description":"Last time the condition transit from one status to another.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"message":{"description":"Human readable message indicating details about last transition.","type":"string"},"reason":{"description":"(brief) reason for the condition's last transition.","type":"string"},"status":{"description":"Status of the condition, one of True, False, Unknown.","type":"string","default":""},"type":{"description":"Type of job condition, Complete or Failed.\n\n","type":"string","default":""}}},"io.k8s.api.batch.v1.JobList":{"description":"JobList is a collection of jobs.","type":"object","required":["items"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"items":{"description":"items is the list of Jobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"JobList","version":"v1"}]},"io.k8s.api.batch.v1.JobSpec":{"description":"JobSpec describes how the job execution will look like.","type":"object","required":["template"],"properties":{"activeDeadlineSeconds":{"description":"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. 
If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.","type":"integer","format":"int64"},"backoffLimit":{"description":"Specifies the number of retries before marking this job failed. Defaults to 6","type":"integer","format":"int32"},"completionMode":{"description":"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nThis field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.","type":"string"},"completions":{"description":"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"integer","format":"int32"},"manualSelector":{"description":"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector","type":"boolean"},"parallelism":{"description":"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) \u003c .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"integer","format":"int32"},"selector":{"description":"A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"suspend":{"description":"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. 
If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\nThis field is beta-level, gated by SuspendJob feature flag (enabled by default).","type":"boolean"},"template":{"description":"Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodTemplateSpec"},"ttlSecondsAfterFinished":{"description":"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.","type":"integer","format":"int32"}}},"io.k8s.api.batch.v1.JobStatus":{"description":"JobStatus represents the current state of a Job.","type":"object","properties":{"active":{"description":"The number of pending and running pods.","type":"integer","format":"int32"},"completedIndexes":{"description":"CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".","type":"string"},"completionTime":{"description":"Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"conditions":{"description":"The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobCondition"},"x-kubernetes-list-type":"atomic","x-kubernetes-patch-merge-key":"type","x-kubernetes-patch-strategy":"merge"},"failed":{"description":"The number of pods which reached phase Failed.","type":"integer","format":"int32"},"ready":{"description":"The number of pods which have a Ready condition.\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobReadyPods is enabled (disabled by default).","type":"integer","format":"int32"},"startTime":{"description":"Represents time when the job controller started processing a job. 
When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"succeeded":{"description":"The number of pods which reached phase Succeeded.","type":"integer","format":"int32"},"uncountedTerminatedPods":{"description":"UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nThis field is beta-level. The job controller only makes use of this field when the feature gate JobTrackingWithFinalizers is enabled (enabled by default). Old jobs might not be tracked using this field, in which case the field remains null.","$ref":"#/components/schemas/io.k8s.api.batch.v1.UncountedTerminatedPods"}}},"io.k8s.api.batch.v1.JobTemplateSpec":{"description":"JobTemplateSpec describes the data a Job should have when created from a template","type":"object","properties":{"metadata":{"description":"Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobSpec"}}},"io.k8s.api.batch.v1.UncountedTerminatedPods":{"description":"UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.","type":"object","properties":{"failed":{"description":"Failed holds UIDs of failed Pods.","type":"array","items":{"type":"string","default":""},"x-kubernetes-list-type":"set"},"succeeded":{"description":"Succeeded holds UIDs of succeeded Pods.","type":"array","items":{"type":"string","default":""},"x-kubernetes-list-type":"set"}}},"io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource":{"description":"Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"string"},"partition":{"description":"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". 
Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).","type":"integer","format":"int32"},"readOnly":{"description":"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"boolean"},"volumeID":{"description":"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"string","default":""}}},"io.k8s.api.core.v1.Affinity":{"description":"Affinity is a group of affinity scheduling rules.","type":"object","properties":{"nodeAffinity":{"description":"Describes node affinity scheduling rules for the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.NodeAffinity"},"podAffinity":{"description":"Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).","$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinity"},"podAntiAffinity":{"description":"Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).","$ref":"#/components/schemas/io.k8s.api.core.v1.PodAntiAffinity"}}},"io.k8s.api.core.v1.AzureDiskVolumeSource":{"description":"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.","type":"object","required":["diskName","diskURI"],"properties":{"cachingMode":{"description":"cachingMode is the Host Caching mode: None, Read Only, Read Write.","type":"string"},"diskName":{"description":"diskName is the Name of the data disk in the blob storage","type":"string","default":""},"diskURI":{"description":"diskURI is the URI of data disk in the blob storage","type":"string","default":""},"fsType":{"description":"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"kind":{"description":"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared","type":"string"},"readOnly":{"description":"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"}}},"io.k8s.api.core.v1.AzureFileVolumeSource":{"description":"AzureFile represents an Azure File Service mount on the host and bind mount to the pod.","type":"object","required":["secretName","shareName"],"properties":{"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretName":{"description":"secretName is the name of secret that contains Azure Storage Account Name and Key","type":"string","default":""},"shareName":{"description":"shareName is the azure share Name","type":"string","default":""}}},"io.k8s.api.core.v1.CSIVolumeSource":{"description":"Represents a source location of a volume to mount, managed by an external CSI driver","type":"object","required":["driver"],"properties":{"driver":{"description":"driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.","type":"string","default":""},"fsType":{"description":"fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.","type":"string"},"nodePublishSecretRef":{"description":"nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"readOnly":{"description":"readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).","type":"boolean"},"volumeAttributes":{"description":"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.","type":"object","additionalProperties":{"type":"string","default":""}}}},"io.k8s.api.core.v1.Capabilities":{"description":"Adds and removes POSIX capabilities from running containers.","type":"object","properties":{"add":{"description":"Added capabilities","type":"array","items":{"type":"string","default":""}},"drop":{"description":"Removed capabilities","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.CephFSVolumeSource":{"description":"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.","type":"object","required":["monitors"],"properties":{"monitors":{"description":"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"array","items":{"type":"string","default":""}},"path":{"description":"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /","type":"string"},"readOnly":{"description":"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"boolean"},"secretFile":{"description":"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"string"},"secretRef":{"description":"secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"user":{"description":"user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"string"}}},"io.k8s.api.core.v1.CinderVolumeSource":{"description":"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"boolean"},"secretRef":{"description":"secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"volumeID":{"description":"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"string","default":""}}},"io.k8s.api.core.v1.ConfigMapEnvSource":{"description":"ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the ConfigMap must be defined","type":"boolean"}}},"io.k8s.api.core.v1.ConfigMapKeySelector":{"description":"Selects a key from a ConfigMap.","type":"object","required":["key"],"properties":{"key":{"description":"The key to select.","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the ConfigMap or its key must be defined","type":"boolean"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ConfigMapProjection":{"description":"Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.","type":"object","properties":{"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional specify whether the ConfigMap or its keys must be defined","type":"boolean"}}},"io.k8s.api.core.v1.ConfigMapVolumeSource":{"description":"Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. 
ConfigMap volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional specify whether the ConfigMap or its keys must be defined","type":"boolean"}}},"io.k8s.api.core.v1.Container":{"description":"A single application container that you want to run within a pod.","type":"object","required":["name"],"properties":{"args":{"description":"Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"command":{"description":"Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"env":{"description":"List of environment variables to set in the container. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVar"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"envFrom":{"description":"List of sources to populate environment variables in the container. 
The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvFromSource"}},"image":{"description":"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.","type":"string"},"imagePullPolicy":{"description":"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\n","type":"string"},"lifecycle":{"description":"Actions that the management system should take in response to container lifecycle events. Cannot be updated.","$ref":"#/components/schemas/io.k8s.api.core.v1.Lifecycle"},"livenessProbe":{"description":"Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"name":{"description":"Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.","type":"string","default":""},"ports":{"description":"List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ContainerPort"},"x-kubernetes-list-map-keys":["containerPort","protocol"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"containerPort","x-kubernetes-patch-strategy":"merge"},"readinessProbe":{"description":"Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"resources":{"description":"Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"securityContext":{"description":"SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/","$ref":"#/components/schemas/io.k8s.api.core.v1.SecurityContext"},"startupProbe":{"description":"StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. 
If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"stdin":{"description":"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.","type":"boolean"},"stdinOnce":{"description":"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false","type":"boolean"},"terminationMessagePath":{"description":"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.","type":"string"},"terminationMessagePolicy":{"description":"Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\n","type":"string"},"tty":{"description":"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.","type":"boolean"},"volumeDevices":{"description":"volumeDevices is the list of block devices to be used by the container.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeDevice"},"x-kubernetes-patch-merge-key":"devicePath","x-kubernetes-patch-strategy":"merge"},"volumeMounts":{"description":"Pod volumes to mount into the container's filesystem. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeMount"},"x-kubernetes-patch-merge-key":"mountPath","x-kubernetes-patch-strategy":"merge"},"workingDir":{"description":"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.","type":"string"}}},"io.k8s.api.core.v1.ContainerPort":{"description":"ContainerPort represents a network port in a single container.","type":"object","required":["containerPort"],"properties":{"containerPort":{"description":"Number of port to expose on the pod's IP address. 
This must be a valid port number, 0 \u003c x \u003c 65536.","type":"integer","format":"int32","default":0},"hostIP":{"description":"What host IP to bind the external port to.","type":"string"},"hostPort":{"description":"Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.","type":"integer","format":"int32"},"name":{"description":"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.","type":"string"},"protocol":{"description":"Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".\n\n","type":"string","default":"TCP"}}},"io.k8s.api.core.v1.DownwardAPIProjection":{"description":"Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.","type":"object","properties":{"items":{"description":"Items is a list of DownwardAPIVolume file","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile"}}}},"io.k8s.api.core.v1.DownwardAPIVolumeFile":{"description":"DownwardAPIVolumeFile represents information to create the file containing the pod field","type":"object","required":["path"],"properties":{"fieldRef":{"description":"Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector"},"mode":{"description":"Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"path":{"description":"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'","type":"string","default":""},"resourceFieldRef":{"description":"Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector"}}},"io.k8s.api.core.v1.DownwardAPIVolumeSource":{"description":"DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"Items is a list of downward API volume file","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile"}}}},"io.k8s.api.core.v1.EmptyDirVolumeSource":{"description":"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.","type":"object","properties":{"medium":{"description":"medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","type":"string"},"sizeLimit":{"description":"sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}}},"io.k8s.api.core.v1.EnvFromSource":{"description":"EnvFromSource represents the source of a set of ConfigMaps","type":"object","properties":{"configMapRef":{"description":"The ConfigMap to select from","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapEnvSource"},"prefix":{"description":"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.","type":"string"},"secretRef":{"description":"The Secret to select from","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretEnvSource"}}},"io.k8s.api.core.v1.EnvVar":{"description":"EnvVar represents an environment variable present in a Container.","type":"object","required":["name"],"properties":{"name":{"description":"Name of the environment variable. Must be a C_IDENTIFIER.","type":"string","default":""},"value":{"description":"Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".","type":"string"},"valueFrom":{"description":"Source for the environment variable's value. 
Cannot be used if value is not empty.","$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVarSource"}}},"io.k8s.api.core.v1.EnvVarSource":{"description":"EnvVarSource represents a source for the value of an EnvVar.","type":"object","properties":{"configMapKeyRef":{"description":"Selects a key of a ConfigMap.","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapKeySelector"},"fieldRef":{"description":"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.","$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector"},"resourceFieldRef":{"description":"Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector"},"secretKeyRef":{"description":"Selects a key of a secret in the pod's namespace","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretKeySelector"}}},"io.k8s.api.core.v1.EphemeralContainer":{"description":"An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.","type":"object","required":["name"],"properties":{"args":{"description":"Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"command":{"description":"Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"env":{"description":"List of environment variables to set in the container. 
Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVar"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"envFrom":{"description":"List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvFromSource"}},"image":{"description":"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images","type":"string"},"imagePullPolicy":{"description":"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\n","type":"string"},"lifecycle":{"description":"Lifecycle is not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Lifecycle"},"livenessProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"name":{"description":"Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.","type":"string","default":""},"ports":{"description":"Ports are not allowed for ephemeral containers.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ContainerPort"},"x-kubernetes-list-map-keys":["containerPort","protocol"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"containerPort","x-kubernetes-patch-strategy":"merge"},"readinessProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"resources":{"description":"Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"securityContext":{"description":"Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.","$ref":"#/components/schemas/io.k8s.api.core.v1.SecurityContext"},"startupProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"stdin":{"description":"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.","type":"boolean"},"stdinOnce":{"description":"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. 
If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false","type":"boolean"},"targetContainerName":{"description":"If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.","type":"string"},"terminationMessagePath":{"description":"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.","type":"string"},"terminationMessagePolicy":{"description":"Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\n","type":"string"},"tty":{"description":"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.","type":"boolean"},"volumeDevices":{"description":"volumeDevices is the list of block devices to be used by the container.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeDevice"},"x-kubernetes-patch-merge-key":"devicePath","x-kubernetes-patch-strategy":"merge"},"volumeMounts":{"description":"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeMount"},"x-kubernetes-patch-merge-key":"mountPath","x-kubernetes-patch-strategy":"merge"},"workingDir":{"description":"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.","type":"string"}}},"io.k8s.api.core.v1.EphemeralVolumeSource":{"description":"Represents an ephemeral volume that is handled by a normal storage driver.","type":"object","properties":{"volumeClaimTemplate":{"description":"Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `\u003cpod name\u003e-\u003cvolume name\u003e` where `\u003cvolume name\u003e` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. 
If such a pre-created PVC is meant to be used by the pod, the PVC has to be updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.","$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimTemplate"}}},"io.k8s.api.core.v1.ExecAction":{"description":"ExecAction describes a \"run in container\" action.","type":"object","properties":{"command":{"description":"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.FCVolumeSource":{"description":"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.","type":"object","properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"lun":{"description":"lun is Optional: FC target lun number","type":"integer","format":"int32"},"readOnly":{"description":"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"targetWWNs":{"description":"targetWWNs is Optional: FC target worldwide names (WWNs)","type":"array","items":{"type":"string","default":""}},"wwids":{"description":"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.FlexVolumeSource":{"description":"FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.","type":"object","required":["driver"],"properties":{"driver":{"description":"driver is the name of the driver to use for this volume.","type":"string","default":""},"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.","type":"string"},"options":{"description":"options is Optional: this field holds extra command options if any.","type":"object","additionalProperties":{"type":"string","default":""}},"readOnly":{"description":"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. 
If the secret object contains more than one secret, all secrets are passed to the plugin scripts.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"}}},"io.k8s.api.core.v1.FlockerVolumeSource":{"description":"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.","type":"object","properties":{"datasetName":{"description":"datasetName is Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated","type":"string"},"datasetUUID":{"description":"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset","type":"string"}}},"io.k8s.api.core.v1.GCEPersistentDiskVolumeSource":{"description":"Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.","type":"object","required":["pdName"],"properties":{"fsType":{"description":"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"string"},"partition":{"description":"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"integer","format":"int32"},"pdName":{"description":"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"string","default":""},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"boolean"}}},"io.k8s.api.core.v1.GRPCAction":{"type":"object","required":["port"],"properties":{"port":{"description":"Port number of the gRPC service. Number must be in the range 1 to 65535.","type":"integer","format":"int32","default":0},"service":{"description":"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.","type":"string","default":""}}},"io.k8s.api.core.v1.GitRepoVolumeSource":{"description":"Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.","type":"object","required":["repository"],"properties":{"directory":{"description":"directory is the target directory name. Must not contain or start with '..'. If '.' 
is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.","type":"string"},"repository":{"description":"repository is the URL","type":"string","default":""},"revision":{"description":"revision is the commit hash for the specified revision.","type":"string"}}},"io.k8s.api.core.v1.GlusterfsVolumeSource":{"description":"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.","type":"object","required":["endpoints","path"],"properties":{"endpoints":{"description":"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"string","default":""},"path":{"description":"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"string","default":""},"readOnly":{"description":"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"boolean"}}},"io.k8s.api.core.v1.HTTPGetAction":{"description":"HTTPGetAction describes an action based on HTTP Get requests.","type":"object","required":["port"],"properties":{"host":{"description":"Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.","type":"string"},"httpHeaders":{"description":"Custom headers to set in the request. HTTP allows repeated headers.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPHeader"}},"path":{"description":"Path to access on the HTTP server.","type":"string"},"port":{"description":"Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"},"scheme":{"description":"Scheme to use for connecting to the host. Defaults to HTTP.\n\n","type":"string"}}},"io.k8s.api.core.v1.HTTPHeader":{"description":"HTTPHeader describes a custom header to be used in HTTP probes","type":"object","required":["name","value"],"properties":{"name":{"description":"The header field name","type":"string","default":""},"value":{"description":"The header field value","type":"string","default":""}}},"io.k8s.api.core.v1.HostAlias":{"description":"HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.","type":"object","properties":{"hostnames":{"description":"Hostnames for the above IP address.","type":"array","items":{"type":"string","default":""}},"ip":{"description":"IP address of the host file entry.","type":"string"}}},"io.k8s.api.core.v1.HostPathVolumeSource":{"description":"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.","type":"object","required":["path"],"properties":{"path":{"description":"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","type":"string","default":""},"type":{"description":"type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","type":"string"}}},"io.k8s.api.core.v1.ISCSIVolumeSource":{"description":"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.","type":"object","required":["targetPortal","iqn","lun"],"properties":{"chapAuthDiscovery":{"description":"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication","type":"boolean"},"chapAuthSession":{"description":"chapAuthSession defines whether support iSCSI Session CHAP authentication","type":"boolean"},"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi","type":"string"},"initiatorName":{"description":"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.","type":"string"},"iqn":{"description":"iqn is the target iSCSI Qualified Name.","type":"string","default":""},"iscsiInterface":{"description":"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).","type":"string"},"lun":{"description":"lun represents iSCSI Target Lun number.","type":"integer","format":"int32","default":0},"portals":{"description":"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).","type":"array","items":{"type":"string","default":""}},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.","type":"boolean"},"secretRef":{"description":"secretRef is the CHAP Secret for iSCSI target and initiator authentication","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"targetPortal":{"description":"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).","type":"string","default":""}}},"io.k8s.api.core.v1.KeyToPath":{"description":"Maps a string key to a path within a volume.","type":"object","required":["key","path"],"properties":{"key":{"description":"key is the key to project.","type":"string","default":""},"mode":{"description":"mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"path":{"description":"path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
May not start with the string '..'.","type":"string","default":""}}},"io.k8s.api.core.v1.Lifecycle":{"description":"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.","type":"object","properties":{"postStart":{"description":"PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks","$ref":"#/components/schemas/io.k8s.api.core.v1.LifecycleHandler"},"preStop":{"description":"PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks","$ref":"#/components/schemas/io.k8s.api.core.v1.LifecycleHandler"}}},"io.k8s.api.core.v1.LifecycleHandler":{"description":"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.","type":"object","properties":{"exec":{"description":"Exec specifies the action to take.","$ref":"#/components/schemas/io.k8s.api.core.v1.ExecAction"},"httpGet":{"description":"HTTPGet specifies the http request to perform.","$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPGetAction"},"tcpSocket":{"description":"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.","$ref":"#/components/schemas/io.k8s.api.core.v1.TCPSocketAction"}}},"io.k8s.api.core.v1.LocalObjectReference":{"description":"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.NFSVolumeSource":{"description":"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.","type":"object","required":["server","path"],"properties":{"path":{"description":"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"string","default":""},"readOnly":{"description":"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"boolean"},"server":{"description":"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"string","default":""}}},"io.k8s.api.core.v1.NodeAffinity":{"description":"Node affinity is a group of node affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PreferredSchedulingTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.","$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelector"}}},"io.k8s.api.core.v1.NodeSelector":{"description":"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.","type":"object","required":["nodeSelectorTerms"],"properties":{"nodeSelectorTerms":{"description":"Required. A list of node selector terms. The terms are ORed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm"}}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.NodeSelectorRequirement":{"description":"A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.","type":"object","required":["key","operator"],"properties":{"key":{"description":"The label key that the selector applies to.","type":"string","default":""},"operator":{"description":"Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.\n\n","type":"string","default":""},"values":{"description":"An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.NodeSelectorTerm":{"description":"A null or empty node selector term matches no objects. Their requirements are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.","type":"object","properties":{"matchExpressions":{"description":"A list of node selector requirements by node's labels.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement"}},"matchFields":{"description":"A list of node selector requirements by node's fields.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement"}}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ObjectFieldSelector":{"description":"ObjectFieldSelector selects an APIVersioned field of an object.","type":"object","required":["fieldPath"],"properties":{"apiVersion":{"description":"Version of the schema the FieldPath is written in terms of, defaults to \"v1\".","type":"string"},"fieldPath":{"description":"Path of the field to select in the specified API version.","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ObjectReference":{"description":"ObjectReference contains enough information to let you inspect or modify the referred object.","type":"object","properties":{"apiVersion":{"description":"API version of the referent.","type":"string"},"fieldPath":{"description":"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.","type":"string"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"namespace":{"description":"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/","type":"string"},"resourceVersion":{"description":"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"uid":{"description":"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids","type":"string"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.PersistentVolumeClaimSpec":{"description":"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes","type":"object","properties":{"accessModes":{"description":"accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1","type":"array","items":{"type":"string","default":""}},"dataSource":{"description":"dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.","$ref":"#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference"},"dataSourceRef":{"description":"dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.","$ref":"#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference"},"resources":{"description":"resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"selector":{"description":"selector is a label query over volumes to consider for binding.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"storageClassName":{"description":"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1","type":"string"},"volumeMode":{"description":"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.","type":"string"},"volumeName":{"description":"volumeName is the binding reference to the PersistentVolume backing this claim.","type":"string"}}},"io.k8s.api.core.v1.PersistentVolumeClaimTemplate":{"description":"PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.","type":"object","required":["spec"],"properties":{"metadata":{"description":"May contain labels and annotations that will be copied into the PVC when creating it. 
No other fields are allowed and will be rejected during validation.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimSpec"}}},"io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource":{"description":"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).","type":"object","required":["claimName"],"properties":{"claimName":{"description":"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims","type":"string","default":""},"readOnly":{"description":"readOnly Will force the ReadOnly setting in VolumeMounts. Default false.","type":"boolean"}}},"io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource":{"description":"Represents a Photon Controller persistent disk resource.","type":"object","required":["pdID"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"pdID":{"description":"pdID is the ID that identifies Photon Controller persistent disk","type":"string","default":""}}},"io.k8s.api.core.v1.PodAffinity":{"description":"Pod affinity is a group of inter pod affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"}}}},"io.k8s.api.core.v1.PodAffinityTerm":{"description":"Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running","type":"object","required":["topologyKey"],"properties":{"labelSelector":{"description":"A label query over a set of resources, in this case pods.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"namespaceSelector":{"description":"A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"namespaces":{"description":"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"","type":"array","items":{"type":"string","default":""}},"topologyKey":{"description":"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.","type":"string","default":""}}},"io.k8s.api.core.v1.PodAntiAffinity":{"description":"Pod anti affinity is a group of inter pod anti affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"}}}},"io.k8s.api.core.v1.PodDNSConfig":{"description":"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.","type":"object","properties":{"nameservers":{"description":"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.","type":"array","items":{"type":"string","default":""}},"options":{"description":"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodDNSConfigOption"}},"searches":{"description":"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.PodDNSConfigOption":{"description":"PodDNSConfigOption defines DNS resolver options of a pod.","type":"object","properties":{"name":{"description":"Required.","type":"string"},"value":{"type":"string"}}},"io.k8s.api.core.v1.PodOS":{"description":"PodOS defines the OS parameters of a pod.","type":"object","required":["name"],"properties":{"name":{"description":"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null","type":"string","default":""}}},"io.k8s.api.core.v1.PodReadinessGate":{"description":"PodReadinessGate contains the reference to a pod condition","type":"object","required":["conditionType"],"properties":{"conditionType":{"description":"ConditionType refers to a condition in the pod's condition list with matching type.\n\n","type":"string","default":""}}},"io.k8s.api.core.v1.PodSecurityContext":{"description":"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.","type":"object","properties":{"fsGroup":{"description":"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"fsGroupChangePolicy":{"description":"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). 
It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.","type":"string"},"runAsGroup":{"description":"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"runAsNonRoot":{"description":"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"boolean"},"runAsUser":{"description":"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"seLinuxOptions":{"description":"The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SELinuxOptions"},"seccompProfile":{"description":"The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SeccompProfile"},"supplementalGroups":{"description":"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.","type":"array","items":{"type":"integer","format":"int64","default":0}},"sysctls":{"description":"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Sysctl"}},"windowsOptions":{"description":"The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is linux.","$ref":"#/components/schemas/io.k8s.api.core.v1.WindowsSecurityContextOptions"}}},"io.k8s.api.core.v1.PodSpec":{"description":"PodSpec is a description of a pod.","type":"object","required":["containers"],"properties":{"activeDeadlineSeconds":{"description":"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.","type":"integer","format":"int64"},"affinity":{"description":"If specified, the pod's scheduling constraints","$ref":"#/components/schemas/io.k8s.api.core.v1.Affinity"},"automountServiceAccountToken":{"description":"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.","type":"boolean"},"containers":{"description":"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Container"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"dnsConfig":{"description":"Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.","$ref":"#/components/schemas/io.k8s.api.core.v1.PodDNSConfig"},"dnsPolicy":{"description":"Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\n","type":"string"},"enableServiceLinks":{"description":"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.","type":"boolean"},"ephemeralContainers":{"description":"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EphemeralContainer"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"hostAliases":{"description":"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.HostAlias"},"x-kubernetes-patch-merge-key":"ip","x-kubernetes-patch-strategy":"merge"},"hostIPC":{"description":"Use the host's ipc namespace. Optional: Default to false.","type":"boolean"},"hostNetwork":{"description":"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.","type":"boolean"},"hostPID":{"description":"Use the host's pid namespace. 
Optional: Default to false.","type":"boolean"},"hostname":{"description":"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.","type":"string"},"imagePullSecrets":{"description":"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"initContainers":{"description":"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Container"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"nodeName":{"description":"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.","type":"string"},"nodeSelector":{"description":"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/","type":"object","additionalProperties":{"type":"string","default":""},"x-kubernetes-map-type":"atomic"},"os":{"description":"Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is an alpha field and requires the IdentifyPodOS feature","$ref":"#/components/schemas/io.k8s.api.core.v1.PodOS"},"overhead":{"description":"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}},"preemptionPolicy":{"description":"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.","type":"string"},"priority":{"description":"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.","type":"integer","format":"int32"},"priorityClassName":{"description":"If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.","type":"string"},"readinessGates":{"description":"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodReadinessGate"}},"restartPolicy":{"description":"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n\n","type":"string"},"runtimeClassName":{"description":"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14.","type":"string"},"schedulerName":{"description":"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.","type":"string"},"securityContext":{"description":"SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.","$ref":"#/components/schemas/io.k8s.api.core.v1.PodSecurityContext"},"serviceAccount":{"description":"DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.","type":"string"},"serviceAccountName":{"description":"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/","type":"string"},"setHostnameAsFQDN":{"description":"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.","type":"boolean"},"shareProcessNamespace":{"description":"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.","type":"boolean"},"subdomain":{"description":"If specified, the fully qualified Pod hostname will be \"\u003chostname\u003e.\u003csubdomain\u003e.\u003cpod namespace\u003e.svc.\u003ccluster domain\u003e\". If not specified, the pod will not have a domainname at all.","type":"string"},"terminationGracePeriodSeconds":{"description":"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. 
The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.","type":"integer","format":"int64"},"tolerations":{"description":"If specified, the pod's tolerations.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Toleration"}},"topologySpreadConstraints":{"description":"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.TopologySpreadConstraint"},"x-kubernetes-list-map-keys":["topologyKey","whenUnsatisfiable"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"topologyKey","x-kubernetes-patch-strategy":"merge"},"volumes":{"description":"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Volume"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge,retainKeys"}}},"io.k8s.api.core.v1.PodTemplateSpec":{"description":"PodTemplateSpec describes the data a pod should have when created from a template","type":"object","properties":{"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodSpec"}}},"io.k8s.api.core.v1.PortworxVolumeSource":{"description":"PortworxVolumeSource represents a Portworx volume resource.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"volumeID":{"description":"volumeID uniquely identifies a Portworx volume","type":"string","default":""}}},"io.k8s.api.core.v1.PreferredSchedulingTerm":{"description":"An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).","type":"object","required":["weight","preference"],"properties":{"preference":{"description":"A node selector term, associated with the corresponding weight.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm"},"weight":{"description":"Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.","type":"integer","format":"int32","default":0}}},"io.k8s.api.core.v1.Probe":{"description":"Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.","type":"object","properties":{"exec":{"description":"Exec specifies the action to take.","$ref":"#/components/schemas/io.k8s.api.core.v1.ExecAction"},"failureThreshold":{"description":"Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.","type":"integer","format":"int32"},"grpc":{"description":"GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.","$ref":"#/components/schemas/io.k8s.api.core.v1.GRPCAction"},"httpGet":{"description":"HTTPGet specifies the http request to perform.","$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPGetAction"},"initialDelaySeconds":{"description":"Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","type":"integer","format":"int32"},"periodSeconds":{"description":"How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.","type":"integer","format":"int32"},"successThreshold":{"description":"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.","type":"integer","format":"int32"},"tcpSocket":{"description":"TCPSocket specifies an action involving a TCP port.","$ref":"#/components/schemas/io.k8s.api.core.v1.TCPSocketAction"},"terminationGracePeriodSeconds":{"description":"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.","type":"integer","format":"int64"},"timeoutSeconds":{"description":"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","type":"integer","format":"int32"}}},"io.k8s.api.core.v1.ProjectedVolumeSource":{"description":"Represents a projected volume source","type":"object","properties":{"defaultMode":{"description":"defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"sources":{"description":"sources is the list of volume projections","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeProjection"}}}},"io.k8s.api.core.v1.QuobyteVolumeSource":{"description":"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.","type":"object","required":["registry","volume"],"properties":{"group":{"description":"group to map volume access to Default is no group","type":"string"},"readOnly":{"description":"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.","type":"boolean"},"registry":{"description":"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes","type":"string","default":""},"tenant":{"description":"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin","type":"string"},"user":{"description":"user to map volume access to Defaults to serivceaccount user","type":"string"},"volume":{"description":"volume is a string that references an already created Quobyte volume by name.","type":"string","default":""}}},"io.k8s.api.core.v1.RBDVolumeSource":{"description":"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.","type":"object","required":["monitors","image"],"properties":{"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd","type":"string"},"image":{"description":"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string","default":""},"keyring":{"description":"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"},"monitors":{"description":"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"array","items":{"type":"string","default":""}},"pool":{"description":"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"boolean"},"secretRef":{"description":"secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"user":{"description":"user is the rados user name. Default is admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"}}},"io.k8s.api.core.v1.ResourceFieldSelector":{"description":"ResourceFieldSelector represents container resources (cpu, memory) and their output format","type":"object","required":["resource"],"properties":{"containerName":{"description":"Container name: required for volumes, optional for env vars","type":"string"},"divisor":{"description":"Specifies the output format of the exposed resources, defaults to \"1\"","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"},"resource":{"description":"Required: resource to select","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ResourceRequirements":{"description":"ResourceRequirements describes the compute resource requirements.","type":"object","properties":{"limits":{"description":"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}},"requests":{"description":"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}}}},"io.k8s.api.core.v1.SELinuxOptions":{"description":"SELinuxOptions are the labels to be applied to the container","type":"object","properties":{"level":{"description":"Level is SELinux level label that applies to the container.","type":"string"},"role":{"description":"Role is a SELinux role label that applies to the container.","type":"string"},"type":{"description":"Type is a SELinux type label that applies to the container.","type":"string"},"user":{"description":"User is a SELinux user label that applies to the container.","type":"string"}}},"io.k8s.api.core.v1.ScaleIOVolumeSource":{"description":"ScaleIOVolumeSource represents a persistent ScaleIO volume","type":"object","required":["gateway","system","secretRef"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".","type":"string"},"gateway":{"description":"gateway is the host address of the ScaleIO API Gateway.","type":"string","default":""},"protectionDomain":{"description":"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.","type":"string"},"readOnly":{"description":"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"sslEnabled":{"description":"sslEnabled Flag enable/disable SSL communication with Gateway, default false","type":"boolean"},"storageMode":{"description":"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.","type":"string"},"storagePool":{"description":"storagePool is the ScaleIO Storage Pool associated with the protection domain.","type":"string"},"system":{"description":"system is the name of the storage system as configured in ScaleIO.","type":"string","default":""},"volumeName":{"description":"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.","type":"string"}}},"io.k8s.api.core.v1.SeccompProfile":{"description":"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.","type":"object","required":["type"],"properties":{"localhostProfile":{"description":"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".","type":"string"},"type":{"description":"type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.\n\n","type":"string","default":""}},"x-kubernetes-unions":[{"discriminator":"type","fields-to-discriminateBy":{"localhostProfile":"LocalhostProfile"}}]},"io.k8s.api.core.v1.SecretEnvSource":{"description":"SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the Secret must be defined","type":"boolean"}}},"io.k8s.api.core.v1.SecretKeySelector":{"description":"SecretKeySelector selects a key of a Secret.","type":"object","required":["key"],"properties":{"key":{"description":"The key of the secret to select from. Must be a valid secret key.","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the Secret or its key must be defined","type":"boolean"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.SecretProjection":{"description":"Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.","type":"object","properties":{"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional field specify whether the Secret or its key must be defined","type":"boolean"}}},"io.k8s.api.core.v1.SecretVolumeSource":{"description":"Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"optional":{"description":"optional field specify whether the Secret or its keys must be defined","type":"boolean"},"secretName":{"description":"secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret","type":"string"}}},"io.k8s.api.core.v1.SecurityContext":{"description":"SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.","type":"object","properties":{"allowPrivilegeEscalation":{"description":"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"capabilities":{"description":"The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.Capabilities"},"privileged":{"description":"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"procMount":{"description":"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. 
Note that this field cannot be set when spec.os.name is windows.","type":"string"},"readOnlyRootFilesystem":{"description":"Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"runAsGroup":{"description":"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"runAsNonRoot":{"description":"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"boolean"},"runAsUser":{"description":"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"seLinuxOptions":{"description":"The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SELinuxOptions"},"seccompProfile":{"description":"The seccomp options to use by this container. If seccomp options are provided at both the pod \u0026 container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SeccompProfile"},"windowsOptions":{"description":"The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.","$ref":"#/components/schemas/io.k8s.api.core.v1.WindowsSecurityContextOptions"}}},"io.k8s.api.core.v1.ServiceAccountTokenProjection":{"description":"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).","type":"object","required":["path"],"properties":{"audience":{"description":"audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.","type":"string"},"expirationSeconds":{"description":"expirationSeconds is the requested duration of validity of the service account token. 
As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.","type":"integer","format":"int64"},"path":{"description":"path is the path relative to the mount point of the file to project the token into.","type":"string","default":""}}},"io.k8s.api.core.v1.StorageOSVolumeSource":{"description":"Represents a StorageOS persistent volume resource.","type":"object","properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"volumeName":{"description":"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.","type":"string"},"volumeNamespace":{"description":"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.","type":"string"}}},"io.k8s.api.core.v1.Sysctl":{"description":"Sysctl defines a kernel parameter to be set","type":"object","required":["name","value"],"properties":{"name":{"description":"Name of a property to set","type":"string","default":""},"value":{"description":"Value of a property to set","type":"string","default":""}}},"io.k8s.api.core.v1.TCPSocketAction":{"description":"TCPSocketAction describes an action based on opening a socket","type":"object","required":["port"],"properties":{"host":{"description":"Optional: Host name to connect to, defaults to the pod IP.","type":"string"},"port":{"description":"Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"}}},"io.k8s.api.core.v1.Toleration":{"description":"The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.","type":"object","properties":{"effect":{"description":"Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n\n","type":"string"},"key":{"description":"Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.","type":"string"},"operator":{"description":"Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.\n\n","type":"string"},"tolerationSeconds":{"description":"TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.","type":"integer","format":"int64"},"value":{"description":"Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.","type":"string"}}},"io.k8s.api.core.v1.TopologySpreadConstraint":{"description":"TopologySpreadConstraint specifies how to spread matching pods among the given topology.","type":"object","required":["maxSkew","topologyKey","whenUnsatisfiable"],"properties":{"labelSelector":{"description":"LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"maxSkew":{"description":"MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.","type":"integer","format":"int32","default":0},"topologyKey":{"description":"TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.","type":"string","default":""},"whenUnsatisfiable":{"description":"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. 
It's a required field.\n\n","type":"string","default":""}}},"io.k8s.api.core.v1.TypedLocalObjectReference":{"description":"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.","type":"object","required":["kind","name"],"properties":{"apiGroup":{"description":"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.","type":"string"},"kind":{"description":"Kind is the type of resource being referenced","type":"string","default":""},"name":{"description":"Name is the name of resource being referenced","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.Volume":{"description":"Volume represents a named volume in a pod that may be accessed by any container in the pod.","type":"object","required":["name"],"properties":{"awsElasticBlockStore":{"description":"awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","$ref":"#/components/schemas/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource"},"azureDisk":{"description":"azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.AzureDiskVolumeSource"},"azureFile":{"description":"azureFile represents an Azure File Service mount on the host and bind mount to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.AzureFileVolumeSource"},"cephfs":{"description":"cephFS represents a Ceph FS mount on the host that shares a pod's lifetime","$ref":"#/components/schemas/io.k8s.api.core.v1.CephFSVolumeSource"},"cinder":{"description":"cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.CinderVolumeSource"},"configMap":{"description":"configMap represents a configMap that should populate this volume","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapVolumeSource"},"csi":{"description":"csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).","$ref":"#/components/schemas/io.k8s.api.core.v1.CSIVolumeSource"},"downwardAPI":{"description":"downwardAPI represents downward API about the pod that should populate this volume","$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeSource"},"emptyDir":{"description":"emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","$ref":"#/components/schemas/io.k8s.api.core.v1.EmptyDirVolumeSource"},"ephemeral":{"description":"ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.","$ref":"#/components/schemas/io.k8s.api.core.v1.EphemeralVolumeSource"},"fc":{"description":"fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.FCVolumeSource"},"flexVolume":{"description":"flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.","$ref":"#/components/schemas/io.k8s.api.core.v1.FlexVolumeSource"},"flocker":{"description":"flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running","$ref":"#/components/schemas/io.k8s.api.core.v1.FlockerVolumeSource"},"gcePersistentDisk":{"description":"gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","$ref":"#/components/schemas/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource"},"gitRepo":{"description":"gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.","$ref":"#/components/schemas/io.k8s.api.core.v1.GitRepoVolumeSource"},"glusterfs":{"description":"glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.GlusterfsVolumeSource"},"hostPath":{"description":"hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","$ref":"#/components/schemas/io.k8s.api.core.v1.HostPathVolumeSource"},"iscsi":{"description":"iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.ISCSIVolumeSource"},"name":{"description":"name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string","default":""},"nfs":{"description":"nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","$ref":"#/components/schemas/io.k8s.api.core.v1.NFSVolumeSource"},"persistentVolumeClaim":{"description":"persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims","$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource"},"photonPersistentDisk":{"description":"photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource"},"portworxVolume":{"description":"portworxVolume represents a portworx volume attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.PortworxVolumeSource"},"projected":{"description":"projected items for all in one resources secrets, configmaps, and downward API","$ref":"#/components/schemas/io.k8s.api.core.v1.ProjectedVolumeSource"},"quobyte":{"description":"quobyte represents a Quobyte mount on the host that shares a pod's lifetime","$ref":"#/components/schemas/io.k8s.api.core.v1.QuobyteVolumeSource"},"rbd":{"description":"rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.RBDVolumeSource"},"scaleIO":{"description":"scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.","$ref":"#/components/schemas/io.k8s.api.core.v1.ScaleIOVolumeSource"},"secret":{"description":"secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretVolumeSource"},"storageos":{"description":"storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.","$ref":"#/components/schemas/io.k8s.api.core.v1.StorageOSVolumeSource"},"vsphereVolume":{"description":"vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"}}},"io.k8s.api.core.v1.VolumeDevice":{"description":"volumeDevice describes a mapping of a raw block device within a container.","type":"object","required":["name","devicePath"],"properties":{"devicePath":{"description":"devicePath is the path inside of the container that the device will be mapped to.","type":"string","default":""},"name":{"description":"name must match the name of a persistentVolumeClaim in the pod","type":"string","default":""}}},"io.k8s.api.core.v1.VolumeMount":{"description":"VolumeMount describes a mounting of a Volume within a container.","type":"object","required":["name","mountPath"],"properties":{"mountPath":{"description":"Path within the container at which the volume should be mounted. Must not contain ':'.","type":"string","default":""},"mountPropagation":{"description":"mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. 
This field is beta in 1.10.","type":"string"},"name":{"description":"This must match the Name of a Volume.","type":"string","default":""},"readOnly":{"description":"Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.","type":"boolean"},"subPath":{"description":"Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).","type":"string"},"subPathExpr":{"description":"Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.","type":"string"}}},"io.k8s.api.core.v1.VolumeProjection":{"description":"Projection that may be projected along with other supported volume types","type":"object","properties":{"configMap":{"description":"configMap information about the configMap data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapProjection"},"downwardAPI":{"description":"downwardAPI information about the downwardAPI data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIProjection"},"secret":{"description":"secret information about the secret data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretProjection"},"serviceAccountToken":{"description":"serviceAccountToken is information about the serviceAccountToken data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.ServiceAccountTokenProjection"}}},"io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource":{"description":"Represents a vSphere volume resource.","type":"object","required":["volumePath"],"properties":{"fsType":{"description":"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"storagePolicyID":{"description":"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.","type":"string"},"storagePolicyName":{"description":"storagePolicyName is the storage Policy Based Management (SPBM) profile name.","type":"string"},"volumePath":{"description":"volumePath is the path that identifies vSphere volume vmdk","type":"string","default":""}}},"io.k8s.api.core.v1.WeightedPodAffinityTerm":{"description":"The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)","type":"object","required":["weight","podAffinityTerm"],"properties":{"podAffinityTerm":{"description":"Required. 
A pod affinity term, associated with the corresponding weight.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"},"weight":{"description":"weight associated with matching the corresponding podAffinityTerm, in the range 1-100.","type":"integer","format":"int32","default":0}}},"io.k8s.api.core.v1.WindowsSecurityContextOptions":{"description":"WindowsSecurityContextOptions contain Windows-specific options and credentials.","type":"object","properties":{"gmsaCredentialSpec":{"description":"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.","type":"string"},"gmsaCredentialSpecName":{"description":"GMSACredentialSpecName is the name of the GMSA credential spec to use.","type":"string"},"hostProcess":{"description":"HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.","type":"boolean"},"runAsUserName":{"description":"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"string"}}},"io.k8s.apimachinery.pkg.api.resource.Quantity":{"description":"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.","type":"string"},"io.k8s.apimachinery.pkg.apis.meta.v1.APIResource":{"description":"APIResource specifies the name of a resource and whether it is namespaced.","type":"object","required":["name","singularName","namespaced","kind","verbs"],"properties":{"categories":{"description":"categories is a list of the grouped resources this resource belongs to (e.g. 'all')","type":"array","items":{"type":"string","default":""}},"group":{"description":"group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".","type":"string"},"kind":{"description":"kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')","type":"string","default":""},"name":{"description":"name is the plural name of the resource.","type":"string","default":""},"namespaced":{"description":"namespaced indicates if a resource is namespaced or not.","type":"boolean","default":false},"shortNames":{"description":"shortNames is a list of suggested short names of the resource.","type":"array","items":{"type":"string","default":""}},"singularName":{"description":"singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.","type":"string","default":""},"storageVersionHash":{"description":"The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.","type":"string"},"verbs":{"description":"verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)","type":"array","items":{"type":"string","default":""}},"version":{"description":"version is the preferred version of the resource. 
Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList":{"description":"APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.","type":"object","required":["groupVersion","resources"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"groupVersion":{"description":"groupVersion is the group and version this APIResourceList is for.","type":"string","default":""},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"resources":{"description":"resources contains the name of the resources and if they are namespaced.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource"}}},"x-kubernetes-group-version-kind":[{"group":"","kind":"APIResourceList","version":"v1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions":{"description":"DeleteOptions may be provided when deleting an API object.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"dryRun":{"description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","type":"array","items":{"type":"string","default":""}},"gracePeriodSeconds":{"description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","type":"integer","format":"int64"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"orphanDependents":{"description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both.","type":"boolean"},"preconditions":{"description":"Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions"},"propagationPolicy":{"description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","type":"string"}},"x-kubernetes-group-version-kind":[{"group":"","kind":"DeleteOptions","version":"v1"},{"group":"admission.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"admission.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"admissionregistration.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"admissionregistration.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apiextensions.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"apiextensions.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apiregistration.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"apiregistration.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apps","kind":"DeleteOptions","version":"v1"},{"group":"apps","kind":"DeleteOptions","version":"v1beta1"},{"group":"apps","kind":"DeleteOptions","version":"v1beta2"},{"group":"authentication.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"authentication.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"authorization.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"authorization.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2beta1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2beta2"},{"group":"batch","kind":"DeleteOptions","version":"v1"},{"group":"batch","kind":"DeleteOptions","version":"v1beta1"},{"group":"certificates.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"certificates.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"coordination.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"coordination.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"discovery.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"discovery.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"events.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"events.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"extensions","kind":"DeleteOptions","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1beta2"},{"group":"imagepolicy.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"internal.apiserver.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"networking.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"networking.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":
"node.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"node.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"node.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"policy","kind":"DeleteOptions","version":"v1"},{"group":"policy","kind":"DeleteOptions","version":"v1beta1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1beta1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1":{"description":"FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff","type":"object"},"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector":{"description":"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.","type":"object","properties":{"matchExpressions":{"description":"matchExpressions is a list of label selector requirements. The requirements are ANDed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement"}},"matchLabels":{"description":"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.","type":"object","additionalProperties":{"type":"string","default":""}}},"x-kubernetes-map-type":"atomic"},"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement":{"description":"A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.","type":"object","required":["key","operator"],"properties":{"key":{"description":"key is the label key that the selector applies to.","type":"string","default":"","x-kubernetes-patch-merge-key":"key","x-kubernetes-patch-strategy":"merge"},"operator":{"description":"operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.","type":"string","default":""},"values":{"description":"values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta":{"description":"ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.","type":"object","properties":{"continue":{"description":"continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.","type":"string"},"remainingItemCount":{"description":"remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.","type":"integer","format":"int64"},"resourceVersion":{"description":"String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is DEPRECATED read-only field that is no longer populated by the system.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry":{"description":"ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.","type":"string"},"fieldsType":{"description":"FieldsType is the discriminator for the different fields format and version. 
There is currently only one possible value: \"FieldsV1\"","type":"string"},"fieldsV1":{"description":"FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"},"manager":{"description":"Manager is an identifier of the workflow managing these fields.","type":"string"},"operation":{"description":"Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.","type":"string"},"subresource":{"description":"Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.","type":"string"},"time":{"description":"Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta":{"description":"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.","type":"object","properties":{"annotations":{"description":"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations","type":"object","additionalProperties":{"type":"string","default":""}},"clusterName":{"description":"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.","type":"string"},"creationTimestamp":{"description":"CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"deletionGracePeriodSeconds":{"description":"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.","type":"integer","format":"int64"},"deletionTimestamp":{"description":"DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"finalizers":{"description":"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.","type":"array","items":{"type":"string","default":""},"x-kubernetes-patch-strategy":"merge"},"generateName":{"description":"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency","type":"string"},"generation":{"description":"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.","type":"integer","format":"int64"},"labels":{"description":"Map of string keys and values that can be used to organize and categorize (scope and select) objects. 
May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels","type":"object","additionalProperties":{"type":"string","default":""}},"managedFields":{"description":"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry"}},"name":{"description":"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names","type":"string"},"namespace":{"description":"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces","type":"string"},"ownerReferences":{"description":"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"},"x-kubernetes-patch-merge-key":"uid","x-kubernetes-patch-strategy":"merge"},"resourceVersion":{"description":"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is DEPRECATED read-only field that is no longer populated by the system.","type":"string"},"uid":{"description":"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference":{"description":"OwnerReference contains enough information to let you identify an owning object. 
An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.","type":"object","required":["apiVersion","kind","name","uid"],"properties":{"apiVersion":{"description":"API version of the referent.","type":"string","default":""},"blockOwnerDeletion":{"description":"If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.","type":"boolean"},"controller":{"description":"If true, this reference points to the managing controller.","type":"boolean"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names","type":"string","default":""},"uid":{"description":"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.apimachinery.pkg.apis.meta.v1.Patch":{"description":"Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.","type":"object"},"io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions":{"description":"Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.","type":"object","properties":{"resourceVersion":{"description":"Specifies the target ResourceVersion","type":"string"},"uid":{"description":"Specifies the target UID.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.Status":{"description":"Status is a return value for calls that don't return other objects.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"code":{"description":"Suggested HTTP return code for this status, 0 if not set.","type":"integer","format":"int32"},"details":{"description":"Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"message":{"description":"A human-readable description of the status of this operation.","type":"string"},"metadata":{"description":"Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"},"reason":{"description":"A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.","type":"string"},"status":{"description":"Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","type":"string"}},"x-kubernetes-group-version-kind":[{"group":"","kind":"Status","version":"v1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause":{"description":"StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.","type":"object","properties":{"field":{"description":"The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"","type":"string"},"message":{"description":"A human-readable description of the cause of the error. This field may be presented as-is to a reader.","type":"string"},"reason":{"description":"A machine-readable description of the cause of the error. If this value is empty there is no information available.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails":{"description":"StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.","type":"object","properties":{"causes":{"description":"The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"}},"group":{"description":"The group attribute of the resource associated with the status StatusReason.","type":"string"},"kind":{"description":"The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"name":{"description":"The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).","type":"string"},"retryAfterSeconds":{"description":"If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.","type":"integer","format":"int32"},"uid":{"description":"UID of the resource. (when there is a single resource which can be described). 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.Time":{"description":"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.","type":"string","format":"date-time"},"io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent":{"description":"Event represents a single event to a watched resource.","type":"object","required":["type","object"],"properties":{"object":{"description":"Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension"},"type":{"type":"string","default":""}},"x-kubernetes-group-version-kind":[{"group":"","kind":"WatchEvent","version":"v1"},{"group":"admission.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"admission.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"admissionregistration.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"admissionregistration.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apiextensions.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"apiextensions.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apiregistration.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"apiregistration.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apps","kind":"WatchEvent","version":"v1"},{"group":"apps","kind":"WatchEvent","version":"v1beta1"},{"group":"apps","kind":"WatchEvent","version":"v1beta2"},{"group":"authentication.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"authentication.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"authorization.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"authorization.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"autoscaling","kind":"WatchEvent","version":"v1"},{"group":"autoscaling","kind":"WatchEvent","version":"v2"},{"group":"autoscaling","kind":"WatchEvent","version":"v2beta1"},{"group":"autoscaling","kind":"WatchEvent","version":"v2beta2"},{"group":"batch","kind":"WatchEvent","version":"v1"},{"group":"batch","kind":"WatchEvent","version":"v1beta1"},{"group":"certificates.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"certificates.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"coordination.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"coordination.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"discovery.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"discovery.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"events.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"events.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"extensions","kind":"WatchEvent","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1beta2"},{"group":"imagepolicy.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"internal.apiserver.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"networking.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"networking.k8s.io","kind":"WatchEvent","version":"v1beta1
"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"policy","kind":"WatchEvent","version":"v1"},{"group":"policy","kind":"WatchEvent","version":"v1beta1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1beta1"}]},"io.k8s.apimachinery.pkg.runtime.RawExtension":{"description":"RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)","type":"object"},"io.k8s.apimachinery.pkg.util.intstr.IntOrString":{"description":"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number.","type":"string","format":"int-or-string"}}}} +{"openapi":"3.0.0","info":{"title":"Kubernetes","version":"v1.24.0"},"paths":{"/apis/batch/v1/":{"get":{"tags":["batch_v1"],"description":"get available resources","operationId":"getBatchV1APIResources","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}}}}}}},"/apis/batch/v1/cronjobs":{"get":{"tags":["batch_v1"],"description":"list or watch objects of kind CronJob","operationId":"listBatchV1CronJobForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/jobs":{"get":{"tags":["batch_v1"],"description":"list or watch objects of kind Job","operationId":"listBatchV1JobForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/cronjobs":{"get":{"tags":["batch_v1"],"description":"list or watch objects of kind CronJob","operationId":"listBatchV1NamespacedCronJob","parameters":[{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}},{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobList"}}}}}},"post":{"tags":["batch_v1"],"description":"create a CronJob","operationId":"createBatchV1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"delete":{"tags":["batch_v1"],"description":"delete collection of CronJob","operationId":"deleteBatchV1CollectionNamespacedCronJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"parameters":[{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}":{"get":{"tags":["batch_v1"],"description":"read the specified CronJob","operationId":"readBatchV1NamespacedCronJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"put":{"tags":["batch_v1"],"description":"replace the specified CronJob","operationId":"replaceBatchV1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"delete":{"tags":["batch_v1"],"description":"delete a CronJob","operationId":"deleteBatchV1NamespacedCronJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. 
Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"patch":{"tags":["batch_v1"],"description":"partially update the specified CronJob","operationId":"patchBatchV1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status":{"get":{"tags":["batch_v1"],"description":"read status of the specified CronJob","operationId":"readBatchV1NamespacedCronJobStatus","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"put":{"tags":["batch_v1"],"description":"replace status of the specified CronJob","operationId":"replaceBatchV1NamespacedCronJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"patch":{"tags":["batch_v1"],"description":"partially update status of the specified CronJob","operationId":"patchBatchV1NamespacedCronJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/jobs":{"get":{"tags":["batch_v1"],"description":"list or watch objects of kind Job","operationId":"listBatchV1NamespacedJob","parameters":[{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}},{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobList"}}}}}},"post":{"tags":["batch_v1"],"description":"create a Job","operationId":"createBatchV1NamespacedJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"delete":{"tags":["batch_v1"],"description":"delete collection of Job","operationId":"deleteBatchV1CollectionNamespacedJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"parameters":[{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/jobs/{name}":{"get":{"tags":["batch_v1"],"description":"read the specified Job","operationId":"readBatchV1NamespacedJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"put":{"tags":["batch_v1"],"description":"replace the specified Job","operationId":"replaceBatchV1NamespacedJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"delete":{"tags":["batch_v1"],"description":"delete a Job","operationId":"deleteBatchV1NamespacedJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"patch":{"tags":["batch_v1"],"description":"partially update the specified Job","operationId":"patchBatchV1NamespacedJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the Job","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status":{"get":{"tags":["batch_v1"],"description":"read status of the specified Job","operationId":"readBatchV1NamespacedJobStatus","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"put":{"tags":["batch_v1"],"description":"replace status of the specified Job","operationId":"replaceBatchV1NamespacedJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"patch":{"tags":["batch_v1"],"description":"partially update status of the specified Job","operationId":"patchBatchV1NamespacedJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the Job","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1/watch/cronjobs":{"get":{"tags":["batch_v1"],"description":"watch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1CronJobListForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/jobs":{"get":{"tags":["batch_v1"],"description":"watch individual changes to a list of Job. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1JobListForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/namespaces/{namespace}/cronjobs":{"get":{"tags":["batch_v1"],"description":"watch individual changes to a list of CronJob. 
deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1NamespacedCronJobList","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/namespaces/{namespace}/cronjobs/{name}":{"get":{"tags":["batch_v1"],"description":"watch changes to an object of kind CronJob. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.","operationId":"watchBatchV1NamespacedCronJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/namespaces/{namespace}/jobs":{"get":{"tags":["batch_v1"],"description":"watch individual changes to a list of Job. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1NamespacedJobList","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1/watch/namespaces/{namespace}/jobs/{name}":{"get":{"tags":["batch_v1"],"description":"watch changes to an object of kind Job. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.","operationId":"watchBatchV1NamespacedJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"name","in":"path","description":"name of the Job","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]}},"components":{"schemas":{"io.k8s.api.batch.v1.CronJob":{"description":"CronJob represents the configuration of a single cron job.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobSpec"},"status":{"description":"Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJobStatus"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"CronJob","version":"v1"}]},"io.k8s.api.batch.v1.CronJobList":{"description":"CronJobList is a collection of cron jobs.","type":"object","required":["items"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"items":{"description":"items is the list of CronJobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.CronJob"}},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"CronJobList","version":"v1"}]},"io.k8s.api.batch.v1.CronJobSpec":{"description":"CronJobSpec describes how the job execution will look like and when it will actually run.","type":"object","required":["schedule","jobTemplate"],"properties":{"concurrencyPolicy":{"description":"Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one\n\n","type":"string"},"failedJobsHistoryLimit":{"description":"The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1.","type":"integer","format":"int32"},"jobTemplate":{"description":"Specifies the job that will be created when executing a CronJob.","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobTemplateSpec"},"schedule":{"description":"The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.","type":"string","default":""},"startingDeadlineSeconds":{"description":"Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.","type":"integer","format":"int64"},"successfulJobsHistoryLimit":{"description":"The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.","type":"integer","format":"int32"},"suspend":{"description":"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.","type":"boolean"}}},"io.k8s.api.batch.v1.CronJobStatus":{"description":"CronJobStatus represents the current state of a cron job.","type":"object","properties":{"active":{"description":"A list of pointers to currently running jobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectReference"},"x-kubernetes-list-type":"atomic"},"lastScheduleTime":{"description":"Information when was the last time the job was successfully scheduled.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"lastSuccessfulTime":{"description":"Information when was the last time the job successfully completed.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"}}},"io.k8s.api.batch.v1.Job":{"description":"Job represents the configuration of a single job.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobSpec"},"status":{"description":"Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobStatus"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"Job","version":"v1"}]},"io.k8s.api.batch.v1.JobCondition":{"description":"JobCondition describes current state of a job.","type":"object","required":["type","status"],"properties":{"lastProbeTime":{"description":"Last time the condition was checked.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"lastTransitionTime":{"description":"Last time the condition transit from one status to another.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"message":{"description":"Human readable message indicating details about last transition.","type":"string"},"reason":{"description":"(brief) reason for the condition's last transition.","type":"string"},"status":{"description":"Status of the condition, one of True, False, Unknown.","type":"string","default":""},"type":{"description":"Type of job condition, Complete or Failed.\n\n","type":"string","default":""}}},"io.k8s.api.batch.v1.JobList":{"description":"JobList is a collection of jobs.","type":"object","required":["items"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"items":{"description":"items is the list of Jobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.Job"}},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"JobList","version":"v1"}]},"io.k8s.api.batch.v1.JobSpec":{"description":"JobSpec describes how the job execution will look like.","type":"object","required":["template"],"properties":{"activeDeadlineSeconds":{"description":"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. 
If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.","type":"integer","format":"int64"},"backoffLimit":{"description":"Specifies the number of retries before marking this job failed. Defaults to 6","type":"integer","format":"int32"},"completionMode":{"description":"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nThis field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.","type":"string"},"completions":{"description":"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"integer","format":"int32"},"manualSelector":{"description":"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector","type":"boolean"},"parallelism":{"description":"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) \u003c .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"integer","format":"int32"},"selector":{"description":"A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"suspend":{"description":"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. 
If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\nThis field is beta-level, gated by SuspendJob feature flag (enabled by default).","type":"boolean"},"template":{"description":"Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodTemplateSpec"},"ttlSecondsAfterFinished":{"description":"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.","type":"integer","format":"int32"}}},"io.k8s.api.batch.v1.JobStatus":{"description":"JobStatus represents the current state of a Job.","type":"object","properties":{"active":{"description":"The number of pending and running pods.","type":"integer","format":"int32"},"completedIndexes":{"description":"CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".","type":"string"},"completionTime":{"description":"Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"conditions":{"description":"The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobCondition"},"x-kubernetes-list-type":"atomic","x-kubernetes-patch-merge-key":"type","x-kubernetes-patch-strategy":"merge"},"failed":{"description":"The number of pods which reached phase Failed.","type":"integer","format":"int32"},"ready":{"description":"The number of pods which have a Ready condition.\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobReadyPods is enabled (disabled by default).","type":"integer","format":"int32"},"startTime":{"description":"Represents time when the job controller started processing a job. 
When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"succeeded":{"description":"The number of pods which reached phase Succeeded.","type":"integer","format":"int32"},"uncountedTerminatedPods":{"description":"UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nThis field is beta-level. The job controller only makes use of this field when the feature gate JobTrackingWithFinalizers is enabled (enabled by default). Old jobs might not be tracked using this field, in which case the field remains null.","$ref":"#/components/schemas/io.k8s.api.batch.v1.UncountedTerminatedPods"}}},"io.k8s.api.batch.v1.JobTemplateSpec":{"description":"JobTemplateSpec describes the data a Job should have when created from a template","type":"object","properties":{"metadata":{"description":"Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobSpec"}}},"io.k8s.api.batch.v1.UncountedTerminatedPods":{"description":"UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.","type":"object","properties":{"failed":{"description":"Failed holds UIDs of failed Pods.","type":"array","items":{"type":"string","default":""},"x-kubernetes-list-type":"set"},"succeeded":{"description":"Succeeded holds UIDs of succeeded Pods.","type":"array","items":{"type":"string","default":""},"x-kubernetes-list-type":"set"}}},"io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource":{"description":"Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"string"},"partition":{"description":"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". 
Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).","type":"integer","format":"int32"},"readOnly":{"description":"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"boolean"},"volumeID":{"description":"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"string","default":""}}},"io.k8s.api.core.v1.Affinity":{"description":"Affinity is a group of affinity scheduling rules.","type":"object","properties":{"nodeAffinity":{"description":"Describes node affinity scheduling rules for the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.NodeAffinity"},"podAffinity":{"description":"Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).","$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinity"},"podAntiAffinity":{"description":"Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).","$ref":"#/components/schemas/io.k8s.api.core.v1.PodAntiAffinity"}}},"io.k8s.api.core.v1.AzureDiskVolumeSource":{"description":"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.","type":"object","required":["diskName","diskURI"],"properties":{"cachingMode":{"description":"cachingMode is the Host Caching mode: None, Read Only, Read Write.","type":"string"},"diskName":{"description":"diskName is the Name of the data disk in the blob storage","type":"string","default":""},"diskURI":{"description":"diskURI is the URI of data disk in the blob storage","type":"string","default":""},"fsType":{"description":"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"kind":{"description":"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared","type":"string"},"readOnly":{"description":"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"}}},"io.k8s.api.core.v1.AzureFileVolumeSource":{"description":"AzureFile represents an Azure File Service mount on the host and bind mount to the pod.","type":"object","required":["secretName","shareName"],"properties":{"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretName":{"description":"secretName is the name of secret that contains Azure Storage Account Name and Key","type":"string","default":""},"shareName":{"description":"shareName is the azure share Name","type":"string","default":""}}},"io.k8s.api.core.v1.CSIVolumeSource":{"description":"Represents a source location of a volume to mount, managed by an external CSI driver","type":"object","required":["driver"],"properties":{"driver":{"description":"driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.","type":"string","default":""},"fsType":{"description":"fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.","type":"string"},"nodePublishSecretRef":{"description":"nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"readOnly":{"description":"readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).","type":"boolean"},"volumeAttributes":{"description":"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.","type":"object","additionalProperties":{"type":"string","default":""}}}},"io.k8s.api.core.v1.Capabilities":{"description":"Adds and removes POSIX capabilities from running containers.","type":"object","properties":{"add":{"description":"Added capabilities","type":"array","items":{"type":"string","default":""}},"drop":{"description":"Removed capabilities","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.CephFSVolumeSource":{"description":"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.","type":"object","required":["monitors"],"properties":{"monitors":{"description":"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"array","items":{"type":"string","default":""}},"path":{"description":"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /","type":"string"},"readOnly":{"description":"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"boolean"},"secretFile":{"description":"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"string"},"secretRef":{"description":"secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"user":{"description":"user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"string"}}},"io.k8s.api.core.v1.CinderVolumeSource":{"description":"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"boolean"},"secretRef":{"description":"secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"volumeID":{"description":"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"string","default":""}}},"io.k8s.api.core.v1.ConfigMapEnvSource":{"description":"ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the ConfigMap must be defined","type":"boolean"}}},"io.k8s.api.core.v1.ConfigMapKeySelector":{"description":"Selects a key from a ConfigMap.","type":"object","required":["key"],"properties":{"key":{"description":"The key to select.","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the ConfigMap or its key must be defined","type":"boolean"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ConfigMapProjection":{"description":"Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.","type":"object","properties":{"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional specify whether the ConfigMap or its keys must be defined","type":"boolean"}}},"io.k8s.api.core.v1.ConfigMapVolumeSource":{"description":"Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. 
ConfigMap volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional specify whether the ConfigMap or its keys must be defined","type":"boolean"}}},"io.k8s.api.core.v1.Container":{"description":"A single application container that you want to run within a pod.","type":"object","required":["name"],"properties":{"args":{"description":"Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"command":{"description":"Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"env":{"description":"List of environment variables to set in the container. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVar"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"envFrom":{"description":"List of sources to populate environment variables in the container. 
The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvFromSource"}},"image":{"description":"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.","type":"string"},"imagePullPolicy":{"description":"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\n","type":"string"},"lifecycle":{"description":"Actions that the management system should take in response to container lifecycle events. Cannot be updated.","$ref":"#/components/schemas/io.k8s.api.core.v1.Lifecycle"},"livenessProbe":{"description":"Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"name":{"description":"Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.","type":"string","default":""},"ports":{"description":"List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ContainerPort"},"x-kubernetes-list-map-keys":["containerPort","protocol"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"containerPort","x-kubernetes-patch-strategy":"merge"},"readinessProbe":{"description":"Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"resources":{"description":"Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"securityContext":{"description":"SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/","$ref":"#/components/schemas/io.k8s.api.core.v1.SecurityContext"},"startupProbe":{"description":"StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. 
If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"stdin":{"description":"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.","type":"boolean"},"stdinOnce":{"description":"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false","type":"boolean"},"terminationMessagePath":{"description":"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.","type":"string"},"terminationMessagePolicy":{"description":"Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\n","type":"string"},"tty":{"description":"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.","type":"boolean"},"volumeDevices":{"description":"volumeDevices is the list of block devices to be used by the container.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeDevice"},"x-kubernetes-patch-merge-key":"devicePath","x-kubernetes-patch-strategy":"merge"},"volumeMounts":{"description":"Pod volumes to mount into the container's filesystem. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeMount"},"x-kubernetes-patch-merge-key":"mountPath","x-kubernetes-patch-strategy":"merge"},"workingDir":{"description":"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.","type":"string"}}},"io.k8s.api.core.v1.ContainerPort":{"description":"ContainerPort represents a network port in a single container.","type":"object","required":["containerPort"],"properties":{"containerPort":{"description":"Number of port to expose on the pod's IP address. 
This must be a valid port number, 0 \u003c x \u003c 65536.","type":"integer","format":"int32","default":0},"hostIP":{"description":"What host IP to bind the external port to.","type":"string"},"hostPort":{"description":"Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.","type":"integer","format":"int32"},"name":{"description":"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.","type":"string"},"protocol":{"description":"Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".\n\n","type":"string","default":"TCP"}}},"io.k8s.api.core.v1.DownwardAPIProjection":{"description":"Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.","type":"object","properties":{"items":{"description":"Items is a list of DownwardAPIVolume file","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile"}}}},"io.k8s.api.core.v1.DownwardAPIVolumeFile":{"description":"DownwardAPIVolumeFile represents information to create the file containing the pod field","type":"object","required":["path"],"properties":{"fieldRef":{"description":"Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector"},"mode":{"description":"Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"path":{"description":"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'","type":"string","default":""},"resourceFieldRef":{"description":"Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector"}}},"io.k8s.api.core.v1.DownwardAPIVolumeSource":{"description":"DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"Items is a list of downward API volume file","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile"}}}},"io.k8s.api.core.v1.EmptyDirVolumeSource":{"description":"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.","type":"object","properties":{"medium":{"description":"medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","type":"string"},"sizeLimit":{"description":"sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}}},"io.k8s.api.core.v1.EnvFromSource":{"description":"EnvFromSource represents the source of a set of ConfigMaps","type":"object","properties":{"configMapRef":{"description":"The ConfigMap to select from","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapEnvSource"},"prefix":{"description":"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.","type":"string"},"secretRef":{"description":"The Secret to select from","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretEnvSource"}}},"io.k8s.api.core.v1.EnvVar":{"description":"EnvVar represents an environment variable present in a Container.","type":"object","required":["name"],"properties":{"name":{"description":"Name of the environment variable. Must be a C_IDENTIFIER.","type":"string","default":""},"value":{"description":"Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".","type":"string"},"valueFrom":{"description":"Source for the environment variable's value. 
Cannot be used if value is not empty.","$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVarSource"}}},"io.k8s.api.core.v1.EnvVarSource":{"description":"EnvVarSource represents a source for the value of an EnvVar.","type":"object","properties":{"configMapKeyRef":{"description":"Selects a key of a ConfigMap.","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapKeySelector"},"fieldRef":{"description":"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.","$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector"},"resourceFieldRef":{"description":"Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector"},"secretKeyRef":{"description":"Selects a key of a secret in the pod's namespace","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretKeySelector"}}},"io.k8s.api.core.v1.EphemeralContainer":{"description":"An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.","type":"object","required":["name"],"properties":{"args":{"description":"Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"command":{"description":"Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"env":{"description":"List of environment variables to set in the container. 
Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVar"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"envFrom":{"description":"List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvFromSource"}},"image":{"description":"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images","type":"string"},"imagePullPolicy":{"description":"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\n","type":"string"},"lifecycle":{"description":"Lifecycle is not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Lifecycle"},"livenessProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"name":{"description":"Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.","type":"string","default":""},"ports":{"description":"Ports are not allowed for ephemeral containers.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ContainerPort"},"x-kubernetes-list-map-keys":["containerPort","protocol"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"containerPort","x-kubernetes-patch-strategy":"merge"},"readinessProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"resources":{"description":"Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"securityContext":{"description":"Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.","$ref":"#/components/schemas/io.k8s.api.core.v1.SecurityContext"},"startupProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"stdin":{"description":"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.","type":"boolean"},"stdinOnce":{"description":"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. 
If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false","type":"boolean"},"targetContainerName":{"description":"If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.","type":"string"},"terminationMessagePath":{"description":"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.","type":"string"},"terminationMessagePolicy":{"description":"Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\n","type":"string"},"tty":{"description":"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.","type":"boolean"},"volumeDevices":{"description":"volumeDevices is the list of block devices to be used by the container.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeDevice"},"x-kubernetes-patch-merge-key":"devicePath","x-kubernetes-patch-strategy":"merge"},"volumeMounts":{"description":"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeMount"},"x-kubernetes-patch-merge-key":"mountPath","x-kubernetes-patch-strategy":"merge"},"workingDir":{"description":"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.","type":"string"}}},"io.k8s.api.core.v1.EphemeralVolumeSource":{"description":"Represents an ephemeral volume that is handled by a normal storage driver.","type":"object","properties":{"volumeClaimTemplate":{"description":"Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `\u003cpod name\u003e-\u003cvolume name\u003e` where `\u003cvolume name\u003e` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. 
If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.","$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimTemplate"}}},"io.k8s.api.core.v1.ExecAction":{"description":"ExecAction describes a \"run in container\" action.","type":"object","properties":{"command":{"description":"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.FCVolumeSource":{"description":"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.","type":"object","properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"lun":{"description":"lun is Optional: FC target lun number","type":"integer","format":"int32"},"readOnly":{"description":"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"targetWWNs":{"description":"targetWWNs is Optional: FC target worldwide names (WWNs)","type":"array","items":{"type":"string","default":""}},"wwids":{"description":"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.FlexVolumeSource":{"description":"FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.","type":"object","required":["driver"],"properties":{"driver":{"description":"driver is the name of the driver to use for this volume.","type":"string","default":""},"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.","type":"string"},"options":{"description":"options is Optional: this field holds extra command options if any.","type":"object","additionalProperties":{"type":"string","default":""}},"readOnly":{"description":"readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. 
If the secret object contains more than one secret, all secrets are passed to the plugin scripts.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"}}},"io.k8s.api.core.v1.FlockerVolumeSource":{"description":"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.","type":"object","properties":{"datasetName":{"description":"datasetName is Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated","type":"string"},"datasetUUID":{"description":"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset","type":"string"}}},"io.k8s.api.core.v1.GCEPersistentDiskVolumeSource":{"description":"Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.","type":"object","required":["pdName"],"properties":{"fsType":{"description":"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"string"},"partition":{"description":"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"integer","format":"int32"},"pdName":{"description":"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"string","default":""},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"boolean"}}},"io.k8s.api.core.v1.GRPCAction":{"type":"object","required":["port"],"properties":{"port":{"description":"Port number of the gRPC service. Number must be in the range 1 to 65535.","type":"integer","format":"int32","default":0},"service":{"description":"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.","type":"string","default":""}}},"io.k8s.api.core.v1.GitRepoVolumeSource":{"description":"Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.","type":"object","required":["repository"],"properties":{"directory":{"description":"directory is the target directory name. Must not contain or start with '..'. If '.' 
is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.","type":"string"},"repository":{"description":"repository is the URL","type":"string","default":""},"revision":{"description":"revision is the commit hash for the specified revision.","type":"string"}}},"io.k8s.api.core.v1.GlusterfsVolumeSource":{"description":"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.","type":"object","required":["endpoints","path"],"properties":{"endpoints":{"description":"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"string","default":""},"path":{"description":"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"string","default":""},"readOnly":{"description":"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"boolean"}}},"io.k8s.api.core.v1.HTTPGetAction":{"description":"HTTPGetAction describes an action based on HTTP Get requests.","type":"object","required":["port"],"properties":{"host":{"description":"Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.","type":"string"},"httpHeaders":{"description":"Custom headers to set in the request. HTTP allows repeated headers.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPHeader"}},"path":{"description":"Path to access on the HTTP server.","type":"string"},"port":{"description":"Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"},"scheme":{"description":"Scheme to use for connecting to the host. Defaults to HTTP.\n\n","type":"string"}}},"io.k8s.api.core.v1.HTTPHeader":{"description":"HTTPHeader describes a custom header to be used in HTTP probes","type":"object","required":["name","value"],"properties":{"name":{"description":"The header field name","type":"string","default":""},"value":{"description":"The header field value","type":"string","default":""}}},"io.k8s.api.core.v1.HostAlias":{"description":"HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.","type":"object","properties":{"hostnames":{"description":"Hostnames for the above IP address.","type":"array","items":{"type":"string","default":""}},"ip":{"description":"IP address of the host file entry.","type":"string"}}},"io.k8s.api.core.v1.HostPathVolumeSource":{"description":"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.","type":"object","required":["path"],"properties":{"path":{"description":"path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","type":"string","default":""},"type":{"description":"type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","type":"string"}}},"io.k8s.api.core.v1.ISCSIVolumeSource":{"description":"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.","type":"object","required":["targetPortal","iqn","lun"],"properties":{"chapAuthDiscovery":{"description":"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication","type":"boolean"},"chapAuthSession":{"description":"chapAuthSession defines whether support iSCSI Session CHAP authentication","type":"boolean"},"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi","type":"string"},"initiatorName":{"description":"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.","type":"string"},"iqn":{"description":"iqn is the target iSCSI Qualified Name.","type":"string","default":""},"iscsiInterface":{"description":"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).","type":"string"},"lun":{"description":"lun represents iSCSI Target Lun number.","type":"integer","format":"int32","default":0},"portals":{"description":"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).","type":"array","items":{"type":"string","default":""}},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.","type":"boolean"},"secretRef":{"description":"secretRef is the CHAP Secret for iSCSI target and initiator authentication","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"targetPortal":{"description":"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).","type":"string","default":""}}},"io.k8s.api.core.v1.KeyToPath":{"description":"Maps a string key to a path within a volume.","type":"object","required":["key","path"],"properties":{"key":{"description":"key is the key to project.","type":"string","default":""},"mode":{"description":"mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"path":{"description":"path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
May not start with the string '..'.","type":"string","default":""}}},"io.k8s.api.core.v1.Lifecycle":{"description":"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.","type":"object","properties":{"postStart":{"description":"PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks","$ref":"#/components/schemas/io.k8s.api.core.v1.LifecycleHandler"},"preStop":{"description":"PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks","$ref":"#/components/schemas/io.k8s.api.core.v1.LifecycleHandler"}}},"io.k8s.api.core.v1.LifecycleHandler":{"description":"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.","type":"object","properties":{"exec":{"description":"Exec specifies the action to take.","$ref":"#/components/schemas/io.k8s.api.core.v1.ExecAction"},"httpGet":{"description":"HTTPGet specifies the http request to perform.","$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPGetAction"},"tcpSocket":{"description":"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.","$ref":"#/components/schemas/io.k8s.api.core.v1.TCPSocketAction"}}},"io.k8s.api.core.v1.LocalObjectReference":{"description":"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.NFSVolumeSource":{"description":"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.","type":"object","required":["server","path"],"properties":{"path":{"description":"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"string","default":""},"readOnly":{"description":"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"boolean"},"server":{"description":"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"string","default":""}}},"io.k8s.api.core.v1.NodeAffinity":{"description":"Node affinity is a group of node affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PreferredSchedulingTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.","$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelector"}}},"io.k8s.api.core.v1.NodeSelector":{"description":"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.","type":"object","required":["nodeSelectorTerms"],"properties":{"nodeSelectorTerms":{"description":"Required. A list of node selector terms. The terms are ORed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm"}}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.NodeSelectorRequirement":{"description":"A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.","type":"object","required":["key","operator"],"properties":{"key":{"description":"The label key that the selector applies to.","type":"string","default":""},"operator":{"description":"Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.\n\n","type":"string","default":""},"values":{"description":"An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.NodeSelectorTerm":{"description":"A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.","type":"object","properties":{"matchExpressions":{"description":"A list of node selector requirements by node's labels.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement"}},"matchFields":{"description":"A list of node selector requirements by node's fields.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement"}}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ObjectFieldSelector":{"description":"ObjectFieldSelector selects an APIVersioned field of an object.","type":"object","required":["fieldPath"],"properties":{"apiVersion":{"description":"Version of the schema the FieldPath is written in terms of, defaults to \"v1\".","type":"string"},"fieldPath":{"description":"Path of the field to select in the specified API version.","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ObjectReference":{"description":"ObjectReference contains enough information to let you inspect or modify the referred object.","type":"object","properties":{"apiVersion":{"description":"API version of the referent.","type":"string"},"fieldPath":{"description":"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.","type":"string"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"namespace":{"description":"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/","type":"string"},"resourceVersion":{"description":"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"uid":{"description":"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids","type":"string"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.PersistentVolumeClaimSpec":{"description":"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes","type":"object","properties":{"accessModes":{"description":"accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1","type":"array","items":{"type":"string","default":""}},"dataSource":{"description":"dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.","$ref":"#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference"},"dataSourceRef":{"description":"dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.","$ref":"#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference"},"resources":{"description":"resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"selector":{"description":"selector is a label query over volumes to consider for binding.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"storageClassName":{"description":"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1","type":"string"},"volumeMode":{"description":"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.","type":"string"},"volumeName":{"description":"volumeName is the binding reference to the PersistentVolume backing this claim.","type":"string"}}},"io.k8s.api.core.v1.PersistentVolumeClaimTemplate":{"description":"PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.","type":"object","required":["spec"],"properties":{"metadata":{"description":"May contain labels and annotations that will be copied into the PVC when creating it. 
No other fields are allowed and will be rejected during validation.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimSpec"}}},"io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource":{"description":"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).","type":"object","required":["claimName"],"properties":{"claimName":{"description":"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims","type":"string","default":""},"readOnly":{"description":"readOnly Will force the ReadOnly setting in VolumeMounts. Default false.","type":"boolean"}}},"io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource":{"description":"Represents a Photon Controller persistent disk resource.","type":"object","required":["pdID"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"pdID":{"description":"pdID is the ID that identifies Photon Controller persistent disk","type":"string","default":""}}},"io.k8s.api.core.v1.PodAffinity":{"description":"Pod affinity is a group of inter pod affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"}}}},"io.k8s.api.core.v1.PodAffinityTerm":{"description":"Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running","type":"object","required":["topologyKey"],"properties":{"labelSelector":{"description":"A label query over a set of resources, in this case pods.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"namespaceSelector":{"description":"A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"namespaces":{"description":"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"","type":"array","items":{"type":"string","default":""}},"topologyKey":{"description":"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.","type":"string","default":""}}},"io.k8s.api.core.v1.PodAntiAffinity":{"description":"Pod anti affinity is a group of inter pod anti affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"}}}},"io.k8s.api.core.v1.PodDNSConfig":{"description":"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.","type":"object","properties":{"nameservers":{"description":"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.","type":"array","items":{"type":"string","default":""}},"options":{"description":"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodDNSConfigOption"}},"searches":{"description":"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.PodDNSConfigOption":{"description":"PodDNSConfigOption defines DNS resolver options of a pod.","type":"object","properties":{"name":{"description":"Required.","type":"string"},"value":{"type":"string"}}},"io.k8s.api.core.v1.PodOS":{"description":"PodOS defines the OS parameters of a pod.","type":"object","required":["name"],"properties":{"name":{"description":"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null","type":"string","default":""}}},"io.k8s.api.core.v1.PodReadinessGate":{"description":"PodReadinessGate contains the reference to a pod condition","type":"object","required":["conditionType"],"properties":{"conditionType":{"description":"ConditionType refers to a condition in the pod's condition list with matching type.\n\n","type":"string","default":""}}},"io.k8s.api.core.v1.PodSecurityContext":{"description":"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.","type":"object","properties":{"fsGroup":{"description":"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"fsGroupChangePolicy":{"description":"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). 
It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.","type":"string"},"runAsGroup":{"description":"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"runAsNonRoot":{"description":"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"boolean"},"runAsUser":{"description":"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"seLinuxOptions":{"description":"The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SELinuxOptions"},"seccompProfile":{"description":"The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SeccompProfile"},"supplementalGroups":{"description":"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.","type":"array","items":{"type":"integer","format":"int64","default":0}},"sysctls":{"description":"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Sysctl"}},"windowsOptions":{"description":"The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is linux.","$ref":"#/components/schemas/io.k8s.api.core.v1.WindowsSecurityContextOptions"}}},"io.k8s.api.core.v1.PodSpec":{"description":"PodSpec is a description of a pod.","type":"object","required":["containers"],"properties":{"activeDeadlineSeconds":{"description":"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.","type":"integer","format":"int64"},"affinity":{"description":"If specified, the pod's scheduling constraints","$ref":"#/components/schemas/io.k8s.api.core.v1.Affinity"},"automountServiceAccountToken":{"description":"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.","type":"boolean"},"containers":{"description":"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Container"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"dnsConfig":{"description":"Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.","$ref":"#/components/schemas/io.k8s.api.core.v1.PodDNSConfig"},"dnsPolicy":{"description":"Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\n","type":"string"},"enableServiceLinks":{"description":"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.","type":"boolean"},"ephemeralContainers":{"description":"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EphemeralContainer"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"hostAliases":{"description":"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.HostAlias"},"x-kubernetes-patch-merge-key":"ip","x-kubernetes-patch-strategy":"merge"},"hostIPC":{"description":"Use the host's ipc namespace. Optional: Default to false.","type":"boolean"},"hostNetwork":{"description":"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.","type":"boolean"},"hostPID":{"description":"Use the host's pid namespace. 
Optional: Default to false.","type":"boolean"},"hostname":{"description":"Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.","type":"string"},"imagePullSecrets":{"description":"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"initContainers":{"description":"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Container"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"nodeName":{"description":"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.","type":"string"},"nodeSelector":{"description":"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/","type":"object","additionalProperties":{"type":"string","default":""},"x-kubernetes-map-type":"atomic"},"os":{"description":"Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is an alpha field and requires the IdentifyPodOS feature","$ref":"#/components/schemas/io.k8s.api.core.v1.PodOS"},"overhead":{"description":"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}},"preemptionPolicy":{"description":"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.","type":"string"},"priority":{"description":"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.","type":"integer","format":"int32"},"priorityClassName":{"description":"If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.","type":"string"},"readinessGates":{"description":"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodReadinessGate"}},"restartPolicy":{"description":"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n\n","type":"string"},"runtimeClassName":{"description":"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14.","type":"string"},"schedulerName":{"description":"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.","type":"string"},"securityContext":{"description":"SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.","$ref":"#/components/schemas/io.k8s.api.core.v1.PodSecurityContext"},"serviceAccount":{"description":"DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.","type":"string"},"serviceAccountName":{"description":"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/","type":"string"},"setHostnameAsFQDN":{"description":"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.","type":"boolean"},"shareProcessNamespace":{"description":"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.","type":"boolean"},"subdomain":{"description":"If specified, the fully qualified Pod hostname will be \"\u003chostname\u003e.\u003csubdomain\u003e.\u003cpod namespace\u003e.svc.\u003ccluster domain\u003e\". If not specified, the pod will not have a domainname at all.","type":"string"},"terminationGracePeriodSeconds":{"description":"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead.
The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.","type":"integer","format":"int64"},"tolerations":{"description":"If specified, the pod's tolerations.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Toleration"}},"topologySpreadConstraints":{"description":"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.TopologySpreadConstraint"},"x-kubernetes-list-map-keys":["topologyKey","whenUnsatisfiable"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"topologyKey","x-kubernetes-patch-strategy":"merge"},"volumes":{"description":"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Volume"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge,retainKeys"}}},"io.k8s.api.core.v1.PodTemplateSpec":{"description":"PodTemplateSpec describes the data a pod should have when created from a template","type":"object","properties":{"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodSpec"}}},"io.k8s.api.core.v1.PortworxVolumeSource":{"description":"PortworxVolumeSource represents a Portworx volume resource.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"volumeID":{"description":"volumeID uniquely identifies a Portworx volume","type":"string","default":""}}},"io.k8s.api.core.v1.PreferredSchedulingTerm":{"description":"An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).","type":"object","required":["weight","preference"],"properties":{"preference":{"description":"A node selector term, associated with the corresponding weight.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm"},"weight":{"description":"Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.","type":"integer","format":"int32","default":0}}},"io.k8s.api.core.v1.Probe":{"description":"Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.","type":"object","properties":{"exec":{"description":"Exec specifies the action to take.","$ref":"#/components/schemas/io.k8s.api.core.v1.ExecAction"},"failureThreshold":{"description":"Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.","type":"integer","format":"int32"},"grpc":{"description":"GRPC specifies an action involving a GRPC port.","$ref":"#/components/schemas/io.k8s.api.core.v1.GRPCAction"},"httpGet":{"description":"HTTPGet specifies the http request to perform.","$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPGetAction"},"initialDelaySeconds":{"description":"Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","type":"integer","format":"int32"},"periodSeconds":{"description":"How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.","type":"integer","format":"int32"},"successThreshold":{"description":"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.","type":"integer","format":"int32"},"tcpSocket":{"description":"TCPSocket specifies an action involving a TCP port.","$ref":"#/components/schemas/io.k8s.api.core.v1.TCPSocketAction"},"terminationGracePeriodSeconds":{"description":"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.","type":"integer","format":"int64"},"timeoutSeconds":{"description":"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","type":"integer","format":"int32"}}},"io.k8s.api.core.v1.ProjectedVolumeSource":{"description":"Represents a projected volume source","type":"object","properties":{"defaultMode":{"description":"defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"sources":{"description":"sources is the list of volume projections","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeProjection"}}}},"io.k8s.api.core.v1.QuobyteVolumeSource":{"description":"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.","type":"object","required":["registry","volume"],"properties":{"group":{"description":"group to map volume access to Default is no group","type":"string"},"readOnly":{"description":"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.","type":"boolean"},"registry":{"description":"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes","type":"string","default":""},"tenant":{"description":"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin","type":"string"},"user":{"description":"user to map volume access to Defaults to serviceaccount user","type":"string"},"volume":{"description":"volume is a string that references an already created Quobyte volume by name.","type":"string","default":""}}},"io.k8s.api.core.v1.RBDVolumeSource":{"description":"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.","type":"object","required":["monitors","image"],"properties":{"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd","type":"string"},"image":{"description":"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string","default":""},"keyring":{"description":"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"},"monitors":{"description":"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"array","items":{"type":"string","default":""}},"pool":{"description":"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"boolean"},"secretRef":{"description":"secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"user":{"description":"user is the rados user name. Default is admin.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"}}},"io.k8s.api.core.v1.ResourceFieldSelector":{"description":"ResourceFieldSelector represents container resources (cpu, memory) and their output format","type":"object","required":["resource"],"properties":{"containerName":{"description":"Container name: required for volumes, optional for env vars","type":"string"},"divisor":{"description":"Specifies the output format of the exposed resources, defaults to \"1\"","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"},"resource":{"description":"Required: resource to select","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ResourceRequirements":{"description":"ResourceRequirements describes the compute resource requirements.","type":"object","properties":{"limits":{"description":"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}},"requests":{"description":"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}}}},"io.k8s.api.core.v1.SELinuxOptions":{"description":"SELinuxOptions are the labels to be applied to the container","type":"object","properties":{"level":{"description":"Level is SELinux level label that applies to the container.","type":"string"},"role":{"description":"Role is a SELinux role label that applies to the container.","type":"string"},"type":{"description":"Type is a SELinux type label that applies to the container.","type":"string"},"user":{"description":"User is a SELinux user label that applies to the container.","type":"string"}}},"io.k8s.api.core.v1.ScaleIOVolumeSource":{"description":"ScaleIOVolumeSource represents a persistent ScaleIO volume","type":"object","required":["gateway","system","secretRef"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".","type":"string"},"gateway":{"description":"gateway is the host address of the ScaleIO API Gateway.","type":"string","default":""},"protectionDomain":{"description":"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.","type":"string"},"readOnly":{"description":"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"sslEnabled":{"description":"sslEnabled Flag enable/disable SSL communication with Gateway, default false","type":"boolean"},"storageMode":{"description":"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.","type":"string"},"storagePool":{"description":"storagePool is the ScaleIO Storage Pool associated with the protection domain.","type":"string"},"system":{"description":"system is the name of the storage system as configured in ScaleIO.","type":"string","default":""},"volumeName":{"description":"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.","type":"string"}}},"io.k8s.api.core.v1.SeccompProfile":{"description":"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.","type":"object","required":["type"],"properties":{"localhostProfile":{"description":"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".","type":"string"},"type":{"description":"type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.\n\n","type":"string","default":""}},"x-kubernetes-unions":[{"discriminator":"type","fields-to-discriminateBy":{"localhostProfile":"LocalhostProfile"}}]},"io.k8s.api.core.v1.SecretEnvSource":{"description":"SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the Secret must be defined","type":"boolean"}}},"io.k8s.api.core.v1.SecretKeySelector":{"description":"SecretKeySelector selects a key of a Secret.","type":"object","required":["key"],"properties":{"key":{"description":"The key of the secret to select from. Must be a valid secret key.","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the Secret or its key must be defined","type":"boolean"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.SecretProjection":{"description":"Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.","type":"object","properties":{"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional field specify whether the Secret or its key must be defined","type":"boolean"}}},"io.k8s.api.core.v1.SecretVolumeSource":{"description":"Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"optional":{"description":"optional field specify whether the Secret or its keys must be defined","type":"boolean"},"secretName":{"description":"secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret","type":"string"}}},"io.k8s.api.core.v1.SecurityContext":{"description":"SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.","type":"object","properties":{"allowPrivilegeEscalation":{"description":"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"capabilities":{"description":"The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.Capabilities"},"privileged":{"description":"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"procMount":{"description":"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. 
Note that this field cannot be set when spec.os.name is windows.","type":"string"},"readOnlyRootFilesystem":{"description":"Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"runAsGroup":{"description":"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"runAsNonRoot":{"description":"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"boolean"},"runAsUser":{"description":"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"seLinuxOptions":{"description":"The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SELinuxOptions"},"seccompProfile":{"description":"The seccomp options to use by this container. If seccomp options are provided at both the pod \u0026 container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SeccompProfile"},"windowsOptions":{"description":"The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.","$ref":"#/components/schemas/io.k8s.api.core.v1.WindowsSecurityContextOptions"}}},"io.k8s.api.core.v1.ServiceAccountTokenProjection":{"description":"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).","type":"object","required":["path"],"properties":{"audience":{"description":"audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.","type":"string"},"expirationSeconds":{"description":"expirationSeconds is the requested duration of validity of the service account token. 
As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.","type":"integer","format":"int64"},"path":{"description":"path is the path relative to the mount point of the file to project the token into.","type":"string","default":""}}},"io.k8s.api.core.v1.StorageOSVolumeSource":{"description":"Represents a StorageOS persistent volume resource.","type":"object","properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"volumeName":{"description":"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.","type":"string"},"volumeNamespace":{"description":"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.","type":"string"}}},"io.k8s.api.core.v1.Sysctl":{"description":"Sysctl defines a kernel parameter to be set","type":"object","required":["name","value"],"properties":{"name":{"description":"Name of a property to set","type":"string","default":""},"value":{"description":"Value of a property to set","type":"string","default":""}}},"io.k8s.api.core.v1.TCPSocketAction":{"description":"TCPSocketAction describes an action based on opening a socket","type":"object","required":["port"],"properties":{"host":{"description":"Optional: Host name to connect to, defaults to the pod IP.","type":"string"},"port":{"description":"Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"}}},"io.k8s.api.core.v1.Toleration":{"description":"The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.","type":"object","properties":{"effect":{"description":"Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n\n","type":"string"},"key":{"description":"Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.","type":"string"},"operator":{"description":"Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.\n\n","type":"string"},"tolerationSeconds":{"description":"TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.","type":"integer","format":"int64"},"value":{"description":"Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.","type":"string"}}},"io.k8s.api.core.v1.TopologySpreadConstraint":{"description":"TopologySpreadConstraint specifies how to spread matching pods among the given topology.","type":"object","required":["maxSkew","topologyKey","whenUnsatisfiable"],"properties":{"labelSelector":{"description":"LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"maxSkew":{"description":"MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.","type":"integer","format":"int32","default":0},"topologyKey":{"description":"TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.","type":"string","default":""},"whenUnsatisfiable":{"description":"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. 
It's a required field.\n\n","type":"string","default":""}}},"io.k8s.api.core.v1.TypedLocalObjectReference":{"description":"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.","type":"object","required":["kind","name"],"properties":{"apiGroup":{"description":"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.","type":"string"},"kind":{"description":"Kind is the type of resource being referenced","type":"string","default":""},"name":{"description":"Name is the name of resource being referenced","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.Volume":{"description":"Volume represents a named volume in a pod that may be accessed by any container in the pod.","type":"object","required":["name"],"properties":{"awsElasticBlockStore":{"description":"awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","$ref":"#/components/schemas/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource"},"azureDisk":{"description":"azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.AzureDiskVolumeSource"},"azureFile":{"description":"azureFile represents an Azure File Service mount on the host and bind mount to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.AzureFileVolumeSource"},"cephfs":{"description":"cephFS represents a Ceph FS mount on the host that shares a pod's lifetime","$ref":"#/components/schemas/io.k8s.api.core.v1.CephFSVolumeSource"},"cinder":{"description":"cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.CinderVolumeSource"},"configMap":{"description":"configMap represents a configMap that should populate this volume","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapVolumeSource"},"csi":{"description":"csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).","$ref":"#/components/schemas/io.k8s.api.core.v1.CSIVolumeSource"},"downwardAPI":{"description":"downwardAPI represents downward API about the pod that should populate this volume","$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeSource"},"emptyDir":{"description":"emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","$ref":"#/components/schemas/io.k8s.api.core.v1.EmptyDirVolumeSource"},"ephemeral":{"description":"ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.","$ref":"#/components/schemas/io.k8s.api.core.v1.EphemeralVolumeSource"},"fc":{"description":"fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.FCVolumeSource"},"flexVolume":{"description":"flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.","$ref":"#/components/schemas/io.k8s.api.core.v1.FlexVolumeSource"},"flocker":{"description":"flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running","$ref":"#/components/schemas/io.k8s.api.core.v1.FlockerVolumeSource"},"gcePersistentDisk":{"description":"gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","$ref":"#/components/schemas/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource"},"gitRepo":{"description":"gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.","$ref":"#/components/schemas/io.k8s.api.core.v1.GitRepoVolumeSource"},"glusterfs":{"description":"glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.GlusterfsVolumeSource"},"hostPath":{"description":"hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","$ref":"#/components/schemas/io.k8s.api.core.v1.HostPathVolumeSource"},"iscsi":{"description":"iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.ISCSIVolumeSource"},"name":{"description":"name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string","default":""},"nfs":{"description":"nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","$ref":"#/components/schemas/io.k8s.api.core.v1.NFSVolumeSource"},"persistentVolumeClaim":{"description":"persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims","$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource"},"photonPersistentDisk":{"description":"photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource"},"portworxVolume":{"description":"portworxVolume represents a portworx volume attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.PortworxVolumeSource"},"projected":{"description":"projected items for all in one resources secrets, configmaps, and downward API","$ref":"#/components/schemas/io.k8s.api.core.v1.ProjectedVolumeSource"},"quobyte":{"description":"quobyte represents a Quobyte mount on the host that shares a pod's lifetime","$ref":"#/components/schemas/io.k8s.api.core.v1.QuobyteVolumeSource"},"rbd":{"description":"rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.RBDVolumeSource"},"scaleIO":{"description":"scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.","$ref":"#/components/schemas/io.k8s.api.core.v1.ScaleIOVolumeSource"},"secret":{"description":"secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretVolumeSource"},"storageos":{"description":"storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.","$ref":"#/components/schemas/io.k8s.api.core.v1.StorageOSVolumeSource"},"vsphereVolume":{"description":"vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"}}},"io.k8s.api.core.v1.VolumeDevice":{"description":"volumeDevice describes a mapping of a raw block device within a container.","type":"object","required":["name","devicePath"],"properties":{"devicePath":{"description":"devicePath is the path inside of the container that the device will be mapped to.","type":"string","default":""},"name":{"description":"name must match the name of a persistentVolumeClaim in the pod","type":"string","default":""}}},"io.k8s.api.core.v1.VolumeMount":{"description":"VolumeMount describes a mounting of a Volume within a container.","type":"object","required":["name","mountPath"],"properties":{"mountPath":{"description":"Path within the container at which the volume should be mounted. Must not contain ':'.","type":"string","default":""},"mountPropagation":{"description":"mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. 
This field is beta in 1.10.","type":"string"},"name":{"description":"This must match the Name of a Volume.","type":"string","default":""},"readOnly":{"description":"Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.","type":"boolean"},"subPath":{"description":"Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).","type":"string"},"subPathExpr":{"description":"Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.","type":"string"}}},"io.k8s.api.core.v1.VolumeProjection":{"description":"Projection that may be projected along with other supported volume types","type":"object","properties":{"configMap":{"description":"configMap information about the configMap data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapProjection"},"downwardAPI":{"description":"downwardAPI information about the downwardAPI data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIProjection"},"secret":{"description":"secret information about the secret data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretProjection"},"serviceAccountToken":{"description":"serviceAccountToken is information about the serviceAccountToken data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.ServiceAccountTokenProjection"}}},"io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource":{"description":"Represents a vSphere volume resource.","type":"object","required":["volumePath"],"properties":{"fsType":{"description":"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"storagePolicyID":{"description":"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.","type":"string"},"storagePolicyName":{"description":"storagePolicyName is the storage Policy Based Management (SPBM) profile name.","type":"string"},"volumePath":{"description":"volumePath is the path that identifies vSphere volume vmdk","type":"string","default":""}}},"io.k8s.api.core.v1.WeightedPodAffinityTerm":{"description":"The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)","type":"object","required":["weight","podAffinityTerm"],"properties":{"podAffinityTerm":{"description":"Required. 
A pod affinity term, associated with the corresponding weight.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"},"weight":{"description":"weight associated with matching the corresponding podAffinityTerm, in the range 1-100.","type":"integer","format":"int32","default":0}}},"io.k8s.api.core.v1.WindowsSecurityContextOptions":{"description":"WindowsSecurityContextOptions contain Windows-specific options and credentials.","type":"object","properties":{"gmsaCredentialSpec":{"description":"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.","type":"string"},"gmsaCredentialSpecName":{"description":"GMSACredentialSpecName is the name of the GMSA credential spec to use.","type":"string"},"hostProcess":{"description":"HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.","type":"boolean"},"runAsUserName":{"description":"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"string"}}},"io.k8s.apimachinery.pkg.api.resource.Quantity":{"description":"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.","type":"string"},"io.k8s.apimachinery.pkg.apis.meta.v1.APIResource":{"description":"APIResource specifies the name of a resource and whether it is namespaced.","type":"object","required":["name","singularName","namespaced","kind","verbs"],"properties":{"categories":{"description":"categories is a list of the grouped resources this resource belongs to (e.g. 'all')","type":"array","items":{"type":"string","default":""}},"group":{"description":"group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".","type":"string"},"kind":{"description":"kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')","type":"string","default":""},"name":{"description":"name is the plural name of the resource.","type":"string","default":""},"namespaced":{"description":"namespaced indicates if a resource is namespaced or not.","type":"boolean","default":false},"shortNames":{"description":"shortNames is a list of suggested short names of the resource.","type":"array","items":{"type":"string","default":""}},"singularName":{"description":"singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.","type":"string","default":""},"storageVersionHash":{"description":"The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.","type":"string"},"verbs":{"description":"verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)","type":"array","items":{"type":"string","default":""}},"version":{"description":"version is the preferred version of the resource. 
Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList":{"description":"APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.","type":"object","required":["groupVersion","resources"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"groupVersion":{"description":"groupVersion is the group and version this APIResourceList is for.","type":"string","default":""},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"resources":{"description":"resources contains the name of the resources and if they are namespaced.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource"}}},"x-kubernetes-group-version-kind":[{"group":"","kind":"APIResourceList","version":"v1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions":{"description":"DeleteOptions may be provided when deleting an API object.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"dryRun":{"description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","type":"array","items":{"type":"string","default":""}},"gracePeriodSeconds":{"description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","type":"integer","format":"int64"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"orphanDependents":{"description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both.","type":"boolean"},"preconditions":{"description":"Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions"},"propagationPolicy":{"description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","type":"string"}},"x-kubernetes-group-version-kind":[{"group":"","kind":"DeleteOptions","version":"v1"},{"group":"admission.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"admission.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"admissionregistration.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"admissionregistration.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apiextensions.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"apiextensions.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apiregistration.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"apiregistration.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apps","kind":"DeleteOptions","version":"v1"},{"group":"apps","kind":"DeleteOptions","version":"v1beta1"},{"group":"apps","kind":"DeleteOptions","version":"v1beta2"},{"group":"authentication.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"authentication.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"authorization.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"authorization.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2beta1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2beta2"},{"group":"batch","kind":"DeleteOptions","version":"v1"},{"group":"batch","kind":"DeleteOptions","version":"v1beta1"},{"group":"certificates.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"certificates.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"coordination.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"coordination.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"discovery.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"discovery.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"events.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"events.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"extensions","kind":"DeleteOptions","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1beta2"},{"group":"imagepolicy.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"internal.apiserver.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"networking.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"networking.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":
"node.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"node.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"node.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"policy","kind":"DeleteOptions","version":"v1"},{"group":"policy","kind":"DeleteOptions","version":"v1beta1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1beta1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1":{"description":"FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff","type":"object"},"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector":{"description":"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.","type":"object","properties":{"matchExpressions":{"description":"matchExpressions is a list of label selector requirements. The requirements are ANDed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement"}},"matchLabels":{"description":"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.","type":"object","additionalProperties":{"type":"string","default":""}}},"x-kubernetes-map-type":"atomic"},"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement":{"description":"A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.","type":"object","required":["key","operator"],"properties":{"key":{"description":"key is the label key that the selector applies to.","type":"string","default":"","x-kubernetes-patch-merge-key":"key","x-kubernetes-patch-strategy":"merge"},"operator":{"description":"operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.","type":"string","default":""},"values":{"description":"values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta":{"description":"ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.","type":"object","properties":{"continue":{"description":"continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.","type":"string"},"remainingItemCount":{"description":"remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.","type":"integer","format":"int64"},"resourceVersion":{"description":"String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is DEPRECATED read-only field that is no longer populated by the system.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry":{"description":"ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.","type":"string"},"fieldsType":{"description":"FieldsType is the discriminator for the different fields format and version. 
There is currently only one possible value: \"FieldsV1\"","type":"string"},"fieldsV1":{"description":"FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"},"manager":{"description":"Manager is an identifier of the workflow managing these fields.","type":"string"},"operation":{"description":"Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.","type":"string"},"subresource":{"description":"Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.","type":"string"},"time":{"description":"Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta":{"description":"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.","type":"object","properties":{"annotations":{"description":"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations","type":"object","additionalProperties":{"type":"string","default":""}},"clusterName":{"description":"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.","type":"string"},"creationTimestamp":{"description":"CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"deletionGracePeriodSeconds":{"description":"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.","type":"integer","format":"int64"},"deletionTimestamp":{"description":"DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"finalizers":{"description":"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.","type":"array","items":{"type":"string","default":""},"x-kubernetes-patch-strategy":"merge"},"generateName":{"description":"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency","type":"string"},"generation":{"description":"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.","type":"integer","format":"int64"},"labels":{"description":"Map of string keys and values that can be used to organize and categorize (scope and select) objects. 
May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels","type":"object","additionalProperties":{"type":"string","default":""}},"managedFields":{"description":"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry"}},"name":{"description":"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names","type":"string"},"namespace":{"description":"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces","type":"string"},"ownerReferences":{"description":"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"},"x-kubernetes-patch-merge-key":"uid","x-kubernetes-patch-strategy":"merge"},"resourceVersion":{"description":"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is DEPRECATED read-only field that is no longer populated by the system.","type":"string"},"uid":{"description":"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference":{"description":"OwnerReference contains enough information to let you identify an owning object. 
An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.","type":"object","required":["apiVersion","kind","name","uid"],"properties":{"apiVersion":{"description":"API version of the referent.","type":"string","default":""},"blockOwnerDeletion":{"description":"If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.","type":"boolean"},"controller":{"description":"If true, this reference points to the managing controller.","type":"boolean"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names","type":"string","default":""},"uid":{"description":"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.apimachinery.pkg.apis.meta.v1.Patch":{"description":"Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.","type":"object"},"io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions":{"description":"Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.","type":"object","properties":{"resourceVersion":{"description":"Specifies the target ResourceVersion","type":"string"},"uid":{"description":"Specifies the target UID.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.Status":{"description":"Status is a return value for calls that don't return other objects.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"code":{"description":"Suggested HTTP return code for this status, 0 if not set.","type":"integer","format":"int32"},"details":{"description":"Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"message":{"description":"A human-readable description of the status of this operation.","type":"string"},"metadata":{"description":"Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"},"reason":{"description":"A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.","type":"string"},"status":{"description":"Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","type":"string"}},"x-kubernetes-group-version-kind":[{"group":"","kind":"Status","version":"v1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause":{"description":"StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.","type":"object","properties":{"field":{"description":"The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"","type":"string"},"message":{"description":"A human-readable description of the cause of the error. This field may be presented as-is to a reader.","type":"string"},"reason":{"description":"A machine-readable description of the cause of the error. If this value is empty there is no information available.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails":{"description":"StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.","type":"object","properties":{"causes":{"description":"The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"}},"group":{"description":"The group attribute of the resource associated with the status StatusReason.","type":"string"},"kind":{"description":"The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"name":{"description":"The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).","type":"string"},"retryAfterSeconds":{"description":"If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.","type":"integer","format":"int32"},"uid":{"description":"UID of the resource. (when there is a single resource which can be described). 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.Time":{"description":"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.","type":"string","format":"date-time"},"io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent":{"description":"Event represents a single event to a watched resource.","type":"object","required":["type","object"],"properties":{"object":{"description":"Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension"},"type":{"type":"string","default":""}},"x-kubernetes-group-version-kind":[{"group":"","kind":"WatchEvent","version":"v1"},{"group":"admission.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"admission.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"admissionregistration.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"admissionregistration.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apiextensions.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"apiextensions.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apiregistration.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"apiregistration.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apps","kind":"WatchEvent","version":"v1"},{"group":"apps","kind":"WatchEvent","version":"v1beta1"},{"group":"apps","kind":"WatchEvent","version":"v1beta2"},{"group":"authentication.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"authentication.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"authorization.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"authorization.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"autoscaling","kind":"WatchEvent","version":"v1"},{"group":"autoscaling","kind":"WatchEvent","version":"v2"},{"group":"autoscaling","kind":"WatchEvent","version":"v2beta1"},{"group":"autoscaling","kind":"WatchEvent","version":"v2beta2"},{"group":"batch","kind":"WatchEvent","version":"v1"},{"group":"batch","kind":"WatchEvent","version":"v1beta1"},{"group":"certificates.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"certificates.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"coordination.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"coordination.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"discovery.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"discovery.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"events.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"events.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"extensions","kind":"WatchEvent","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1beta2"},{"group":"imagepolicy.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"internal.apiserver.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"networking.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"networking.k8s.io","kind":"WatchEvent","version":"v1beta1
"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"policy","kind":"WatchEvent","version":"v1"},{"group":"policy","kind":"WatchEvent","version":"v1beta1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1beta1"}]},"io.k8s.apimachinery.pkg.runtime.RawExtension":{"description":"RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)","type":"object"},"io.k8s.apimachinery.pkg.util.intstr.IntOrString":{"description":"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number.","type":"string","format":"int-or-string"}}}} diff --git a/staging/src/k8s.io/client-go/discovery/testdata/apis/batch/v1beta1.json b/staging/src/k8s.io/client-go/discovery/testdata/apis/batch/v1beta1.json index 5a48fe7cfd985..cc4e8a0b81472 100644 --- a/staging/src/k8s.io/client-go/discovery/testdata/apis/batch/v1beta1.json +++ b/staging/src/k8s.io/client-go/discovery/testdata/apis/batch/v1beta1.json @@ -1 +1 @@ -{"openapi":"3.0.0","info":{"title":"Kubernetes","version":"v1.24.0"},"paths":{"/apis/batch/v1beta1/":{"get":{"tags":["batch_v1beta1"],"description":"get available resources","operationId":"getBatchV1beta1APIResources","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}}}}}}},"/apis/batch/v1beta1/cronjobs":{"get":{"tags":["batch_v1beta1"],"description":"list or watch objects of kind CronJob","operationId":"listBatchV1beta1CronJobForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1beta1/namespaces/{namespace}/cronjobs":{"get":{"tags":["batch_v1beta1"],"description":"list or watch objects of kind CronJob","operationId":"listBatchV1beta1NamespacedCronJob","parameters":[{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}},{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}}}}}},"post":{"tags":["batch_v1beta1"],"description":"create a CronJob","operationId":"createBatchV1beta1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"delete":{"tags":["batch_v1beta1"],"description":"delete collection of CronJob","operationId":"deleteBatchV1beta1CollectionNamespacedCronJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"parameters":[{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}":{"get":{"tags":["batch_v1beta1"],"description":"read the specified CronJob","operationId":"readBatchV1beta1NamespacedCronJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"put":{"tags":["batch_v1beta1"],"description":"replace the specified CronJob","operationId":"replaceBatchV1beta1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"delete":{"tags":["batch_v1beta1"],"description":"delete a CronJob","operationId":"deleteBatchV1beta1NamespacedCronJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"patch":{"tags":["batch_v1beta1"],"description":"partially update the specified CronJob","operationId":"patchBatchV1beta1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status":{"get":{"tags":["batch_v1beta1"],"description":"read status of the specified CronJob","operationId":"readBatchV1beta1NamespacedCronJobStatus","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"put":{"tags":["batch_v1beta1"],"description":"replace status of the specified CronJob","operationId":"replaceBatchV1beta1NamespacedCronJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"patch":{"tags":["batch_v1beta1"],"description":"partially update status of the specified CronJob","operationId":"patchBatchV1beta1NamespacedCronJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1beta1/watch/cronjobs":{"get":{"tags":["batch_v1beta1"],"description":"watch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1beta1CronJobListForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1beta1/watch/namespaces/{namespace}/cronjobs":{"get":{"tags":["batch_v1beta1"],"description":"watch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1beta1NamespacedCronJobList","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1beta1/watch/namespaces/{namespace}/cronjobs/{name}":{"get":{"tags":["batch_v1beta1"],"description":"watch changes to an object of kind CronJob. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.","operationId":"watchBatchV1beta1NamespacedCronJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]}},"components":{"schemas":{"io.k8s.api.batch.v1.JobSpec":{"description":"JobSpec describes how the job execution will look like.","type":"object","required":["template"],"properties":{"activeDeadlineSeconds":{"description":"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.","type":"integer","format":"int64"},"backoffLimit":{"description":"Specifies the number of retries before marking this job failed. 
Defaults to 6","type":"integer","format":"int32"},"completionMode":{"description":"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nThis field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.","type":"string"},"completions":{"description":"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"integer","format":"int32"},"manualSelector":{"description":"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector","type":"boolean"},"parallelism":{"description":"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) \u003c .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"integer","format":"int32"},"selector":{"description":"A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"suspend":{"description":"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. 
Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\nThis field is beta-level, gated by SuspendJob feature flag (enabled by default).","type":"boolean"},"template":{"description":"Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodTemplateSpec"},"ttlSecondsAfterFinished":{"description":"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.","type":"integer","format":"int32"}}},"io.k8s.api.batch.v1beta1.CronJob":{"description":"CronJob represents the configuration of a single cron job.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobSpec"},"status":{"description":"Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobStatus"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"CronJob","version":"v1beta1"}]},"io.k8s.api.batch.v1beta1.CronJobList":{"description":"CronJobList is a collection of cron jobs.","type":"object","required":["items"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"items":{"description":"items is the list of CronJobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"kind":{"description":"Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"CronJobList","version":"v1beta1"}]},"io.k8s.api.batch.v1beta1.CronJobSpec":{"description":"CronJobSpec describes how the job execution will look like and when it will actually run.","type":"object","required":["schedule","jobTemplate"],"properties":{"concurrencyPolicy":{"description":"Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one","type":"string"},"failedJobsHistoryLimit":{"description":"The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.","type":"integer","format":"int32"},"jobTemplate":{"description":"Specifies the job that will be created when executing a CronJob.","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.JobTemplateSpec"},"schedule":{"description":"The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.","type":"string","default":""},"startingDeadlineSeconds":{"description":"Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.","type":"integer","format":"int64"},"successfulJobsHistoryLimit":{"description":"The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.","type":"integer","format":"int32"},"suspend":{"description":"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.","type":"boolean"}}},"io.k8s.api.batch.v1beta1.CronJobStatus":{"description":"CronJobStatus represents the current state of a cron job.","type":"object","properties":{"active":{"description":"A list of pointers to currently running jobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectReference"},"x-kubernetes-list-type":"atomic"},"lastScheduleTime":{"description":"Information when was the last time the job was successfully scheduled.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"lastSuccessfulTime":{"description":"Information when was the last time the job successfully completed.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"}}},"io.k8s.api.batch.v1beta1.JobTemplateSpec":{"description":"JobTemplateSpec describes the data a Job should have when created from a template","type":"object","properties":{"metadata":{"description":"Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of the job. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobSpec"}}},"io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource":{"description":"Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"string"},"partition":{"description":"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).","type":"integer","format":"int32"},"readOnly":{"description":"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"boolean"},"volumeID":{"description":"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"string","default":""}}},"io.k8s.api.core.v1.Affinity":{"description":"Affinity is a group of affinity scheduling rules.","type":"object","properties":{"nodeAffinity":{"description":"Describes node affinity scheduling rules for the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.NodeAffinity"},"podAffinity":{"description":"Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).","$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinity"},"podAntiAffinity":{"description":"Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).","$ref":"#/components/schemas/io.k8s.api.core.v1.PodAntiAffinity"}}},"io.k8s.api.core.v1.AzureDiskVolumeSource":{"description":"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.","type":"object","required":["diskName","diskURI"],"properties":{"cachingMode":{"description":"cachingMode is the Host Caching mode: None, Read Only, Read Write.","type":"string"},"diskName":{"description":"diskName is the Name of the data disk in the blob storage","type":"string","default":""},"diskURI":{"description":"diskURI is the URI of data disk in the blob storage","type":"string","default":""},"fsType":{"description":"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"kind":{"description":"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared","type":"string"},"readOnly":{"description":"readOnly Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"}}},"io.k8s.api.core.v1.AzureFileVolumeSource":{"description":"AzureFile represents an Azure File Service mount on the host and bind mount to the pod.","type":"object","required":["secretName","shareName"],"properties":{"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretName":{"description":"secretName is the name of secret that contains Azure Storage Account Name and Key","type":"string","default":""},"shareName":{"description":"shareName is the azure share Name","type":"string","default":""}}},"io.k8s.api.core.v1.CSIVolumeSource":{"description":"Represents a source location of a volume to mount, managed by an external CSI driver","type":"object","required":["driver"],"properties":{"driver":{"description":"driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.","type":"string","default":""},"fsType":{"description":"fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.","type":"string"},"nodePublishSecretRef":{"description":"nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"readOnly":{"description":"readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).","type":"boolean"},"volumeAttributes":{"description":"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.","type":"object","additionalProperties":{"type":"string","default":""}}}},"io.k8s.api.core.v1.Capabilities":{"description":"Adds and removes POSIX capabilities from running containers.","type":"object","properties":{"add":{"description":"Added capabilities","type":"array","items":{"type":"string","default":""}},"drop":{"description":"Removed capabilities","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.CephFSVolumeSource":{"description":"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.","type":"object","required":["monitors"],"properties":{"monitors":{"description":"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"array","items":{"type":"string","default":""}},"path":{"description":"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /","type":"string"},"readOnly":{"description":"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"boolean"},"secretFile":{"description":"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"string"},"secretRef":{"description":"secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"user":{"description":"user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"string"}}},"io.k8s.api.core.v1.CinderVolumeSource":{"description":"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"boolean"},"secretRef":{"description":"secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"volumeID":{"description":"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"string","default":""}}},"io.k8s.api.core.v1.ConfigMapEnvSource":{"description":"ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the ConfigMap must be defined","type":"boolean"}}},"io.k8s.api.core.v1.ConfigMapKeySelector":{"description":"Selects a key from a ConfigMap.","type":"object","required":["key"],"properties":{"key":{"description":"The key to select.","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the ConfigMap or its key must be defined","type":"boolean"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ConfigMapProjection":{"description":"Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. 
Note that this is identical to a configmap volume source without the default mode.","type":"object","properties":{"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional specify whether the ConfigMap or its keys must be defined","type":"boolean"}}},"io.k8s.api.core.v1.ConfigMapVolumeSource":{"description":"Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional specify whether the ConfigMap or its keys must be defined","type":"boolean"}}},"io.k8s.api.core.v1.Container":{"description":"A single application container that you want to run within a pod.","type":"object","required":["name"],"properties":{"args":{"description":"Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"command":{"description":"Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"env":{"description":"List of environment variables to set in the container. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVar"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"envFrom":{"description":"List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvFromSource"}},"image":{"description":"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.","type":"string"},"imagePullPolicy":{"description":"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\n","type":"string"},"lifecycle":{"description":"Actions that the management system should take in response to container lifecycle events. Cannot be updated.","$ref":"#/components/schemas/io.k8s.api.core.v1.Lifecycle"},"livenessProbe":{"description":"Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"name":{"description":"Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.","type":"string","default":""},"ports":{"description":"List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. 
Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ContainerPort"},"x-kubernetes-list-map-keys":["containerPort","protocol"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"containerPort","x-kubernetes-patch-strategy":"merge"},"readinessProbe":{"description":"Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"resources":{"description":"Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"securityContext":{"description":"SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/","$ref":"#/components/schemas/io.k8s.api.core.v1.SecurityContext"},"startupProbe":{"description":"StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"stdin":{"description":"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.","type":"boolean"},"stdinOnce":{"description":"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false","type":"boolean"},"terminationMessagePath":{"description":"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.","type":"string"},"terminationMessagePolicy":{"description":"Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\n","type":"string"},"tty":{"description":"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.","type":"boolean"},"volumeDevices":{"description":"volumeDevices is the list of block devices to be used by the container.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeDevice"},"x-kubernetes-patch-merge-key":"devicePath","x-kubernetes-patch-strategy":"merge"},"volumeMounts":{"description":"Pod volumes to mount into the container's filesystem. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeMount"},"x-kubernetes-patch-merge-key":"mountPath","x-kubernetes-patch-strategy":"merge"},"workingDir":{"description":"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.","type":"string"}}},"io.k8s.api.core.v1.ContainerPort":{"description":"ContainerPort represents a network port in a single container.","type":"object","required":["containerPort"],"properties":{"containerPort":{"description":"Number of port to expose on the pod's IP address. This must be a valid port number, 0 \u003c x \u003c 65536.","type":"integer","format":"int32","default":0},"hostIP":{"description":"What host IP to bind the external port to.","type":"string"},"hostPort":{"description":"Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.","type":"integer","format":"int32"},"name":{"description":"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.","type":"string"},"protocol":{"description":"Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".\n\n","type":"string","default":"TCP"}}},"io.k8s.api.core.v1.DownwardAPIProjection":{"description":"Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.","type":"object","properties":{"items":{"description":"Items is a list of DownwardAPIVolume file","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile"}}}},"io.k8s.api.core.v1.DownwardAPIVolumeFile":{"description":"DownwardAPIVolumeFile represents information to create the file containing the pod field","type":"object","required":["path"],"properties":{"fieldRef":{"description":"Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector"},"mode":{"description":"Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"path":{"description":"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'","type":"string","default":""},"resourceFieldRef":{"description":"Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector"}}},"io.k8s.api.core.v1.DownwardAPIVolumeSource":{"description":"DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"Items is a list of downward API volume file","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile"}}}},"io.k8s.api.core.v1.EmptyDirVolumeSource":{"description":"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.","type":"object","properties":{"medium":{"description":"medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","type":"string"},"sizeLimit":{"description":"sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}}},"io.k8s.api.core.v1.EnvFromSource":{"description":"EnvFromSource represents the source of a set of ConfigMaps","type":"object","properties":{"configMapRef":{"description":"The ConfigMap to select from","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapEnvSource"},"prefix":{"description":"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.","type":"string"},"secretRef":{"description":"The Secret to select from","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretEnvSource"}}},"io.k8s.api.core.v1.EnvVar":{"description":"EnvVar represents an environment variable present in a Container.","type":"object","required":["name"],"properties":{"name":{"description":"Name of the environment variable. 
Must be a C_IDENTIFIER.","type":"string","default":""},"value":{"description":"Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".","type":"string"},"valueFrom":{"description":"Source for the environment variable's value. Cannot be used if value is not empty.","$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVarSource"}}},"io.k8s.api.core.v1.EnvVarSource":{"description":"EnvVarSource represents a source for the value of an EnvVar.","type":"object","properties":{"configMapKeyRef":{"description":"Selects a key of a ConfigMap.","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapKeySelector"},"fieldRef":{"description":"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.","$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector"},"resourceFieldRef":{"description":"Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector"},"secretKeyRef":{"description":"Selects a key of a secret in the pod's namespace","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretKeySelector"}}},"io.k8s.api.core.v1.EphemeralContainer":{"description":"An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.","type":"object","required":["name"],"properties":{"args":{"description":"Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"command":{"description":"Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"env":{"description":"List of environment variables to set in the container. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVar"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"envFrom":{"description":"List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvFromSource"}},"image":{"description":"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images","type":"string"},"imagePullPolicy":{"description":"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\n","type":"string"},"lifecycle":{"description":"Lifecycle is not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Lifecycle"},"livenessProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"name":{"description":"Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.","type":"string","default":""},"ports":{"description":"Ports are not allowed for ephemeral containers.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ContainerPort"},"x-kubernetes-list-map-keys":["containerPort","protocol"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"containerPort","x-kubernetes-patch-strategy":"merge"},"readinessProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"resources":{"description":"Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"securityContext":{"description":"Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.","$ref":"#/components/schemas/io.k8s.api.core.v1.SecurityContext"},"startupProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"stdin":{"description":"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false.","type":"boolean"},"stdinOnce":{"description":"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false","type":"boolean"},"targetContainerName":{"description":"If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.","type":"string"},"terminationMessagePath":{"description":"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.","type":"string"},"terminationMessagePolicy":{"description":"Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\n","type":"string"},"tty":{"description":"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.","type":"boolean"},"volumeDevices":{"description":"volumeDevices is the list of block devices to be used by the container.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeDevice"},"x-kubernetes-patch-merge-key":"devicePath","x-kubernetes-patch-strategy":"merge"},"volumeMounts":{"description":"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeMount"},"x-kubernetes-patch-merge-key":"mountPath","x-kubernetes-patch-strategy":"merge"},"workingDir":{"description":"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.","type":"string"}}},"io.k8s.api.core.v1.EphemeralVolumeSource":{"description":"Represents an ephemeral volume that is handled by a normal storage driver.","type":"object","properties":{"volumeClaimTemplate":{"description":"Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. 
The name of the PVC will be `\u003cpod name\u003e-\u003cvolume name\u003e` where `\u003cvolume name\u003e` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.","$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimTemplate"}}},"io.k8s.api.core.v1.ExecAction":{"description":"ExecAction describes a \"run in container\" action.","type":"object","properties":{"command":{"description":"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.FCVolumeSource":{"description":"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.","type":"object","properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"lun":{"description":"lun is Optional: FC target lun number","type":"integer","format":"int32"},"readOnly":{"description":"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"targetWWNs":{"description":"targetWWNs is Optional: FC target worldwide names (WWNs)","type":"array","items":{"type":"string","default":""}},"wwids":{"description":"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.FlexVolumeSource":{"description":"FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.","type":"object","required":["driver"],"properties":{"driver":{"description":"driver is the name of the driver to use for this volume.","type":"string","default":""},"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.","type":"string"},"options":{"description":"options is Optional: this field holds extra command options if any.","type":"object","additionalProperties":{"type":"string","default":""}},"readOnly":{"description":"readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"}}},"io.k8s.api.core.v1.FlockerVolumeSource":{"description":"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.","type":"object","properties":{"datasetName":{"description":"datasetName is Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated","type":"string"},"datasetUUID":{"description":"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset","type":"string"}}},"io.k8s.api.core.v1.GCEPersistentDiskVolumeSource":{"description":"Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.","type":"object","required":["pdName"],"properties":{"fsType":{"description":"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"string"},"partition":{"description":"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"integer","format":"int32"},"pdName":{"description":"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"string","default":""},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"boolean"}}},"io.k8s.api.core.v1.GRPCAction":{"type":"object","required":["port"],"properties":{"port":{"description":"Port number of the gRPC service. Number must be in the range 1 to 65535.","type":"integer","format":"int32","default":0},"service":{"description":"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.","type":"string","default":""}}},"io.k8s.api.core.v1.GitRepoVolumeSource":{"description":"Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.","type":"object","required":["repository"],"properties":{"directory":{"description":"directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.","type":"string"},"repository":{"description":"repository is the URL","type":"string","default":""},"revision":{"description":"revision is the commit hash for the specified revision.","type":"string"}}},"io.k8s.api.core.v1.GlusterfsVolumeSource":{"description":"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.","type":"object","required":["endpoints","path"],"properties":{"endpoints":{"description":"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"string","default":""},"path":{"description":"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"string","default":""},"readOnly":{"description":"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"boolean"}}},"io.k8s.api.core.v1.HTTPGetAction":{"description":"HTTPGetAction describes an action based on HTTP Get requests.","type":"object","required":["port"],"properties":{"host":{"description":"Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.","type":"string"},"httpHeaders":{"description":"Custom headers to set in the request. HTTP allows repeated headers.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPHeader"}},"path":{"description":"Path to access on the HTTP server.","type":"string"},"port":{"description":"Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"},"scheme":{"description":"Scheme to use for connecting to the host. Defaults to HTTP.\n\n","type":"string"}}},"io.k8s.api.core.v1.HTTPHeader":{"description":"HTTPHeader describes a custom header to be used in HTTP probes","type":"object","required":["name","value"],"properties":{"name":{"description":"The header field name","type":"string","default":""},"value":{"description":"The header field value","type":"string","default":""}}},"io.k8s.api.core.v1.HostAlias":{"description":"HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.","type":"object","properties":{"hostnames":{"description":"Hostnames for the above IP address.","type":"array","items":{"type":"string","default":""}},"ip":{"description":"IP address of the host file entry.","type":"string"}}},"io.k8s.api.core.v1.HostPathVolumeSource":{"description":"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.","type":"object","required":["path"],"properties":{"path":{"description":"path of the directory on the host. 
If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","type":"string","default":""},"type":{"description":"type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","type":"string"}}},"io.k8s.api.core.v1.ISCSIVolumeSource":{"description":"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.","type":"object","required":["targetPortal","iqn","lun"],"properties":{"chapAuthDiscovery":{"description":"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication","type":"boolean"},"chapAuthSession":{"description":"chapAuthSession defines whether support iSCSI Session CHAP authentication","type":"boolean"},"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi","type":"string"},"initiatorName":{"description":"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.","type":"string"},"iqn":{"description":"iqn is the target iSCSI Qualified Name.","type":"string","default":""},"iscsiInterface":{"description":"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).","type":"string"},"lun":{"description":"lun represents iSCSI Target Lun number.","type":"integer","format":"int32","default":0},"portals":{"description":"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).","type":"array","items":{"type":"string","default":""}},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.","type":"boolean"},"secretRef":{"description":"secretRef is the CHAP Secret for iSCSI target and initiator authentication","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"targetPortal":{"description":"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).","type":"string","default":""}}},"io.k8s.api.core.v1.KeyToPath":{"description":"Maps a string key to a path within a volume.","type":"object","required":["key","path"],"properties":{"key":{"description":"key is the key to project.","type":"string","default":""},"mode":{"description":"mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"path":{"description":"path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
May not start with the string '..'.","type":"string","default":""}}},"io.k8s.api.core.v1.Lifecycle":{"description":"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.","type":"object","properties":{"postStart":{"description":"PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks","$ref":"#/components/schemas/io.k8s.api.core.v1.LifecycleHandler"},"preStop":{"description":"PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks","$ref":"#/components/schemas/io.k8s.api.core.v1.LifecycleHandler"}}},"io.k8s.api.core.v1.LifecycleHandler":{"description":"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.","type":"object","properties":{"exec":{"description":"Exec specifies the action to take.","$ref":"#/components/schemas/io.k8s.api.core.v1.ExecAction"},"httpGet":{"description":"HTTPGet specifies the http request to perform.","$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPGetAction"},"tcpSocket":{"description":"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.","$ref":"#/components/schemas/io.k8s.api.core.v1.TCPSocketAction"}}},"io.k8s.api.core.v1.LocalObjectReference":{"description":"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.NFSVolumeSource":{"description":"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.","type":"object","required":["server","path"],"properties":{"path":{"description":"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"string","default":""},"readOnly":{"description":"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"boolean"},"server":{"description":"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"string","default":""}}},"io.k8s.api.core.v1.NodeAffinity":{"description":"Node affinity is a group of node affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PreferredSchedulingTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.","$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelector"}}},"io.k8s.api.core.v1.NodeSelector":{"description":"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.","type":"object","required":["nodeSelectorTerms"],"properties":{"nodeSelectorTerms":{"description":"Required. A list of node selector terms. The terms are ORed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm"}}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.NodeSelectorRequirement":{"description":"A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.","type":"object","required":["key","operator"],"properties":{"key":{"description":"The label key that the selector applies to.","type":"string","default":""},"operator":{"description":"Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.\n\n","type":"string","default":""},"values":{"description":"An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.NodeSelectorTerm":{"description":"A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.","type":"object","properties":{"matchExpressions":{"description":"A list of node selector requirements by node's labels.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement"}},"matchFields":{"description":"A list of node selector requirements by node's fields.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement"}}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ObjectFieldSelector":{"description":"ObjectFieldSelector selects an APIVersioned field of an object.","type":"object","required":["fieldPath"],"properties":{"apiVersion":{"description":"Version of the schema the FieldPath is written in terms of, defaults to \"v1\".","type":"string"},"fieldPath":{"description":"Path of the field to select in the specified API version.","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ObjectReference":{"description":"ObjectReference contains enough information to let you inspect or modify the referred object.","type":"object","properties":{"apiVersion":{"description":"API version of the referent.","type":"string"},"fieldPath":{"description":"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.","type":"string"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"namespace":{"description":"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/","type":"string"},"resourceVersion":{"description":"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"uid":{"description":"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids","type":"string"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.PersistentVolumeClaimSpec":{"description":"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes","type":"object","properties":{"accessModes":{"description":"accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1","type":"array","items":{"type":"string","default":""}},"dataSource":{"description":"dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.","$ref":"#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference"},"dataSourceRef":{"description":"dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.","$ref":"#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference"},"resources":{"description":"resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"selector":{"description":"selector is a label query over volumes to consider for binding.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"storageClassName":{"description":"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1","type":"string"},"volumeMode":{"description":"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.","type":"string"},"volumeName":{"description":"volumeName is the binding reference to the PersistentVolume backing this claim.","type":"string"}}},"io.k8s.api.core.v1.PersistentVolumeClaimTemplate":{"description":"PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.","type":"object","required":["spec"],"properties":{"metadata":{"description":"May contain labels and annotations that will be copied into the PVC when creating it. 
No other fields are allowed and will be rejected during validation.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimSpec"}}},"io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource":{"description":"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).","type":"object","required":["claimName"],"properties":{"claimName":{"description":"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims","type":"string","default":""},"readOnly":{"description":"readOnly Will force the ReadOnly setting in VolumeMounts. Default false.","type":"boolean"}}},"io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource":{"description":"Represents a Photon Controller persistent disk resource.","type":"object","required":["pdID"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"pdID":{"description":"pdID is the ID that identifies Photon Controller persistent disk","type":"string","default":""}}},"io.k8s.api.core.v1.PodAffinity":{"description":"Pod affinity is a group of inter pod affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"}}}},"io.k8s.api.core.v1.PodAffinityTerm":{"description":"Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running","type":"object","required":["topologyKey"],"properties":{"labelSelector":{"description":"A label query over a set of resources, in this case pods.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"namespaceSelector":{"description":"A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"namespaces":{"description":"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"","type":"array","items":{"type":"string","default":""}},"topologyKey":{"description":"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.","type":"string","default":""}}},"io.k8s.api.core.v1.PodAntiAffinity":{"description":"Pod anti affinity is a group of inter pod anti affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"}}}},"io.k8s.api.core.v1.PodDNSConfig":{"description":"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.","type":"object","properties":{"nameservers":{"description":"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.","type":"array","items":{"type":"string","default":""}},"options":{"description":"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodDNSConfigOption"}},"searches":{"description":"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.PodDNSConfigOption":{"description":"PodDNSConfigOption defines DNS resolver options of a pod.","type":"object","properties":{"name":{"description":"Required.","type":"string"},"value":{"type":"string"}}},"io.k8s.api.core.v1.PodOS":{"description":"PodOS defines the OS parameters of a pod.","type":"object","required":["name"],"properties":{"name":{"description":"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null","type":"string","default":""}}},"io.k8s.api.core.v1.PodReadinessGate":{"description":"PodReadinessGate contains the reference to a pod condition","type":"object","required":["conditionType"],"properties":{"conditionType":{"description":"ConditionType refers to a condition in the pod's condition list with matching type.\n\n","type":"string","default":""}}},"io.k8s.api.core.v1.PodSecurityContext":{"description":"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.","type":"object","properties":{"fsGroup":{"description":"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"fsGroupChangePolicy":{"description":"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). 
It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.","type":"string"},"runAsGroup":{"description":"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"runAsNonRoot":{"description":"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"boolean"},"runAsUser":{"description":"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"seLinuxOptions":{"description":"The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SELinuxOptions"},"seccompProfile":{"description":"The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SeccompProfile"},"supplementalGroups":{"description":"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.","type":"array","items":{"type":"integer","format":"int64","default":0}},"sysctls":{"description":"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Sysctl"}},"windowsOptions":{"description":"The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is linux.","$ref":"#/components/schemas/io.k8s.api.core.v1.WindowsSecurityContextOptions"}}},"io.k8s.api.core.v1.PodSpec":{"description":"PodSpec is a description of a pod.","type":"object","required":["containers"],"properties":{"activeDeadlineSeconds":{"description":"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.","type":"integer","format":"int64"},"affinity":{"description":"If specified, the pod's scheduling constraints","$ref":"#/components/schemas/io.k8s.api.core.v1.Affinity"},"automountServiceAccountToken":{"description":"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.","type":"boolean"},"containers":{"description":"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Container"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"dnsConfig":{"description":"Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.","$ref":"#/components/schemas/io.k8s.api.core.v1.PodDNSConfig"},"dnsPolicy":{"description":"Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\n","type":"string"},"enableServiceLinks":{"description":"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.","type":"boolean"},"ephemeralContainers":{"description":"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EphemeralContainer"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"hostAliases":{"description":"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.HostAlias"},"x-kubernetes-patch-merge-key":"ip","x-kubernetes-patch-strategy":"merge"},"hostIPC":{"description":"Use the host's ipc namespace. Optional: Default to false.","type":"boolean"},"hostNetwork":{"description":"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.","type":"boolean"},"hostPID":{"description":"Use the host's pid namespace. 
Optional: Default to false.","type":"boolean"},"hostname":{"description":"Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.","type":"string"},"imagePullSecrets":{"description":"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"initContainers":{"description":"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Container"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"nodeName":{"description":"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.","type":"string"},"nodeSelector":{"description":"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/","type":"object","additionalProperties":{"type":"string","default":""},"x-kubernetes-map-type":"atomic"},"os":{"description":"Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: - securityContext.windowsOptions\n\nIf the OS field is set to windows, the following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is an alpha field and requires the IdentifyPodOS feature","$ref":"#/components/schemas/io.k8s.api.core.v1.PodOS"},"overhead":{"description":"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}},"preemptionPolicy":{"description":"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.","type":"string"},"priority":{"description":"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.","type":"integer","format":"int32"},"priorityClassName":{"description":"If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.","type":"string"},"readinessGates":{"description":"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodReadinessGate"}},"restartPolicy":{"description":"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n\n","type":"string"},"runtimeClassName":{"description":"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14.","type":"string"},"schedulerName":{"description":"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.","type":"string"},"securityContext":{"description":"SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.","$ref":"#/components/schemas/io.k8s.api.core.v1.PodSecurityContext"},"serviceAccount":{"description":"DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.","type":"string"},"serviceAccountName":{"description":"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/","type":"string"},"setHostnameAsFQDN":{"description":"If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.","type":"boolean"},"shareProcessNamespace":{"description":"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.","type":"boolean"},"subdomain":{"description":"If specified, the fully qualified Pod hostname will be \"\u003chostname\u003e.\u003csubdomain\u003e.\u003cpod namespace\u003e.svc.\u003ccluster domain\u003e\". If not specified, the pod will not have a domainname at all.","type":"string"},"terminationGracePeriodSeconds":{"description":"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. 
The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.","type":"integer","format":"int64"},"tolerations":{"description":"If specified, the pod's tolerations.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Toleration"}},"topologySpreadConstraints":{"description":"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.TopologySpreadConstraint"},"x-kubernetes-list-map-keys":["topologyKey","whenUnsatisfiable"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"topologyKey","x-kubernetes-patch-strategy":"merge"},"volumes":{"description":"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Volume"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge,retainKeys"}}},"io.k8s.api.core.v1.PodTemplateSpec":{"description":"PodTemplateSpec describes the data a pod should have when created from a template","type":"object","properties":{"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodSpec"}}},"io.k8s.api.core.v1.PortworxVolumeSource":{"description":"PortworxVolumeSource represents a Portworx volume resource.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"volumeID":{"description":"volumeID uniquely identifies a Portworx volume","type":"string","default":""}}},"io.k8s.api.core.v1.PreferredSchedulingTerm":{"description":"An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).","type":"object","required":["weight","preference"],"properties":{"preference":{"description":"A node selector term, associated with the corresponding weight.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm"},"weight":{"description":"Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.","type":"integer","format":"int32","default":0}}},"io.k8s.api.core.v1.Probe":{"description":"Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.","type":"object","properties":{"exec":{"description":"Exec specifies the action to take.","$ref":"#/components/schemas/io.k8s.api.core.v1.ExecAction"},"failureThreshold":{"description":"Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.","type":"integer","format":"int32"},"grpc":{"description":"GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.","$ref":"#/components/schemas/io.k8s.api.core.v1.GRPCAction"},"httpGet":{"description":"HTTPGet specifies the http request to perform.","$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPGetAction"},"initialDelaySeconds":{"description":"Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","type":"integer","format":"int32"},"periodSeconds":{"description":"How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.","type":"integer","format":"int32"},"successThreshold":{"description":"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.","type":"integer","format":"int32"},"tcpSocket":{"description":"TCPSocket specifies an action involving a TCP port.","$ref":"#/components/schemas/io.k8s.api.core.v1.TCPSocketAction"},"terminationGracePeriodSeconds":{"description":"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.","type":"integer","format":"int64"},"timeoutSeconds":{"description":"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","type":"integer","format":"int32"}}},"io.k8s.api.core.v1.ProjectedVolumeSource":{"description":"Represents a projected volume source","type":"object","properties":{"defaultMode":{"description":"defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"sources":{"description":"sources is the list of volume projections","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeProjection"}}}},"io.k8s.api.core.v1.QuobyteVolumeSource":{"description":"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.","type":"object","required":["registry","volume"],"properties":{"group":{"description":"group to map volume access to Default is no group","type":"string"},"readOnly":{"description":"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.","type":"boolean"},"registry":{"description":"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes","type":"string","default":""},"tenant":{"description":"tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin","type":"string"},"user":{"description":"user to map volume access to Defaults to serviceaccount user","type":"string"},"volume":{"description":"volume is a string that references an already created Quobyte volume by name.","type":"string","default":""}}},"io.k8s.api.core.v1.RBDVolumeSource":{"description":"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.","type":"object","required":["monitors","image"],"properties":{"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd","type":"string"},"image":{"description":"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string","default":""},"keyring":{"description":"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"},"monitors":{"description":"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"array","items":{"type":"string","default":""}},"pool":{"description":"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"boolean"},"secretRef":{"description":"secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"user":{"description":"user is the rados user name. Default is admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"}}},"io.k8s.api.core.v1.ResourceFieldSelector":{"description":"ResourceFieldSelector represents container resources (cpu, memory) and their output format","type":"object","required":["resource"],"properties":{"containerName":{"description":"Container name: required for volumes, optional for env vars","type":"string"},"divisor":{"description":"Specifies the output format of the exposed resources, defaults to \"1\"","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"},"resource":{"description":"Required: resource to select","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ResourceRequirements":{"description":"ResourceRequirements describes the compute resource requirements.","type":"object","properties":{"limits":{"description":"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}},"requests":{"description":"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}}}},"io.k8s.api.core.v1.SELinuxOptions":{"description":"SELinuxOptions are the labels to be applied to the container","type":"object","properties":{"level":{"description":"Level is SELinux level label that applies to the container.","type":"string"},"role":{"description":"Role is a SELinux role label that applies to the container.","type":"string"},"type":{"description":"Type is a SELinux type label that applies to the container.","type":"string"},"user":{"description":"User is a SELinux user label that applies to the container.","type":"string"}}},"io.k8s.api.core.v1.ScaleIOVolumeSource":{"description":"ScaleIOVolumeSource represents a persistent ScaleIO volume","type":"object","required":["gateway","system","secretRef"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".","type":"string"},"gateway":{"description":"gateway is the host address of the ScaleIO API Gateway.","type":"string","default":""},"protectionDomain":{"description":"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.","type":"string"},"readOnly":{"description":"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"sslEnabled":{"description":"sslEnabled Flag enable/disable SSL communication with Gateway, default false","type":"boolean"},"storageMode":{"description":"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.","type":"string"},"storagePool":{"description":"storagePool is the ScaleIO Storage Pool associated with the protection domain.","type":"string"},"system":{"description":"system is the name of the storage system as configured in ScaleIO.","type":"string","default":""},"volumeName":{"description":"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.","type":"string"}}},"io.k8s.api.core.v1.SeccompProfile":{"description":"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.","type":"object","required":["type"],"properties":{"localhostProfile":{"description":"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".","type":"string"},"type":{"description":"type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.\n\n","type":"string","default":""}},"x-kubernetes-unions":[{"discriminator":"type","fields-to-discriminateBy":{"localhostProfile":"LocalhostProfile"}}]},"io.k8s.api.core.v1.SecretEnvSource":{"description":"SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the Secret must be defined","type":"boolean"}}},"io.k8s.api.core.v1.SecretKeySelector":{"description":"SecretKeySelector selects a key of a Secret.","type":"object","required":["key"],"properties":{"key":{"description":"The key of the secret to select from. Must be a valid secret key.","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the Secret or its key must be defined","type":"boolean"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.SecretProjection":{"description":"Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.","type":"object","properties":{"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional field specify whether the Secret or its key must be defined","type":"boolean"}}},"io.k8s.api.core.v1.SecretVolumeSource":{"description":"Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"optional":{"description":"optional field specify whether the Secret or its keys must be defined","type":"boolean"},"secretName":{"description":"secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret","type":"string"}}},"io.k8s.api.core.v1.SecurityContext":{"description":"SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.","type":"object","properties":{"allowPrivilegeEscalation":{"description":"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"capabilities":{"description":"The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.Capabilities"},"privileged":{"description":"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"procMount":{"description":"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. 
Note that this field cannot be set when spec.os.name is windows.","type":"string"},"readOnlyRootFilesystem":{"description":"Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"runAsGroup":{"description":"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"runAsNonRoot":{"description":"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"boolean"},"runAsUser":{"description":"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"seLinuxOptions":{"description":"The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SELinuxOptions"},"seccompProfile":{"description":"The seccomp options to use by this container. If seccomp options are provided at both the pod \u0026 container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SeccompProfile"},"windowsOptions":{"description":"The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.","$ref":"#/components/schemas/io.k8s.api.core.v1.WindowsSecurityContextOptions"}}},"io.k8s.api.core.v1.ServiceAccountTokenProjection":{"description":"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).","type":"object","required":["path"],"properties":{"audience":{"description":"audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.","type":"string"},"expirationSeconds":{"description":"expirationSeconds is the requested duration of validity of the service account token. 
As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.","type":"integer","format":"int64"},"path":{"description":"path is the path relative to the mount point of the file to project the token into.","type":"string","default":""}}},"io.k8s.api.core.v1.StorageOSVolumeSource":{"description":"Represents a StorageOS persistent volume resource.","type":"object","properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"volumeName":{"description":"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.","type":"string"},"volumeNamespace":{"description":"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.","type":"string"}}},"io.k8s.api.core.v1.Sysctl":{"description":"Sysctl defines a kernel parameter to be set","type":"object","required":["name","value"],"properties":{"name":{"description":"Name of a property to set","type":"string","default":""},"value":{"description":"Value of a property to set","type":"string","default":""}}},"io.k8s.api.core.v1.TCPSocketAction":{"description":"TCPSocketAction describes an action based on opening a socket","type":"object","required":["port"],"properties":{"host":{"description":"Optional: Host name to connect to, defaults to the pod IP.","type":"string"},"port":{"description":"Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"}}},"io.k8s.api.core.v1.Toleration":{"description":"The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.","type":"object","properties":{"effect":{"description":"Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n\n","type":"string"},"key":{"description":"Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.","type":"string"},"operator":{"description":"Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.\n\n","type":"string"},"tolerationSeconds":{"description":"TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.","type":"integer","format":"int64"},"value":{"description":"Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.","type":"string"}}},"io.k8s.api.core.v1.TopologySpreadConstraint":{"description":"TopologySpreadConstraint specifies how to spread matching pods among the given topology.","type":"object","required":["maxSkew","topologyKey","whenUnsatisfiable"],"properties":{"labelSelector":{"description":"LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"maxSkew":{"description":"MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.","type":"integer","format":"int32","default":0},"topologyKey":{"description":"TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.","type":"string","default":""},"whenUnsatisfiable":{"description":"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. 
It's a required field.\n\n","type":"string","default":""}}},"io.k8s.api.core.v1.TypedLocalObjectReference":{"description":"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.","type":"object","required":["kind","name"],"properties":{"apiGroup":{"description":"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.","type":"string"},"kind":{"description":"Kind is the type of resource being referenced","type":"string","default":""},"name":{"description":"Name is the name of resource being referenced","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.Volume":{"description":"Volume represents a named volume in a pod that may be accessed by any container in the pod.","type":"object","required":["name"],"properties":{"awsElasticBlockStore":{"description":"awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","$ref":"#/components/schemas/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource"},"azureDisk":{"description":"azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.AzureDiskVolumeSource"},"azureFile":{"description":"azureFile represents an Azure File Service mount on the host and bind mount to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.AzureFileVolumeSource"},"cephfs":{"description":"cephFS represents a Ceph FS mount on the host that shares a pod's lifetime","$ref":"#/components/schemas/io.k8s.api.core.v1.CephFSVolumeSource"},"cinder":{"description":"cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.CinderVolumeSource"},"configMap":{"description":"configMap represents a configMap that should populate this volume","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapVolumeSource"},"csi":{"description":"csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).","$ref":"#/components/schemas/io.k8s.api.core.v1.CSIVolumeSource"},"downwardAPI":{"description":"downwardAPI represents downward API about the pod that should populate this volume","$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeSource"},"emptyDir":{"description":"emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","$ref":"#/components/schemas/io.k8s.api.core.v1.EmptyDirVolumeSource"},"ephemeral":{"description":"ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.","$ref":"#/components/schemas/io.k8s.api.core.v1.EphemeralVolumeSource"},"fc":{"description":"fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.FCVolumeSource"},"flexVolume":{"description":"flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.","$ref":"#/components/schemas/io.k8s.api.core.v1.FlexVolumeSource"},"flocker":{"description":"flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running","$ref":"#/components/schemas/io.k8s.api.core.v1.FlockerVolumeSource"},"gcePersistentDisk":{"description":"gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","$ref":"#/components/schemas/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource"},"gitRepo":{"description":"gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.","$ref":"#/components/schemas/io.k8s.api.core.v1.GitRepoVolumeSource"},"glusterfs":{"description":"glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.GlusterfsVolumeSource"},"hostPath":{"description":"hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","$ref":"#/components/schemas/io.k8s.api.core.v1.HostPathVolumeSource"},"iscsi":{"description":"iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.ISCSIVolumeSource"},"name":{"description":"name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string","default":""},"nfs":{"description":"nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","$ref":"#/components/schemas/io.k8s.api.core.v1.NFSVolumeSource"},"persistentVolumeClaim":{"description":"persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims","$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource"},"photonPersistentDisk":{"description":"photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource"},"portworxVolume":{"description":"portworxVolume represents a portworx volume attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.PortworxVolumeSource"},"projected":{"description":"projected items for all in one resources secrets, configmaps, and downward API","$ref":"#/components/schemas/io.k8s.api.core.v1.ProjectedVolumeSource"},"quobyte":{"description":"quobyte represents a Quobyte mount on the host that shares a pod's lifetime","$ref":"#/components/schemas/io.k8s.api.core.v1.QuobyteVolumeSource"},"rbd":{"description":"rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.RBDVolumeSource"},"scaleIO":{"description":"scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.","$ref":"#/components/schemas/io.k8s.api.core.v1.ScaleIOVolumeSource"},"secret":{"description":"secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretVolumeSource"},"storageos":{"description":"storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.","$ref":"#/components/schemas/io.k8s.api.core.v1.StorageOSVolumeSource"},"vsphereVolume":{"description":"vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"}}},"io.k8s.api.core.v1.VolumeDevice":{"description":"volumeDevice describes a mapping of a raw block device within a container.","type":"object","required":["name","devicePath"],"properties":{"devicePath":{"description":"devicePath is the path inside of the container that the device will be mapped to.","type":"string","default":""},"name":{"description":"name must match the name of a persistentVolumeClaim in the pod","type":"string","default":""}}},"io.k8s.api.core.v1.VolumeMount":{"description":"VolumeMount describes a mounting of a Volume within a container.","type":"object","required":["name","mountPath"],"properties":{"mountPath":{"description":"Path within the container at which the volume should be mounted. Must not contain ':'.","type":"string","default":""},"mountPropagation":{"description":"mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. 
This field is beta in 1.10.","type":"string"},"name":{"description":"This must match the Name of a Volume.","type":"string","default":""},"readOnly":{"description":"Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.","type":"boolean"},"subPath":{"description":"Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).","type":"string"},"subPathExpr":{"description":"Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.","type":"string"}}},"io.k8s.api.core.v1.VolumeProjection":{"description":"Projection that may be projected along with other supported volume types","type":"object","properties":{"configMap":{"description":"configMap information about the configMap data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapProjection"},"downwardAPI":{"description":"downwardAPI information about the downwardAPI data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIProjection"},"secret":{"description":"secret information about the secret data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretProjection"},"serviceAccountToken":{"description":"serviceAccountToken is information about the serviceAccountToken data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.ServiceAccountTokenProjection"}}},"io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource":{"description":"Represents a vSphere volume resource.","type":"object","required":["volumePath"],"properties":{"fsType":{"description":"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"storagePolicyID":{"description":"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.","type":"string"},"storagePolicyName":{"description":"storagePolicyName is the storage Policy Based Management (SPBM) profile name.","type":"string"},"volumePath":{"description":"volumePath is the path that identifies vSphere volume vmdk","type":"string","default":""}}},"io.k8s.api.core.v1.WeightedPodAffinityTerm":{"description":"The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)","type":"object","required":["weight","podAffinityTerm"],"properties":{"podAffinityTerm":{"description":"Required. 
A pod affinity term, associated with the corresponding weight.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"},"weight":{"description":"weight associated with matching the corresponding podAffinityTerm, in the range 1-100.","type":"integer","format":"int32","default":0}}},"io.k8s.api.core.v1.WindowsSecurityContextOptions":{"description":"WindowsSecurityContextOptions contain Windows-specific options and credentials.","type":"object","properties":{"gmsaCredentialSpec":{"description":"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.","type":"string"},"gmsaCredentialSpecName":{"description":"GMSACredentialSpecName is the name of the GMSA credential spec to use.","type":"string"},"hostProcess":{"description":"HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.","type":"boolean"},"runAsUserName":{"description":"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"string"}}},"io.k8s.apimachinery.pkg.api.resource.Quantity":{"description":"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.","type":"string"},"io.k8s.apimachinery.pkg.apis.meta.v1.APIResource":{"description":"APIResource specifies the name of a resource and whether it is namespaced.","type":"object","required":["name","singularName","namespaced","kind","verbs"],"properties":{"categories":{"description":"categories is a list of the grouped resources this resource belongs to (e.g. 'all')","type":"array","items":{"type":"string","default":""}},"group":{"description":"group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".","type":"string"},"kind":{"description":"kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')","type":"string","default":""},"name":{"description":"name is the plural name of the resource.","type":"string","default":""},"namespaced":{"description":"namespaced indicates if a resource is namespaced or not.","type":"boolean","default":false},"shortNames":{"description":"shortNames is a list of suggested short names of the resource.","type":"array","items":{"type":"string","default":""}},"singularName":{"description":"singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.","type":"string","default":""},"storageVersionHash":{"description":"The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.","type":"string"},"verbs":{"description":"verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)","type":"array","items":{"type":"string","default":""}},"version":{"description":"version is the preferred version of the resource. 
Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList":{"description":"APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.","type":"object","required":["groupVersion","resources"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"groupVersion":{"description":"groupVersion is the group and version this APIResourceList is for.","type":"string","default":""},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"resources":{"description":"resources contains the name of the resources and if they are namespaced.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource"}}},"x-kubernetes-group-version-kind":[{"group":"","kind":"APIResourceList","version":"v1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions":{"description":"DeleteOptions may be provided when deleting an API object.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"dryRun":{"description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","type":"array","items":{"type":"string","default":""}},"gracePeriodSeconds":{"description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","type":"integer","format":"int64"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"orphanDependents":{"description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both.","type":"boolean"},"preconditions":{"description":"Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions"},"propagationPolicy":{"description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","type":"string"}},"x-kubernetes-group-version-kind":[{"group":"","kind":"DeleteOptions","version":"v1"},{"group":"admission.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"admission.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"admissionregistration.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"admissionregistration.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apiextensions.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"apiextensions.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apiregistration.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"apiregistration.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apps","kind":"DeleteOptions","version":"v1"},{"group":"apps","kind":"DeleteOptions","version":"v1beta1"},{"group":"apps","kind":"DeleteOptions","version":"v1beta2"},{"group":"authentication.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"authentication.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"authorization.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"authorization.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2beta1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2beta2"},{"group":"batch","kind":"DeleteOptions","version":"v1"},{"group":"batch","kind":"DeleteOptions","version":"v1beta1"},{"group":"certificates.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"certificates.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"coordination.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"coordination.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"discovery.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"discovery.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"events.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"events.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"extensions","kind":"DeleteOptions","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1beta2"},{"group":"imagepolicy.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"internal.apiserver.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"networking.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"networking.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":
"node.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"node.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"node.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"policy","kind":"DeleteOptions","version":"v1"},{"group":"policy","kind":"DeleteOptions","version":"v1beta1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1beta1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1":{"description":"FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff","type":"object"},"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector":{"description":"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.","type":"object","properties":{"matchExpressions":{"description":"matchExpressions is a list of label selector requirements. The requirements are ANDed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement"}},"matchLabels":{"description":"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.","type":"object","additionalProperties":{"type":"string","default":""}}},"x-kubernetes-map-type":"atomic"},"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement":{"description":"A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.","type":"object","required":["key","operator"],"properties":{"key":{"description":"key is the label key that the selector applies to.","type":"string","default":"","x-kubernetes-patch-merge-key":"key","x-kubernetes-patch-strategy":"merge"},"operator":{"description":"operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.","type":"string","default":""},"values":{"description":"values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta":{"description":"ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.","type":"object","properties":{"continue":{"description":"continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.","type":"string"},"remainingItemCount":{"description":"remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.","type":"integer","format":"int64"},"resourceVersion":{"description":"String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is DEPRECATED read-only field that is no longer populated by the system.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry":{"description":"ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.","type":"string"},"fieldsType":{"description":"FieldsType is the discriminator for the different fields format and version. 
There is currently only one possible value: \"FieldsV1\"","type":"string"},"fieldsV1":{"description":"FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"},"manager":{"description":"Manager is an identifier of the workflow managing these fields.","type":"string"},"operation":{"description":"Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.","type":"string"},"subresource":{"description":"Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.","type":"string"},"time":{"description":"Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta":{"description":"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.","type":"object","properties":{"annotations":{"description":"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations","type":"object","additionalProperties":{"type":"string","default":""}},"clusterName":{"description":"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.","type":"string"},"creationTimestamp":{"description":"CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"deletionGracePeriodSeconds":{"description":"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.","type":"integer","format":"int64"},"deletionTimestamp":{"description":"DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"finalizers":{"description":"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.","type":"array","items":{"type":"string","default":""},"x-kubernetes-patch-strategy":"merge"},"generateName":{"description":"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency","type":"string"},"generation":{"description":"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.","type":"integer","format":"int64"},"labels":{"description":"Map of string keys and values that can be used to organize and categorize (scope and select) objects. 
May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels","type":"object","additionalProperties":{"type":"string","default":""}},"managedFields":{"description":"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry"}},"name":{"description":"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names","type":"string"},"namespace":{"description":"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces","type":"string"},"ownerReferences":{"description":"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"},"x-kubernetes-patch-merge-key":"uid","x-kubernetes-patch-strategy":"merge"},"resourceVersion":{"description":"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is DEPRECATED read-only field that is no longer populated by the system.","type":"string"},"uid":{"description":"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference":{"description":"OwnerReference contains enough information to let you identify an owning object. 
An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.","type":"object","required":["apiVersion","kind","name","uid"],"properties":{"apiVersion":{"description":"API version of the referent.","type":"string","default":""},"blockOwnerDeletion":{"description":"If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.","type":"boolean"},"controller":{"description":"If true, this reference points to the managing controller.","type":"boolean"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names","type":"string","default":""},"uid":{"description":"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.apimachinery.pkg.apis.meta.v1.Patch":{"description":"Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.","type":"object"},"io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions":{"description":"Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.","type":"object","properties":{"resourceVersion":{"description":"Specifies the target ResourceVersion","type":"string"},"uid":{"description":"Specifies the target UID.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.Status":{"description":"Status is a return value for calls that don't return other objects.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"code":{"description":"Suggested HTTP return code for this status, 0 if not set.","type":"integer","format":"int32"},"details":{"description":"Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"message":{"description":"A human-readable description of the status of this operation.","type":"string"},"metadata":{"description":"Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"},"reason":{"description":"A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.","type":"string"},"status":{"description":"Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","type":"string"}},"x-kubernetes-group-version-kind":[{"group":"","kind":"Status","version":"v1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause":{"description":"StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.","type":"object","properties":{"field":{"description":"The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"","type":"string"},"message":{"description":"A human-readable description of the cause of the error. This field may be presented as-is to a reader.","type":"string"},"reason":{"description":"A machine-readable description of the cause of the error. If this value is empty there is no information available.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails":{"description":"StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.","type":"object","properties":{"causes":{"description":"The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"}},"group":{"description":"The group attribute of the resource associated with the status StatusReason.","type":"string"},"kind":{"description":"The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"name":{"description":"The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).","type":"string"},"retryAfterSeconds":{"description":"If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.","type":"integer","format":"int32"},"uid":{"description":"UID of the resource. (when there is a single resource which can be described). 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.Time":{"description":"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.","type":"string","format":"date-time"},"io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent":{"description":"Event represents a single event to a watched resource.","type":"object","required":["type","object"],"properties":{"object":{"description":"Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension"},"type":{"type":"string","default":""}},"x-kubernetes-group-version-kind":[{"group":"","kind":"WatchEvent","version":"v1"},{"group":"admission.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"admission.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"admissionregistration.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"admissionregistration.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apiextensions.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"apiextensions.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apiregistration.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"apiregistration.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apps","kind":"WatchEvent","version":"v1"},{"group":"apps","kind":"WatchEvent","version":"v1beta1"},{"group":"apps","kind":"WatchEvent","version":"v1beta2"},{"group":"authentication.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"authentication.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"authorization.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"authorization.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"autoscaling","kind":"WatchEvent","version":"v1"},{"group":"autoscaling","kind":"WatchEvent","version":"v2"},{"group":"autoscaling","kind":"WatchEvent","version":"v2beta1"},{"group":"autoscaling","kind":"WatchEvent","version":"v2beta2"},{"group":"batch","kind":"WatchEvent","version":"v1"},{"group":"batch","kind":"WatchEvent","version":"v1beta1"},{"group":"certificates.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"certificates.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"coordination.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"coordination.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"discovery.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"discovery.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"events.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"events.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"extensions","kind":"WatchEvent","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1beta2"},{"group":"imagepolicy.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"internal.apiserver.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"networking.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"networking.k8s.io","kind":"WatchEvent","version":"v1beta1
"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"policy","kind":"WatchEvent","version":"v1"},{"group":"policy","kind":"WatchEvent","version":"v1beta1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1beta1"}]},"io.k8s.apimachinery.pkg.runtime.RawExtension":{"description":"RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)","type":"object"},"io.k8s.apimachinery.pkg.util.intstr.IntOrString":{"description":"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number.","type":"string","format":"int-or-string"}}}}
+{"openapi":"3.0.0","info":{"title":"Kubernetes","version":"v1.24.0"},"paths":{"/apis/batch/v1beta1/":{"get":{"tags":["batch_v1beta1"],"description":"get available resources","operationId":"getBatchV1beta1APIResources","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"}}}}}}},"/apis/batch/v1beta1/cronjobs":{"get":{"tags":["batch_v1beta1"],"description":"list or watch objects of kind CronJob","operationId":"listBatchV1beta1CronJobForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1beta1/namespaces/{namespace}/cronjobs":{"get":{"tags":["batch_v1beta1"],"description":"list or watch objects of kind CronJob","operationId":"listBatchV1beta1NamespacedCronJob","parameters":[{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}},{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobList"}}}}}},"post":{"tags":["batch_v1beta1"],"description":"create a CronJob","operationId":"createBatchV1beta1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"delete":{"tags":["batch_v1beta1"],"description":"delete collection of CronJob","operationId":"deleteBatchV1beta1CollectionNamespacedCronJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"parameters":[{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}":{"get":{"tags":["batch_v1beta1"],"description":"read the specified CronJob","operationId":"readBatchV1beta1NamespacedCronJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"put":{"tags":["batch_v1beta1"],"description":"replace the specified CronJob","operationId":"replaceBatchV1beta1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"delete":{"tags":["batch_v1beta1"],"description":"delete a CronJob","operationId":"deleteBatchV1beta1NamespacedCronJob","parameters":[{"name":"gracePeriodSeconds","in":"query","description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","schema":{"type":"integer","uniqueItems":true}},{"name":"orphanDependents","in":"query","description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.","schema":{"type":"boolean","uniqueItems":true}},{"name":"propagationPolicy","in":"query","description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","schema":{"type":"string","uniqueItems":true}},{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}},"202":{"description":"Accepted","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"}}}}}},"patch":{"tags":["batch_v1beta1"],"description":"partially update the specified CronJob","operationId":"patchBatchV1beta1NamespacedCronJob","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status":{"get":{"tags":["batch_v1beta1"],"description":"read status of the specified CronJob","operationId":"readBatchV1beta1NamespacedCronJobStatus","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"put":{"tags":["batch_v1beta1"],"description":"replace status of the specified CronJob","operationId":"replaceBatchV1beta1NamespacedCronJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"patch":{"tags":["batch_v1beta1"],"description":"partially update status of the specified CronJob","operationId":"patchBatchV1beta1NamespacedCronJobStatus","parameters":[{"name":"dryRun","in":"query","description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","schema":{"type":"string","uniqueItems":true}},{"name":"force","in":"query","description":"Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.","schema":{"type":"boolean","uniqueItems":true}},{"name":"fieldManager","in":"query","description":"fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).","schema":{"type":"string","uniqueItems":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}},"201":{"description":"Created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}}}}}},"parameters":[{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}}]},"/apis/batch/v1beta1/watch/cronjobs":{"get":{"tags":["batch_v1beta1"],"description":"watch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1beta1CronJobListForAllNamespaces","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1beta1/watch/namespaces/{namespace}/cronjobs":{"get":{"tags":["batch_v1beta1"],"description":"watch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.","operationId":"watchBatchV1beta1NamespacedCronJobList","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. 
Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]},"/apis/batch/v1beta1/watch/namespaces/{namespace}/cronjobs/{name}":{"get":{"tags":["batch_v1beta1"],"description":"watch changes to an object of kind CronJob. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.","operationId":"watchBatchV1beta1NamespacedCronJob","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/json;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/vnd.kubernetes.protobuf;stream=watch":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}},"application/yaml":{"schema":{"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"}}}}}},"parameters":[{"name":"allowWatchBookmarks","in":"query","description":"allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.","schema":{"type":"boolean","uniqueItems":true}},{"name":"continue","in":"query","description":"The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.","schema":{"type":"string","uniqueItems":true}},{"name":"fieldSelector","in":"query","description":"A selector to restrict the list of returned objects by their fields. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"labelSelector","in":"query","description":"A selector to restrict the list of returned objects by their labels. Defaults to everything.","schema":{"type":"string","uniqueItems":true}},{"name":"limit","in":"query","description":"limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.","schema":{"type":"integer","uniqueItems":true}},{"name":"name","in":"path","description":"name of the CronJob","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"namespace","in":"path","description":"object name and auth scope, such as for teams and projects","required":true,"schema":{"type":"string","uniqueItems":true}},{"name":"pretty","in":"query","description":"If 'true', then the output is pretty printed.","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersion","in":"query","description":"resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"resourceVersionMatch","in":"query","description":"resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset","schema":{"type":"string","uniqueItems":true}},{"name":"timeoutSeconds","in":"query","description":"Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.","schema":{"type":"integer","uniqueItems":true}},{"name":"watch","in":"query","description":"Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.","schema":{"type":"boolean","uniqueItems":true}}]}},"components":{"schemas":{"io.k8s.api.batch.v1.JobSpec":{"description":"JobSpec describes how the job execution will look like.","type":"object","required":["template"],"properties":{"activeDeadlineSeconds":{"description":"Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.","type":"integer","format":"int64"},"backoffLimit":{"description":"Specifies the number of retries before marking this job failed. 
Defaults to 6","type":"integer","format":"int32"},"completionMode":{"description":"CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nThis field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.","type":"string"},"completions":{"description":"Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"integer","format":"int32"},"manualSelector":{"description":"manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector","type":"boolean"},"parallelism":{"description":"Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) \u003c .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","type":"integer","format":"int32"},"selector":{"description":"A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"suspend":{"description":"Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. 
Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\nThis field is beta-level, gated by SuspendJob feature flag (enabled by default).","type":"boolean"},"template":{"description":"Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodTemplateSpec"},"ttlSecondsAfterFinished":{"description":"ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.","type":"integer","format":"int32"}}},"io.k8s.api.batch.v1beta1.CronJob":{"description":"CronJob represents the configuration of a single cron job.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobSpec"},"status":{"description":"Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJobStatus"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"CronJob","version":"v1beta1"}]},"io.k8s.api.batch.v1beta1.CronJobList":{"description":"CronJobList is a collection of cron jobs.","type":"object","required":["items"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"items":{"description":"items is the list of CronJobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.CronJob"}},"kind":{"description":"Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"metadata":{"description":"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"}},"x-kubernetes-group-version-kind":[{"group":"batch","kind":"CronJobList","version":"v1beta1"}]},"io.k8s.api.batch.v1beta1.CronJobSpec":{"description":"CronJobSpec describes how the job execution will look like and when it will actually run.","type":"object","required":["schedule","jobTemplate"],"properties":{"concurrencyPolicy":{"description":"Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one","type":"string"},"failedJobsHistoryLimit":{"description":"The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.","type":"integer","format":"int32"},"jobTemplate":{"description":"Specifies the job that will be created when executing a CronJob.","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1beta1.JobTemplateSpec"},"schedule":{"description":"The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.","type":"string","default":""},"startingDeadlineSeconds":{"description":"Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.","type":"integer","format":"int64"},"successfulJobsHistoryLimit":{"description":"The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.","type":"integer","format":"int32"},"suspend":{"description":"This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.","type":"boolean"}}},"io.k8s.api.batch.v1beta1.CronJobStatus":{"description":"CronJobStatus represents the current state of a cron job.","type":"object","properties":{"active":{"description":"A list of pointers to currently running jobs.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectReference"},"x-kubernetes-list-type":"atomic"},"lastScheduleTime":{"description":"Information when was the last time the job was successfully scheduled.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"lastSuccessfulTime":{"description":"Information when was the last time the job successfully completed.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"}}},"io.k8s.api.batch.v1beta1.JobTemplateSpec":{"description":"JobTemplateSpec describes the data a Job should have when created from a template","type":"object","properties":{"metadata":{"description":"Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of the job. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.batch.v1.JobSpec"}}},"io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource":{"description":"Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"string"},"partition":{"description":"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).","type":"integer","format":"int32"},"readOnly":{"description":"readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"boolean"},"volumeID":{"description":"volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","type":"string","default":""}}},"io.k8s.api.core.v1.Affinity":{"description":"Affinity is a group of affinity scheduling rules.","type":"object","properties":{"nodeAffinity":{"description":"Describes node affinity scheduling rules for the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.NodeAffinity"},"podAffinity":{"description":"Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).","$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinity"},"podAntiAffinity":{"description":"Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).","$ref":"#/components/schemas/io.k8s.api.core.v1.PodAntiAffinity"}}},"io.k8s.api.core.v1.AzureDiskVolumeSource":{"description":"AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.","type":"object","required":["diskName","diskURI"],"properties":{"cachingMode":{"description":"cachingMode is the Host Caching mode: None, Read Only, Read Write.","type":"string"},"diskName":{"description":"diskName is the Name of the data disk in the blob storage","type":"string","default":""},"diskURI":{"description":"diskURI is the URI of data disk in the blob storage","type":"string","default":""},"fsType":{"description":"fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"kind":{"description":"kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared","type":"string"},"readOnly":{"description":"readOnly Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"}}},"io.k8s.api.core.v1.AzureFileVolumeSource":{"description":"AzureFile represents an Azure File Service mount on the host and bind mount to the pod.","type":"object","required":["secretName","shareName"],"properties":{"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretName":{"description":"secretName is the name of secret that contains Azure Storage Account Name and Key","type":"string","default":""},"shareName":{"description":"shareName is the azure share Name","type":"string","default":""}}},"io.k8s.api.core.v1.CSIVolumeSource":{"description":"Represents a source location of a volume to mount, managed by an external CSI driver","type":"object","required":["driver"],"properties":{"driver":{"description":"driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.","type":"string","default":""},"fsType":{"description":"fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.","type":"string"},"nodePublishSecretRef":{"description":"nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"readOnly":{"description":"readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).","type":"boolean"},"volumeAttributes":{"description":"volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.","type":"object","additionalProperties":{"type":"string","default":""}}}},"io.k8s.api.core.v1.Capabilities":{"description":"Adds and removes POSIX capabilities from running containers.","type":"object","properties":{"add":{"description":"Added capabilities","type":"array","items":{"type":"string","default":""}},"drop":{"description":"Removed capabilities","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.CephFSVolumeSource":{"description":"Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.","type":"object","required":["monitors"],"properties":{"monitors":{"description":"monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"array","items":{"type":"string","default":""}},"path":{"description":"path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /","type":"string"},"readOnly":{"description":"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"boolean"},"secretFile":{"description":"secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"string"},"secretRef":{"description":"secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"user":{"description":"user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it","type":"string"}}},"io.k8s.api.core.v1.CinderVolumeSource":{"description":"Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"boolean"},"secretRef":{"description":"secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"volumeID":{"description":"volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","type":"string","default":""}}},"io.k8s.api.core.v1.ConfigMapEnvSource":{"description":"ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the ConfigMap must be defined","type":"boolean"}}},"io.k8s.api.core.v1.ConfigMapKeySelector":{"description":"Selects a key from a ConfigMap.","type":"object","required":["key"],"properties":{"key":{"description":"The key to select.","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the ConfigMap or its key must be defined","type":"boolean"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ConfigMapProjection":{"description":"Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. 
Note that this is identical to a configmap volume source without the default mode.","type":"object","properties":{"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional specify whether the ConfigMap or its keys must be defined","type":"boolean"}}},"io.k8s.api.core.v1.ConfigMapVolumeSource":{"description":"Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional specify whether the ConfigMap or its keys must be defined","type":"boolean"}}},"io.k8s.api.core.v1.Container":{"description":"A single application container that you want to run within a pod.","type":"object","required":["name"],"properties":{"args":{"description":"Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"command":{"description":"Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"env":{"description":"List of environment variables to set in the container. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVar"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"envFrom":{"description":"List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvFromSource"}},"image":{"description":"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.","type":"string"},"imagePullPolicy":{"description":"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\n","type":"string"},"lifecycle":{"description":"Actions that the management system should take in response to container lifecycle events. Cannot be updated.","$ref":"#/components/schemas/io.k8s.api.core.v1.Lifecycle"},"livenessProbe":{"description":"Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"name":{"description":"Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.","type":"string","default":""},"ports":{"description":"List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. 
Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ContainerPort"},"x-kubernetes-list-map-keys":["containerPort","protocol"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"containerPort","x-kubernetes-patch-strategy":"merge"},"readinessProbe":{"description":"Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"resources":{"description":"Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"securityContext":{"description":"SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/","$ref":"#/components/schemas/io.k8s.api.core.v1.SecurityContext"},"startupProbe":{"description":"StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"stdin":{"description":"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.","type":"boolean"},"stdinOnce":{"description":"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false","type":"boolean"},"terminationMessagePath":{"description":"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.","type":"string"},"terminationMessagePolicy":{"description":"Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\n","type":"string"},"tty":{"description":"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.","type":"boolean"},"volumeDevices":{"description":"volumeDevices is the list of block devices to be used by the container.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeDevice"},"x-kubernetes-patch-merge-key":"devicePath","x-kubernetes-patch-strategy":"merge"},"volumeMounts":{"description":"Pod volumes to mount into the container's filesystem. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeMount"},"x-kubernetes-patch-merge-key":"mountPath","x-kubernetes-patch-strategy":"merge"},"workingDir":{"description":"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.","type":"string"}}},"io.k8s.api.core.v1.ContainerPort":{"description":"ContainerPort represents a network port in a single container.","type":"object","required":["containerPort"],"properties":{"containerPort":{"description":"Number of port to expose on the pod's IP address. This must be a valid port number, 0 \u003c x \u003c 65536.","type":"integer","format":"int32","default":0},"hostIP":{"description":"What host IP to bind the external port to.","type":"string"},"hostPort":{"description":"Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.","type":"integer","format":"int32"},"name":{"description":"If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.","type":"string"},"protocol":{"description":"Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".\n\n","type":"string","default":"TCP"}}},"io.k8s.api.core.v1.DownwardAPIProjection":{"description":"Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.","type":"object","properties":{"items":{"description":"Items is a list of DownwardAPIVolume file","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile"}}}},"io.k8s.api.core.v1.DownwardAPIVolumeFile":{"description":"DownwardAPIVolumeFile represents information to create the file containing the pod field","type":"object","required":["path"],"properties":{"fieldRef":{"description":"Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector"},"mode":{"description":"Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"path":{"description":"Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'","type":"string","default":""},"resourceFieldRef":{"description":"Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector"}}},"io.k8s.api.core.v1.DownwardAPIVolumeSource":{"description":"DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"Items is a list of downward API volume file","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile"}}}},"io.k8s.api.core.v1.EmptyDirVolumeSource":{"description":"Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.","type":"object","properties":{"medium":{"description":"medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","type":"string"},"sizeLimit":{"description":"sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}}},"io.k8s.api.core.v1.EnvFromSource":{"description":"EnvFromSource represents the source of a set of ConfigMaps","type":"object","properties":{"configMapRef":{"description":"The ConfigMap to select from","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapEnvSource"},"prefix":{"description":"An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.","type":"string"},"secretRef":{"description":"The Secret to select from","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretEnvSource"}}},"io.k8s.api.core.v1.EnvVar":{"description":"EnvVar represents an environment variable present in a Container.","type":"object","required":["name"],"properties":{"name":{"description":"Name of the environment variable. 
Must be a C_IDENTIFIER.","type":"string","default":""},"value":{"description":"Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".","type":"string"},"valueFrom":{"description":"Source for the environment variable's value. Cannot be used if value is not empty.","$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVarSource"}}},"io.k8s.api.core.v1.EnvVarSource":{"description":"EnvVarSource represents a source for the value of an EnvVar.","type":"object","properties":{"configMapKeyRef":{"description":"Selects a key of a ConfigMap.","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapKeySelector"},"fieldRef":{"description":"Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.","$ref":"#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector"},"resourceFieldRef":{"description":"Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.","$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector"},"secretKeyRef":{"description":"Selects a key of a secret in the pod's namespace","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretKeySelector"}}},"io.k8s.api.core.v1.EphemeralContainer":{"description":"An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.","type":"object","required":["name"],"properties":{"args":{"description":"Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"command":{"description":"Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell","type":"array","items":{"type":"string","default":""}},"env":{"description":"List of environment variables to set in the container. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvVar"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"envFrom":{"description":"List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EnvFromSource"}},"image":{"description":"Container image name. More info: https://kubernetes.io/docs/concepts/containers/images","type":"string"},"imagePullPolicy":{"description":"Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\n","type":"string"},"lifecycle":{"description":"Lifecycle is not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Lifecycle"},"livenessProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"name":{"description":"Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.","type":"string","default":""},"ports":{"description":"Ports are not allowed for ephemeral containers.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ContainerPort"},"x-kubernetes-list-map-keys":["containerPort","protocol"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"containerPort","x-kubernetes-patch-strategy":"merge"},"readinessProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"resources":{"description":"Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"securityContext":{"description":"Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.","$ref":"#/components/schemas/io.k8s.api.core.v1.SecurityContext"},"startupProbe":{"description":"Probes are not allowed for ephemeral containers.","$ref":"#/components/schemas/io.k8s.api.core.v1.Probe"},"stdin":{"description":"Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false.","type":"boolean"},"stdinOnce":{"description":"Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false","type":"boolean"},"targetContainerName":{"description":"If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.","type":"string"},"terminationMessagePath":{"description":"Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.","type":"string"},"terminationMessagePolicy":{"description":"Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\n","type":"string"},"tty":{"description":"Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.","type":"boolean"},"volumeDevices":{"description":"volumeDevices is the list of block devices to be used by the container.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeDevice"},"x-kubernetes-patch-merge-key":"devicePath","x-kubernetes-patch-strategy":"merge"},"volumeMounts":{"description":"Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeMount"},"x-kubernetes-patch-merge-key":"mountPath","x-kubernetes-patch-strategy":"merge"},"workingDir":{"description":"Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.","type":"string"}}},"io.k8s.api.core.v1.EphemeralVolumeSource":{"description":"Represents an ephemeral volume that is handled by a normal storage driver.","type":"object","properties":{"volumeClaimTemplate":{"description":"Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. 
The name of the PVC will be `\u003cpod name\u003e-\u003cvolume name\u003e` where `\u003cvolume name\u003e` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.","$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimTemplate"}}},"io.k8s.api.core.v1.ExecAction":{"description":"ExecAction describes a \"run in container\" action.","type":"object","properties":{"command":{"description":"Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.FCVolumeSource":{"description":"Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.","type":"object","properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"lun":{"description":"lun is Optional: FC target lun number","type":"integer","format":"int32"},"readOnly":{"description":"readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"targetWWNs":{"description":"targetWWNs is Optional: FC target worldwide names (WWNs)","type":"array","items":{"type":"string","default":""}},"wwids":{"description":"wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.FlexVolumeSource":{"description":"FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.","type":"object","required":["driver"],"properties":{"driver":{"description":"driver is the name of the driver to use for this volume.","type":"string","default":""},"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.","type":"string"},"options":{"description":"options is Optional: this field holds extra command options if any.","type":"object","additionalProperties":{"type":"string","default":""}},"readOnly":{"description":"readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"}}},"io.k8s.api.core.v1.FlockerVolumeSource":{"description":"Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.","type":"object","properties":{"datasetName":{"description":"datasetName is Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated","type":"string"},"datasetUUID":{"description":"datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset","type":"string"}}},"io.k8s.api.core.v1.GCEPersistentDiskVolumeSource":{"description":"Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.","type":"object","required":["pdName"],"properties":{"fsType":{"description":"fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"string"},"partition":{"description":"partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"integer","format":"int32"},"pdName":{"description":"pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"string","default":""},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","type":"boolean"}}},"io.k8s.api.core.v1.GRPCAction":{"type":"object","required":["port"],"properties":{"port":{"description":"Port number of the gRPC service. Number must be in the range 1 to 65535.","type":"integer","format":"int32","default":0},"service":{"description":"Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.","type":"string","default":""}}},"io.k8s.api.core.v1.GitRepoVolumeSource":{"description":"Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.","type":"object","required":["repository"],"properties":{"directory":{"description":"directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.","type":"string"},"repository":{"description":"repository is the URL","type":"string","default":""},"revision":{"description":"revision is the commit hash for the specified revision.","type":"string"}}},"io.k8s.api.core.v1.GlusterfsVolumeSource":{"description":"Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.","type":"object","required":["endpoints","path"],"properties":{"endpoints":{"description":"endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"string","default":""},"path":{"description":"path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"string","default":""},"readOnly":{"description":"readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod","type":"boolean"}}},"io.k8s.api.core.v1.HTTPGetAction":{"description":"HTTPGetAction describes an action based on HTTP Get requests.","type":"object","required":["port"],"properties":{"host":{"description":"Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.","type":"string"},"httpHeaders":{"description":"Custom headers to set in the request. HTTP allows repeated headers.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPHeader"}},"path":{"description":"Path to access on the HTTP server.","type":"string"},"port":{"description":"Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"},"scheme":{"description":"Scheme to use for connecting to the host. Defaults to HTTP.\n\n","type":"string"}}},"io.k8s.api.core.v1.HTTPHeader":{"description":"HTTPHeader describes a custom header to be used in HTTP probes","type":"object","required":["name","value"],"properties":{"name":{"description":"The header field name","type":"string","default":""},"value":{"description":"The header field value","type":"string","default":""}}},"io.k8s.api.core.v1.HostAlias":{"description":"HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.","type":"object","properties":{"hostnames":{"description":"Hostnames for the above IP address.","type":"array","items":{"type":"string","default":""}},"ip":{"description":"IP address of the host file entry.","type":"string"}}},"io.k8s.api.core.v1.HostPathVolumeSource":{"description":"Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.","type":"object","required":["path"],"properties":{"path":{"description":"path of the directory on the host. 
If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","type":"string","default":""},"type":{"description":"type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","type":"string"}}},"io.k8s.api.core.v1.ISCSIVolumeSource":{"description":"Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.","type":"object","required":["targetPortal","iqn","lun"],"properties":{"chapAuthDiscovery":{"description":"chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication","type":"boolean"},"chapAuthSession":{"description":"chapAuthSession defines whether support iSCSI Session CHAP authentication","type":"boolean"},"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi","type":"string"},"initiatorName":{"description":"initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.","type":"string"},"iqn":{"description":"iqn is the target iSCSI Qualified Name.","type":"string","default":""},"iscsiInterface":{"description":"iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).","type":"string"},"lun":{"description":"lun represents iSCSI Target Lun number.","type":"integer","format":"int32","default":0},"portals":{"description":"portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).","type":"array","items":{"type":"string","default":""}},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.","type":"boolean"},"secretRef":{"description":"secretRef is the CHAP Secret for iSCSI target and initiator authentication","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"targetPortal":{"description":"targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).","type":"string","default":""}}},"io.k8s.api.core.v1.KeyToPath":{"description":"Maps a string key to a path within a volume.","type":"object","required":["key","path"],"properties":{"key":{"description":"key is the key to project.","type":"string","default":""},"mode":{"description":"mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"path":{"description":"path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
May not start with the string '..'.","type":"string","default":""}}},"io.k8s.api.core.v1.Lifecycle":{"description":"Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.","type":"object","properties":{"postStart":{"description":"PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks","$ref":"#/components/schemas/io.k8s.api.core.v1.LifecycleHandler"},"preStop":{"description":"PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks","$ref":"#/components/schemas/io.k8s.api.core.v1.LifecycleHandler"}}},"io.k8s.api.core.v1.LifecycleHandler":{"description":"LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.","type":"object","properties":{"exec":{"description":"Exec specifies the action to take.","$ref":"#/components/schemas/io.k8s.api.core.v1.ExecAction"},"httpGet":{"description":"HTTPGet specifies the http request to perform.","$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPGetAction"},"tcpSocket":{"description":"Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.","$ref":"#/components/schemas/io.k8s.api.core.v1.TCPSocketAction"}}},"io.k8s.api.core.v1.LocalObjectReference":{"description":"LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.NFSVolumeSource":{"description":"Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.","type":"object","required":["server","path"],"properties":{"path":{"description":"path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"string","default":""},"readOnly":{"description":"readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"boolean"},"server":{"description":"server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","type":"string","default":""}}},"io.k8s.api.core.v1.NodeAffinity":{"description":"Node affinity is a group of node affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PreferredSchedulingTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.","$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelector"}}},"io.k8s.api.core.v1.NodeSelector":{"description":"A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.","type":"object","required":["nodeSelectorTerms"],"properties":{"nodeSelectorTerms":{"description":"Required. A list of node selector terms. The terms are ORed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm"}}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.NodeSelectorRequirement":{"description":"A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.","type":"object","required":["key","operator"],"properties":{"key":{"description":"The label key that the selector applies to.","type":"string","default":""},"operator":{"description":"Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\n\n","type":"string","default":""},"values":{"description":"An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.NodeSelectorTerm":{"description":"A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.","type":"object","properties":{"matchExpressions":{"description":"A list of node selector requirements by node's labels.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement"}},"matchFields":{"description":"A list of node selector requirements by node's fields.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement"}}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ObjectFieldSelector":{"description":"ObjectFieldSelector selects an APIVersioned field of an object.","type":"object","required":["fieldPath"],"properties":{"apiVersion":{"description":"Version of the schema the FieldPath is written in terms of, defaults to \"v1\".","type":"string"},"fieldPath":{"description":"Path of the field to select in the specified API version.","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ObjectReference":{"description":"ObjectReference contains enough information to let you inspect or modify the referred object.","type":"object","properties":{"apiVersion":{"description":"API version of the referent.","type":"string"},"fieldPath":{"description":"If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.","type":"string"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"namespace":{"description":"Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/","type":"string"},"resourceVersion":{"description":"Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"uid":{"description":"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids","type":"string"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.PersistentVolumeClaimSpec":{"description":"PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes","type":"object","properties":{"accessModes":{"description":"accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1","type":"array","items":{"type":"string","default":""}},"dataSource":{"description":"dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.","$ref":"#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference"},"dataSourceRef":{"description":"dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.","$ref":"#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference"},"resources":{"description":"resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.ResourceRequirements"},"selector":{"description":"selector is a label query over volumes to consider for binding.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"storageClassName":{"description":"storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1","type":"string"},"volumeMode":{"description":"volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.","type":"string"},"volumeName":{"description":"volumeName is the binding reference to the PersistentVolume backing this claim.","type":"string"}}},"io.k8s.api.core.v1.PersistentVolumeClaimTemplate":{"description":"PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.","type":"object","required":["spec"],"properties":{"metadata":{"description":"May contain labels and annotations that will be copied into the PVC when creating it. 
No other fields are allowed and will be rejected during validation.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimSpec"}}},"io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource":{"description":"PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).","type":"object","required":["claimName"],"properties":{"claimName":{"description":"claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims","type":"string","default":""},"readOnly":{"description":"readOnly Will force the ReadOnly setting in VolumeMounts. Default false.","type":"boolean"}}},"io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource":{"description":"Represents a Photon Controller persistent disk resource.","type":"object","required":["pdID"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"pdID":{"description":"pdID is the ID that identifies Photon Controller persistent disk","type":"string","default":""}}},"io.k8s.api.core.v1.PodAffinity":{"description":"Pod affinity is a group of inter pod affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"}}}},"io.k8s.api.core.v1.PodAffinityTerm":{"description":"Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running","type":"object","required":["topologyKey"],"properties":{"labelSelector":{"description":"A label query over a set of resources, in this case pods.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"namespaceSelector":{"description":"A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"namespaces":{"description":"namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"","type":"array","items":{"type":"string","default":""}},"topologyKey":{"description":"This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.","type":"string","default":""}}},"io.k8s.api.core.v1.PodAntiAffinity":{"description":"Pod anti affinity is a group of inter pod anti affinity scheduling rules.","type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"description":"The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm"}},"requiredDuringSchedulingIgnoredDuringExecution":{"description":"If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"}}}},"io.k8s.api.core.v1.PodDNSConfig":{"description":"PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.","type":"object","properties":{"nameservers":{"description":"A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.","type":"array","items":{"type":"string","default":""}},"options":{"description":"A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodDNSConfigOption"}},"searches":{"description":"A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.api.core.v1.PodDNSConfigOption":{"description":"PodDNSConfigOption defines DNS resolver options of a pod.","type":"object","properties":{"name":{"description":"Required.","type":"string"},"value":{"type":"string"}}},"io.k8s.api.core.v1.PodOS":{"description":"PodOS defines the OS parameters of a pod.","type":"object","required":["name"],"properties":{"name":{"description":"Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null","type":"string","default":""}}},"io.k8s.api.core.v1.PodReadinessGate":{"description":"PodReadinessGate contains the reference to a pod condition","type":"object","required":["conditionType"],"properties":{"conditionType":{"description":"ConditionType refers to a condition in the pod's condition list with matching type.\n\n","type":"string","default":""}}},"io.k8s.api.core.v1.PodSecurityContext":{"description":"PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.","type":"object","properties":{"fsGroup":{"description":"A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"fsGroupChangePolicy":{"description":"fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). 
It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.","type":"string"},"runAsGroup":{"description":"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"runAsNonRoot":{"description":"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"boolean"},"runAsUser":{"description":"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"seLinuxOptions":{"description":"The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SELinuxOptions"},"seccompProfile":{"description":"The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SeccompProfile"},"supplementalGroups":{"description":"A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.","type":"array","items":{"type":"integer","format":"int64","default":0}},"sysctls":{"description":"Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Sysctl"}},"windowsOptions":{"description":"The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is linux.","$ref":"#/components/schemas/io.k8s.api.core.v1.WindowsSecurityContextOptions"}}},"io.k8s.api.core.v1.PodSpec":{"description":"PodSpec is a description of a pod.","type":"object","required":["containers"],"properties":{"activeDeadlineSeconds":{"description":"Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.","type":"integer","format":"int64"},"affinity":{"description":"If specified, the pod's scheduling constraints","$ref":"#/components/schemas/io.k8s.api.core.v1.Affinity"},"automountServiceAccountToken":{"description":"AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.","type":"boolean"},"containers":{"description":"List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Container"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"dnsConfig":{"description":"Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.","$ref":"#/components/schemas/io.k8s.api.core.v1.PodDNSConfig"},"dnsPolicy":{"description":"Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\n","type":"string"},"enableServiceLinks":{"description":"EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.","type":"boolean"},"ephemeralContainers":{"description":"List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.EphemeralContainer"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"hostAliases":{"description":"HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.HostAlias"},"x-kubernetes-patch-merge-key":"ip","x-kubernetes-patch-strategy":"merge"},"hostIPC":{"description":"Use the host's ipc namespace. Optional: Default to false.","type":"boolean"},"hostNetwork":{"description":"Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.","type":"boolean"},"hostPID":{"description":"Use the host's pid namespace. 
Optional: Default to false.","type":"boolean"},"hostname":{"description":"Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.","type":"string"},"imagePullSecrets":{"description":"ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"initContainers":{"description":"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Container"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge"},"nodeName":{"description":"NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.","type":"string"},"nodeSelector":{"description":"NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/","type":"object","additionalProperties":{"type":"string","default":""},"x-kubernetes-map-type":"atomic"},"os":{"description":"Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is an alpha field and requires the IdentifyPodOS feature","$ref":"#/components/schemas/io.k8s.api.core.v1.PodOS"},"overhead":{"description":"Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}},"preemptionPolicy":{"description":"PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.","type":"string"},"priority":{"description":"The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.","type":"integer","format":"int32"},"priorityClassName":{"description":"If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.","type":"string"},"readinessGates":{"description":"If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodReadinessGate"}},"restartPolicy":{"description":"Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n\n","type":"string"},"runtimeClassName":{"description":"RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14.","type":"string"},"schedulerName":{"description":"If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.","type":"string"},"securityContext":{"description":"SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.","$ref":"#/components/schemas/io.k8s.api.core.v1.PodSecurityContext"},"serviceAccount":{"description":"DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.","type":"string"},"serviceAccountName":{"description":"ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/","type":"string"},"setHostnameAsFQDN":{"description":"If true, the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.","type":"boolean"},"shareProcessNamespace":{"description":"Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.","type":"boolean"},"subdomain":{"description":"If specified, the fully qualified Pod hostname will be \"\u003chostname\u003e.\u003csubdomain\u003e.\u003cpod namespace\u003e.svc.\u003ccluster domain\u003e\". If not specified, the pod will not have a domainname at all.","type":"string"},"terminationGracePeriodSeconds":{"description":"Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. 
The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.","type":"integer","format":"int64"},"tolerations":{"description":"If specified, the pod's tolerations.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Toleration"}},"topologySpreadConstraints":{"description":"TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.TopologySpreadConstraint"},"x-kubernetes-list-map-keys":["topologyKey","whenUnsatisfiable"],"x-kubernetes-list-type":"map","x-kubernetes-patch-merge-key":"topologyKey","x-kubernetes-patch-strategy":"merge"},"volumes":{"description":"List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.Volume"},"x-kubernetes-patch-merge-key":"name","x-kubernetes-patch-strategy":"merge,retainKeys"}}},"io.k8s.api.core.v1.PodTemplateSpec":{"description":"PodTemplateSpec describes the data a pod should have when created from a template","type":"object","properties":{"metadata":{"description":"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"},"spec":{"description":"Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodSpec"}}},"io.k8s.api.core.v1.PortworxVolumeSource":{"description":"PortworxVolumeSource represents a Portworx volume resource.","type":"object","required":["volumeID"],"properties":{"fsType":{"description":"fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"volumeID":{"description":"volumeID uniquely identifies a Portworx volume","type":"string","default":""}}},"io.k8s.api.core.v1.PreferredSchedulingTerm":{"description":"An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).","type":"object","required":["weight","preference"],"properties":{"preference":{"description":"A node selector term, associated with the corresponding weight.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm"},"weight":{"description":"Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.","type":"integer","format":"int32","default":0}}},"io.k8s.api.core.v1.Probe":{"description":"Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.","type":"object","properties":{"exec":{"description":"Exec specifies the action to take.","$ref":"#/components/schemas/io.k8s.api.core.v1.ExecAction"},"failureThreshold":{"description":"Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.","type":"integer","format":"int32"},"grpc":{"description":"GRPC specifies an action involving a GRPC port.","$ref":"#/components/schemas/io.k8s.api.core.v1.GRPCAction"},"httpGet":{"description":"HTTPGet specifies the http request to perform.","$ref":"#/components/schemas/io.k8s.api.core.v1.HTTPGetAction"},"initialDelaySeconds":{"description":"Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","type":"integer","format":"int32"},"periodSeconds":{"description":"How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.","type":"integer","format":"int32"},"successThreshold":{"description":"Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.","type":"integer","format":"int32"},"tcpSocket":{"description":"TCPSocket specifies an action involving a TCP port.","$ref":"#/components/schemas/io.k8s.api.core.v1.TCPSocketAction"},"terminationGracePeriodSeconds":{"description":"Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.","type":"integer","format":"int64"},"timeoutSeconds":{"description":"Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes","type":"integer","format":"int32"}}},"io.k8s.api.core.v1.ProjectedVolumeSource":{"description":"Represents a projected volume source","type":"object","properties":{"defaultMode":{"description":"defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"sources":{"description":"sources is the list of volume projections","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.VolumeProjection"}}}},"io.k8s.api.core.v1.QuobyteVolumeSource":{"description":"Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.","type":"object","required":["registry","volume"],"properties":{"group":{"description":"group to map volume access to. Default is no group","type":"string"},"readOnly":{"description":"readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.","type":"boolean"},"registry":{"description":"registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes","type":"string","default":""},"tenant":{"description":"tenant owning the given Quobyte volume in the Backend. Used with dynamically provisioned Quobyte volumes, value is set by the plugin","type":"string"},"user":{"description":"user to map volume access to. Defaults to serviceaccount user","type":"string"},"volume":{"description":"volume is a string that references an already created Quobyte volume by name.","type":"string","default":""}}},"io.k8s.api.core.v1.RBDVolumeSource":{"description":"Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.","type":"object","required":["monitors","image"],"properties":{"fsType":{"description":"fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd","type":"string"},"image":{"description":"image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string","default":""},"keyring":{"description":"keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"},"monitors":{"description":"monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"array","items":{"type":"string","default":""}},"pool":{"description":"pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"},"readOnly":{"description":"readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"boolean"},"secretRef":{"description":"secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"user":{"description":"user is the rados user name. Default is admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it","type":"string"}}},"io.k8s.api.core.v1.ResourceFieldSelector":{"description":"ResourceFieldSelector represents container resources (cpu, memory) and their output format","type":"object","required":["resource"],"properties":{"containerName":{"description":"Container name: required for volumes, optional for env vars","type":"string"},"divisor":{"description":"Specifies the output format of the exposed resources, defaults to \"1\"","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"},"resource":{"description":"Required: resource to select","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.ResourceRequirements":{"description":"ResourceRequirements describes the compute resource requirements.","type":"object","properties":{"limits":{"description":"Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}},"requests":{"description":"Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/","type":"object","additionalProperties":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"}}}},"io.k8s.api.core.v1.SELinuxOptions":{"description":"SELinuxOptions are the labels to be applied to the container","type":"object","properties":{"level":{"description":"Level is SELinux level label that applies to the container.","type":"string"},"role":{"description":"Role is a SELinux role label that applies to the container.","type":"string"},"type":{"description":"Type is a SELinux type label that applies to the container.","type":"string"},"user":{"description":"User is a SELinux user label that applies to the container.","type":"string"}}},"io.k8s.api.core.v1.ScaleIOVolumeSource":{"description":"ScaleIOVolumeSource represents a persistent ScaleIO volume","type":"object","required":["gateway","system","secretRef"],"properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".","type":"string"},"gateway":{"description":"gateway is the host address of the ScaleIO API Gateway.","type":"string","default":""},"protectionDomain":{"description":"protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.","type":"string"},"readOnly":{"description":"readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"sslEnabled":{"description":"sslEnabled Flag enable/disable SSL communication with Gateway, default false","type":"boolean"},"storageMode":{"description":"storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.","type":"string"},"storagePool":{"description":"storagePool is the ScaleIO Storage Pool associated with the protection domain.","type":"string"},"system":{"description":"system is the name of the storage system as configured in ScaleIO.","type":"string","default":""},"volumeName":{"description":"volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.","type":"string"}}},"io.k8s.api.core.v1.SeccompProfile":{"description":"SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.","type":"object","required":["type"],"properties":{"localhostProfile":{"description":"localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".","type":"string"},"type":{"description":"type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.\n\n","type":"string","default":""}},"x-kubernetes-unions":[{"discriminator":"type","fields-to-discriminateBy":{"localhostProfile":"LocalhostProfile"}}]},"io.k8s.api.core.v1.SecretEnvSource":{"description":"SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.","type":"object","properties":{"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the Secret must be defined","type":"boolean"}}},"io.k8s.api.core.v1.SecretKeySelector":{"description":"SecretKeySelector selects a key of a Secret.","type":"object","required":["key"],"properties":{"key":{"description":"The key of the secret to select from. Must be a valid secret key.","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"Specify whether the Secret or its key must be defined","type":"boolean"}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.SecretProjection":{"description":"Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.","type":"object","properties":{"items":{"description":"items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"name":{"description":"Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string"},"optional":{"description":"optional field specify whether the Secret or its key must be defined","type":"boolean"}}},"io.k8s.api.core.v1.SecretVolumeSource":{"description":"Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.","type":"object","properties":{"defaultMode":{"description":"defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.","type":"integer","format":"int32"},"items":{"description":"items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.KeyToPath"}},"optional":{"description":"optional field specify whether the Secret or its keys must be defined","type":"boolean"},"secretName":{"description":"secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret","type":"string"}}},"io.k8s.api.core.v1.SecurityContext":{"description":"SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.","type":"object","properties":{"allowPrivilegeEscalation":{"description":"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"capabilities":{"description":"The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.Capabilities"},"privileged":{"description":"Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"procMount":{"description":"procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. 
Note that this field cannot be set when spec.os.name is windows.","type":"string"},"readOnlyRootFilesystem":{"description":"Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.","type":"boolean"},"runAsGroup":{"description":"The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"runAsNonRoot":{"description":"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"boolean"},"runAsUser":{"description":"The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","type":"integer","format":"int64"},"seLinuxOptions":{"description":"The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SELinuxOptions"},"seccompProfile":{"description":"The seccomp options to use by this container. If seccomp options are provided at both the pod \u0026 container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.","$ref":"#/components/schemas/io.k8s.api.core.v1.SeccompProfile"},"windowsOptions":{"description":"The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.","$ref":"#/components/schemas/io.k8s.api.core.v1.WindowsSecurityContextOptions"}}},"io.k8s.api.core.v1.ServiceAccountTokenProjection":{"description":"ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).","type":"object","required":["path"],"properties":{"audience":{"description":"audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.","type":"string"},"expirationSeconds":{"description":"expirationSeconds is the requested duration of validity of the service account token. 
As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.","type":"integer","format":"int64"},"path":{"description":"path is the path relative to the mount point of the file to project the token into.","type":"string","default":""}}},"io.k8s.api.core.v1.StorageOSVolumeSource":{"description":"Represents a StorageOS persistent volume resource.","type":"object","properties":{"fsType":{"description":"fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"readOnly":{"description":"readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.","type":"boolean"},"secretRef":{"description":"secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.","$ref":"#/components/schemas/io.k8s.api.core.v1.LocalObjectReference"},"volumeName":{"description":"volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.","type":"string"},"volumeNamespace":{"description":"volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.","type":"string"}}},"io.k8s.api.core.v1.Sysctl":{"description":"Sysctl defines a kernel parameter to be set","type":"object","required":["name","value"],"properties":{"name":{"description":"Name of a property to set","type":"string","default":""},"value":{"description":"Value of a property to set","type":"string","default":""}}},"io.k8s.api.core.v1.TCPSocketAction":{"description":"TCPSocketAction describes an action based on opening a socket","type":"object","required":["port"],"properties":{"host":{"description":"Optional: Host name to connect to, defaults to the pod IP.","type":"string"},"port":{"description":"Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.util.intstr.IntOrString"}}},"io.k8s.api.core.v1.Toleration":{"description":"The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.","type":"object","properties":{"effect":{"description":"Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n\n","type":"string"},"key":{"description":"Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.","type":"string"},"operator":{"description":"Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.\n\n","type":"string"},"tolerationSeconds":{"description":"TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.","type":"integer","format":"int64"},"value":{"description":"Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.","type":"string"}}},"io.k8s.api.core.v1.TopologySpreadConstraint":{"description":"TopologySpreadConstraint specifies how to spread matching pods among the given topology.","type":"object","required":["maxSkew","topologyKey","whenUnsatisfiable"],"properties":{"labelSelector":{"description":"LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"},"maxSkew":{"description":"MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.","type":"integer","format":"int32","default":0},"topologyKey":{"description":"TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.","type":"string","default":""},"whenUnsatisfiable":{"description":"WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. 
It's a required field.\n\n","type":"string","default":""}}},"io.k8s.api.core.v1.TypedLocalObjectReference":{"description":"TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.","type":"object","required":["kind","name"],"properties":{"apiGroup":{"description":"APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.","type":"string"},"kind":{"description":"Kind is the type of resource being referenced","type":"string","default":""},"name":{"description":"Name is the name of resource being referenced","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.api.core.v1.Volume":{"description":"Volume represents a named volume in a pod that may be accessed by any container in the pod.","type":"object","required":["name"],"properties":{"awsElasticBlockStore":{"description":"awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore","$ref":"#/components/schemas/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource"},"azureDisk":{"description":"azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.AzureDiskVolumeSource"},"azureFile":{"description":"azureFile represents an Azure File Service mount on the host and bind mount to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.AzureFileVolumeSource"},"cephfs":{"description":"cephFS represents a Ceph FS mount on the host that shares a pod's lifetime","$ref":"#/components/schemas/io.k8s.api.core.v1.CephFSVolumeSource"},"cinder":{"description":"cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.CinderVolumeSource"},"configMap":{"description":"configMap represents a configMap that should populate this volume","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapVolumeSource"},"csi":{"description":"csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).","$ref":"#/components/schemas/io.k8s.api.core.v1.CSIVolumeSource"},"downwardAPI":{"description":"downwardAPI represents downward API about the pod that should populate this volume","$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeSource"},"emptyDir":{"description":"emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir","$ref":"#/components/schemas/io.k8s.api.core.v1.EmptyDirVolumeSource"},"ephemeral":{"description":"ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.","$ref":"#/components/schemas/io.k8s.api.core.v1.EphemeralVolumeSource"},"fc":{"description":"fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.","$ref":"#/components/schemas/io.k8s.api.core.v1.FCVolumeSource"},"flexVolume":{"description":"flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.","$ref":"#/components/schemas/io.k8s.api.core.v1.FlexVolumeSource"},"flocker":{"description":"flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running","$ref":"#/components/schemas/io.k8s.api.core.v1.FlockerVolumeSource"},"gcePersistentDisk":{"description":"gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk","$ref":"#/components/schemas/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource"},"gitRepo":{"description":"gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.","$ref":"#/components/schemas/io.k8s.api.core.v1.GitRepoVolumeSource"},"glusterfs":{"description":"glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.GlusterfsVolumeSource"},"hostPath":{"description":"hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath","$ref":"#/components/schemas/io.k8s.api.core.v1.HostPathVolumeSource"},"iscsi":{"description":"iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.ISCSIVolumeSource"},"name":{"description":"name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names","type":"string","default":""},"nfs":{"description":"nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs","$ref":"#/components/schemas/io.k8s.api.core.v1.NFSVolumeSource"},"persistentVolumeClaim":{"description":"persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims","$ref":"#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource"},"photonPersistentDisk":{"description":"photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource"},"portworxVolume":{"description":"portworxVolume represents a portworx volume attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.PortworxVolumeSource"},"projected":{"description":"projected items for all in one resources secrets, configmaps, and downward API","$ref":"#/components/schemas/io.k8s.api.core.v1.ProjectedVolumeSource"},"quobyte":{"description":"quobyte represents a Quobyte mount on the host that shares a pod's lifetime","$ref":"#/components/schemas/io.k8s.api.core.v1.QuobyteVolumeSource"},"rbd":{"description":"rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md","$ref":"#/components/schemas/io.k8s.api.core.v1.RBDVolumeSource"},"scaleIO":{"description":"scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.","$ref":"#/components/schemas/io.k8s.api.core.v1.ScaleIOVolumeSource"},"secret":{"description":"secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretVolumeSource"},"storageos":{"description":"storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.","$ref":"#/components/schemas/io.k8s.api.core.v1.StorageOSVolumeSource"},"vsphereVolume":{"description":"vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine","$ref":"#/components/schemas/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"}}},"io.k8s.api.core.v1.VolumeDevice":{"description":"volumeDevice describes a mapping of a raw block device within a container.","type":"object","required":["name","devicePath"],"properties":{"devicePath":{"description":"devicePath is the path inside of the container that the device will be mapped to.","type":"string","default":""},"name":{"description":"name must match the name of a persistentVolumeClaim in the pod","type":"string","default":""}}},"io.k8s.api.core.v1.VolumeMount":{"description":"VolumeMount describes a mounting of a Volume within a container.","type":"object","required":["name","mountPath"],"properties":{"mountPath":{"description":"Path within the container at which the volume should be mounted. Must not contain ':'.","type":"string","default":""},"mountPropagation":{"description":"mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. 
This field is beta in 1.10.","type":"string"},"name":{"description":"This must match the Name of a Volume.","type":"string","default":""},"readOnly":{"description":"Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.","type":"boolean"},"subPath":{"description":"Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).","type":"string"},"subPathExpr":{"description":"Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.","type":"string"}}},"io.k8s.api.core.v1.VolumeProjection":{"description":"Projection that may be projected along with other supported volume types","type":"object","properties":{"configMap":{"description":"configMap information about the configMap data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.ConfigMapProjection"},"downwardAPI":{"description":"downwardAPI information about the downwardAPI data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.DownwardAPIProjection"},"secret":{"description":"secret information about the secret data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.SecretProjection"},"serviceAccountToken":{"description":"serviceAccountToken is information about the serviceAccountToken data to project","$ref":"#/components/schemas/io.k8s.api.core.v1.ServiceAccountTokenProjection"}}},"io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource":{"description":"Represents a vSphere volume resource.","type":"object","required":["volumePath"],"properties":{"fsType":{"description":"fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.","type":"string"},"storagePolicyID":{"description":"storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.","type":"string"},"storagePolicyName":{"description":"storagePolicyName is the storage Policy Based Management (SPBM) profile name.","type":"string"},"volumePath":{"description":"volumePath is the path that identifies vSphere volume vmdk","type":"string","default":""}}},"io.k8s.api.core.v1.WeightedPodAffinityTerm":{"description":"The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)","type":"object","required":["weight","podAffinityTerm"],"properties":{"podAffinityTerm":{"description":"Required. 
A pod affinity term, associated with the corresponding weight.","default":{},"$ref":"#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm"},"weight":{"description":"weight associated with matching the corresponding podAffinityTerm, in the range 1-100.","type":"integer","format":"int32","default":0}}},"io.k8s.api.core.v1.WindowsSecurityContextOptions":{"description":"WindowsSecurityContextOptions contain Windows-specific options and credentials.","type":"object","properties":{"gmsaCredentialSpec":{"description":"GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.","type":"string"},"gmsaCredentialSpecName":{"description":"GMSACredentialSpecName is the name of the GMSA credential spec to use.","type":"string"},"hostProcess":{"description":"HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.","type":"boolean"},"runAsUserName":{"description":"The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.","type":"string"}}},"io.k8s.apimachinery.pkg.api.resource.Quantity":{"description":"Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.","type":"string"},"io.k8s.apimachinery.pkg.apis.meta.v1.APIResource":{"description":"APIResource specifies the name of a resource and whether it is namespaced.","type":"object","required":["name","singularName","namespaced","kind","verbs"],"properties":{"categories":{"description":"categories is a list of the grouped resources this resource belongs to (e.g. 'all')","type":"array","items":{"type":"string","default":""}},"group":{"description":"group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".","type":"string"},"kind":{"description":"kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')","type":"string","default":""},"name":{"description":"name is the plural name of the resource.","type":"string","default":""},"namespaced":{"description":"namespaced indicates if a resource is namespaced or not.","type":"boolean","default":false},"shortNames":{"description":"shortNames is a list of suggested short names of the resource.","type":"array","items":{"type":"string","default":""}},"singularName":{"description":"singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.","type":"string","default":""},"storageVersionHash":{"description":"The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.","type":"string"},"verbs":{"description":"verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)","type":"array","items":{"type":"string","default":""}},"version":{"description":"version is the preferred version of the resource. 
Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList":{"description":"APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.","type":"object","required":["groupVersion","resources"],"properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"groupVersion":{"description":"groupVersion is the group and version this APIResourceList is for.","type":"string","default":""},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"resources":{"description":"resources contains the name of the resources and if they are namespaced.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource"}}},"x-kubernetes-group-version-kind":[{"group":"","kind":"APIResourceList","version":"v1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions":{"description":"DeleteOptions may be provided when deleting an API object.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"dryRun":{"description":"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed","type":"array","items":{"type":"string","default":""}},"gracePeriodSeconds":{"description":"The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.","type":"integer","format":"int64"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"orphanDependents":{"description":"Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both.","type":"boolean"},"preconditions":{"description":"Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions"},"propagationPolicy":{"description":"Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.","type":"string"}},"x-kubernetes-group-version-kind":[{"group":"","kind":"DeleteOptions","version":"v1"},{"group":"admission.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"admission.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"admissionregistration.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"admissionregistration.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apiextensions.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"apiextensions.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apiregistration.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"apiregistration.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"apps","kind":"DeleteOptions","version":"v1"},{"group":"apps","kind":"DeleteOptions","version":"v1beta1"},{"group":"apps","kind":"DeleteOptions","version":"v1beta2"},{"group":"authentication.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"authentication.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"authorization.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"authorization.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2beta1"},{"group":"autoscaling","kind":"DeleteOptions","version":"v2beta2"},{"group":"batch","kind":"DeleteOptions","version":"v1"},{"group":"batch","kind":"DeleteOptions","version":"v1beta1"},{"group":"certificates.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"certificates.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"coordination.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"coordination.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"discovery.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"discovery.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"events.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"events.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"extensions","kind":"DeleteOptions","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"DeleteOptions","version":"v1beta2"},{"group":"imagepolicy.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"internal.apiserver.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"networking.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"networking.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":
"node.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"node.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"node.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"policy","kind":"DeleteOptions","version":"v1"},{"group":"policy","kind":"DeleteOptions","version":"v1beta1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"rbac.authorization.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"scheduling.k8s.io","kind":"DeleteOptions","version":"v1beta1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1alpha1"},{"group":"storage.k8s.io","kind":"DeleteOptions","version":"v1beta1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1":{"description":"FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff","type":"object"},"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector":{"description":"A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.","type":"object","properties":{"matchExpressions":{"description":"matchExpressions is a list of label selector requirements. The requirements are ANDed.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement"}},"matchLabels":{"description":"matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.","type":"object","additionalProperties":{"type":"string","default":""}}},"x-kubernetes-map-type":"atomic"},"io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement":{"description":"A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.","type":"object","required":["key","operator"],"properties":{"key":{"description":"key is the label key that the selector applies to.","type":"string","default":"","x-kubernetes-patch-merge-key":"key","x-kubernetes-patch-strategy":"merge"},"operator":{"description":"operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.","type":"string","default":""},"values":{"description":"values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.","type":"array","items":{"type":"string","default":""}}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta":{"description":"ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.","type":"object","properties":{"continue":{"description":"continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.","type":"string"},"remainingItemCount":{"description":"remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.","type":"integer","format":"int64"},"resourceVersion":{"description":"String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is DEPRECATED read-only field that is no longer populated by the system.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry":{"description":"ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.","type":"string"},"fieldsType":{"description":"FieldsType is the discriminator for the different fields format and version. 
There is currently only one possible value: \"FieldsV1\"","type":"string"},"fieldsV1":{"description":"FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"},"manager":{"description":"Manager is an identifier of the workflow managing these fields.","type":"string"},"operation":{"description":"Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.","type":"string"},"subresource":{"description":"Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.","type":"string"},"time":{"description":"Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta":{"description":"ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.","type":"object","properties":{"annotations":{"description":"Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations","type":"object","additionalProperties":{"type":"string","default":""}},"clusterName":{"description":"The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.","type":"string"},"creationTimestamp":{"description":"CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"deletionGracePeriodSeconds":{"description":"Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.","type":"integer","format":"int64"},"deletionTimestamp":{"description":"DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"},"finalizers":{"description":"Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.","type":"array","items":{"type":"string","default":""},"x-kubernetes-patch-strategy":"merge"},"generateName":{"description":"GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency","type":"string"},"generation":{"description":"A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.","type":"integer","format":"int64"},"labels":{"description":"Map of string keys and values that can be used to organize and categorize (scope and select) objects. 
May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels","type":"object","additionalProperties":{"type":"string","default":""}},"managedFields":{"description":"ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry"}},"name":{"description":"Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names","type":"string"},"namespace":{"description":"Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces","type":"string"},"ownerReferences":{"description":"List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"},"x-kubernetes-patch-merge-key":"uid","x-kubernetes-patch-strategy":"merge"},"resourceVersion":{"description":"An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency","type":"string"},"selfLink":{"description":"selfLink is DEPRECATED read-only field that is no longer populated by the system.","type":"string"},"uid":{"description":"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference":{"description":"OwnerReference contains enough information to let you identify an owning object. 
An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.","type":"object","required":["apiVersion","kind","name","uid"],"properties":{"apiVersion":{"description":"API version of the referent.","type":"string","default":""},"blockOwnerDeletion":{"description":"If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.","type":"boolean"},"controller":{"description":"If true, this reference points to the managing controller.","type":"boolean"},"kind":{"description":"Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string","default":""},"name":{"description":"Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names","type":"string","default":""},"uid":{"description":"UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string","default":""}},"x-kubernetes-map-type":"atomic"},"io.k8s.apimachinery.pkg.apis.meta.v1.Patch":{"description":"Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.","type":"object"},"io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions":{"description":"Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.","type":"object","properties":{"resourceVersion":{"description":"Specifies the target ResourceVersion","type":"string"},"uid":{"description":"Specifies the target UID.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.Status":{"description":"Status is a return value for calls that don't return other objects.","type":"object","properties":{"apiVersion":{"description":"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources","type":"string"},"code":{"description":"Suggested HTTP return code for this status, 0 if not set.","type":"integer","format":"int32"},"details":{"description":"Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.","$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"},"kind":{"description":"Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"message":{"description":"A human-readable description of the status of this operation.","type":"string"},"metadata":{"description":"Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"},"reason":{"description":"A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.","type":"string"},"status":{"description":"Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status","type":"string"}},"x-kubernetes-group-version-kind":[{"group":"","kind":"Status","version":"v1"}]},"io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause":{"description":"StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.","type":"object","properties":{"field":{"description":"The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"","type":"string"},"message":{"description":"A human-readable description of the cause of the error. This field may be presented as-is to a reader.","type":"string"},"reason":{"description":"A machine-readable description of the cause of the error. If this value is empty there is no information available.","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails":{"description":"StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.","type":"object","properties":{"causes":{"description":"The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.","type":"array","items":{"default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"}},"group":{"description":"The group attribute of the resource associated with the status StatusReason.","type":"string"},"kind":{"description":"The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds","type":"string"},"name":{"description":"The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).","type":"string"},"retryAfterSeconds":{"description":"If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.","type":"integer","format":"int32"},"uid":{"description":"UID of the resource. (when there is a single resource which can be described). 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids","type":"string"}}},"io.k8s.apimachinery.pkg.apis.meta.v1.Time":{"description":"Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.","type":"string","format":"date-time"},"io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent":{"description":"Event represents a single event to a watched resource.","type":"object","required":["type","object"],"properties":{"object":{"description":"Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.","default":{},"$ref":"#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension"},"type":{"type":"string","default":""}},"x-kubernetes-group-version-kind":[{"group":"","kind":"WatchEvent","version":"v1"},{"group":"admission.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"admission.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"admissionregistration.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"admissionregistration.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apiextensions.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"apiextensions.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apiregistration.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"apiregistration.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"apps","kind":"WatchEvent","version":"v1"},{"group":"apps","kind":"WatchEvent","version":"v1beta1"},{"group":"apps","kind":"WatchEvent","version":"v1beta2"},{"group":"authentication.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"authentication.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"authorization.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"authorization.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"autoscaling","kind":"WatchEvent","version":"v1"},{"group":"autoscaling","kind":"WatchEvent","version":"v2"},{"group":"autoscaling","kind":"WatchEvent","version":"v2beta1"},{"group":"autoscaling","kind":"WatchEvent","version":"v2beta2"},{"group":"batch","kind":"WatchEvent","version":"v1"},{"group":"batch","kind":"WatchEvent","version":"v1beta1"},{"group":"certificates.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"certificates.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"coordination.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"coordination.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"discovery.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"discovery.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"events.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"events.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"extensions","kind":"WatchEvent","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"flowcontrol.apiserver.k8s.io","kind":"WatchEvent","version":"v1beta2"},{"group":"imagepolicy.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"internal.apiserver.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"networking.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"networking.k8s.io","kind":"WatchEvent","version":"v1beta1
"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"node.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"policy","kind":"WatchEvent","version":"v1"},{"group":"policy","kind":"WatchEvent","version":"v1beta1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"rbac.authorization.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"scheduling.k8s.io","kind":"WatchEvent","version":"v1beta1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1alpha1"},{"group":"storage.k8s.io","kind":"WatchEvent","version":"v1beta1"}]},"io.k8s.apimachinery.pkg.runtime.RawExtension":{"description":"RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)","type":"object"},"io.k8s.apimachinery.pkg.util.intstr.IntOrString":{"description":"IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number.","type":"string","format":"int-or-string"}}}} diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod index 585824c6f7237..d24a71e003103 100644 --- a/staging/src/k8s.io/client-go/go.mod +++ b/staging/src/k8s.io/client-go/go.mod @@ -2,7 +2,7 @@ module k8s.io/client-go -go 1.20 +go 1.21.3 require ( github.com/evanphx/json-patch v4.12.0+incompatible @@ -18,16 +18,16 @@ require ( github.com/imdario/mergo v0.3.6 github.com/peterbourgon/diskv v2.0.1+incompatible github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 - golang.org/x/net v0.13.0 - golang.org/x/oauth2 v0.8.0 - golang.org/x/term v0.10.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.17.0 + golang.org/x/oauth2 v0.10.0 + golang.org/x/term v0.13.0 golang.org/x/time v0.3.0 google.golang.org/protobuf v1.31.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/klog/v2 v2.100.1 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd sigs.k8s.io/structured-merge-diff/v4 v4.3.0 @@ -36,7 +36,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -49,11 +49,11 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/gomega v1.27.6 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum index 0871c82611885..78bb6035f809f 100644 --- a/staging/src/k8s.io/client-go/go.sum +++ b/staging/src/k8s.io/client-go/go.sum @@ -1,4 +1,5 @@ -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -7,8 +8,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
-github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -75,11 +76,12 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -97,14 +99,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod 
h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -113,33 +115,33 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -162,11 +164,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/client-go/informers/generic.go b/staging/src/k8s.io/client-go/informers/generic.go index 8cff380f4a500..e067de3c19bec 100644 --- a/staging/src/k8s.io/client-go/informers/generic.go +++ b/staging/src/k8s.io/client-go/informers/generic.go @@ -289,8 +289,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil // Group=networking.k8s.io, Version=v1alpha1 - case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil case networkingv1alpha1.SchemeGroupVersion.WithResource("ipaddresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().IPAddresses().Informer()}, nil diff --git a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go deleted file mode 100644 index 
cefd0f8a1ee70..0000000000000 --- a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" - cache "k8s.io/client-go/tools/cache" -) - -// ClusterCIDRInformer provides access to a shared informer and lister for -// ClusterCIDRs. -type ClusterCIDRInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterCIDRLister -} - -type clusterCIDRInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewClusterCIDRInformer constructs a new informer for ClusterCIDR type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterCIDRInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredClusterCIDRInformer constructs a new informer for ClusterCIDR type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NetworkingV1alpha1().ClusterCIDRs().Watch(context.TODO(), options) - }, - }, - &networkingv1alpha1.ClusterCIDR{}, - resyncPeriod, - indexers, - ) -} - -func (f *clusterCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *clusterCIDRInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1alpha1.ClusterCIDR{}, f.defaultInformer) -} - -func (f *clusterCIDRInformer) Lister() v1alpha1.ClusterCIDRLister { - return v1alpha1.NewClusterCIDRLister(f.Informer().GetIndexer()) -} diff --git a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go index 07e7d208ca26c..d909f908fe35d 100644 --- a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go @@ -24,8 +24,6 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // ClusterCIDRs returns a ClusterCIDRInformer. - ClusterCIDRs() ClusterCIDRInformer // IPAddresses returns a IPAddressInformer. IPAddresses() IPAddressInformer } @@ -41,11 +39,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// ClusterCIDRs returns a ClusterCIDRInformer. -func (v *version) ClusterCIDRs() ClusterCIDRInformer { - return &clusterCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - // IPAddresses returns a IPAddressInformer. func (v *version) IPAddresses() IPAddressInformer { return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go deleted file mode 100644 index 9df76351db8c4..0000000000000 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. 
DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface. -// A group's client should implement this interface. -type ClusterCIDRsGetter interface { - ClusterCIDRs() ClusterCIDRInterface -} - -// ClusterCIDRInterface has methods to work with ClusterCIDR resources. -type ClusterCIDRInterface interface { - Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error) - Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) - Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) - ClusterCIDRExpansion -} - -// clusterCIDRs implements ClusterCIDRInterface -type clusterCIDRs struct { - client rest.Interface -} - -// newClusterCIDRs returns a ClusterCIDRs -func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs { - return &clusterCIDRs{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. -func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Get(). - Resource("clustercidrs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. -func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterCIDRList{} - err = c.client.Get(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterCIDRs. -func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterCIDR and creates it. 
Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Post(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterCIDR). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Put(). - Resource("clustercidrs"). - Name(clusterCIDR.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterCIDR). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. -func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustercidrs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustercidrs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterCIDR. -func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Patch(pt). - Resource("clustercidrs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. -func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { - if clusterCIDR == nil { - return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterCIDR) - if err != nil { - return nil, err - } - name := clusterCIDR.Name - if name == nil { - return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") - } - result = &v1alpha1.ClusterCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clustercidrs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go deleted file mode 100644 index 592e9fc63dc4c..0000000000000 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterCIDRs implements ClusterCIDRInterface -type FakeClusterCIDRs struct { - Fake *FakeNetworkingV1alpha1 -} - -var clustercidrsResource = v1alpha1.SchemeGroupVersion.WithResource("clustercidrs") - -var clustercidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR") - -// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. -func (c *FakeClusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clustercidrsResource, name), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. -func (c *FakeClusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clustercidrsResource, clustercidrsKind, opts), &v1alpha1.ClusterCIDRList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterCIDRList{ListMeta: obj.(*v1alpha1.ClusterCIDRList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterCIDRList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterCIDRs. -func (c *FakeClusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clustercidrsResource, opts)) -} - -// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *FakeClusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. 
-func (c *FakeClusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. -func (c *FakeClusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clustercidrsResource, name, opts), &v1alpha1.ClusterCIDR{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clustercidrsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterCIDRList{}) - return err -} - -// Patch applies the patch and returns the patched clusterCIDR. -func (c *FakeClusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, name, pt, data, subresources...), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. -func (c *FakeClusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { - if clusterCIDR == nil { - return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(clusterCIDR) - if err != nil { - return nil, err - } - name := clusterCIDR.Name - if name == nil { - return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go index 2d063836b53f9..63f5114cf2617 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go @@ -28,10 +28,6 @@ type FakeNetworkingV1alpha1 struct { *testing.Fake } -func (c *FakeNetworkingV1alpha1) ClusterCIDRs() v1alpha1.ClusterCIDRInterface { - return &FakeClusterCIDRs{c} -} - func (c *FakeNetworkingV1alpha1) IPAddresses() v1alpha1.IPAddressInterface { return &FakeIPAddresses{c} } diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go index 9c2979d6c44ff..0ded7944ee68a 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -18,6 +18,4 @@ limitations under the License. package v1alpha1 -type ClusterCIDRExpansion interface{} - type IPAddressExpansion interface{} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index 884c846f59836..59bddd70b50d4 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -28,7 +28,6 @@ import ( type NetworkingV1alpha1Interface interface { RESTClient() rest.Interface - ClusterCIDRsGetter IPAddressesGetter } @@ -37,10 +36,6 @@ type NetworkingV1alpha1Client struct { restClient rest.Interface } -func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { - return newClusterCIDRs(c) -} - func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { return newIPAddresses(c) } diff --git a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go deleted file mode 100644 index dca9d7bf0cb29..0000000000000 --- a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ClusterCIDRLister helps list ClusterCIDRs. -// All objects returned here must be treated as read-only. -type ClusterCIDRLister interface { - // List lists all ClusterCIDRs in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) - // Get retrieves the ClusterCIDR from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterCIDR, error) - ClusterCIDRListerExpansion -} - -// clusterCIDRLister implements the ClusterCIDRLister interface. -type clusterCIDRLister struct { - indexer cache.Indexer -} - -// NewClusterCIDRLister returns a new ClusterCIDRLister. -func NewClusterCIDRLister(indexer cache.Indexer) ClusterCIDRLister { - return &clusterCIDRLister{indexer: indexer} -} - -// List lists all ClusterCIDRs in the indexer. -func (s *clusterCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterCIDR)) - }) - return ret, err -} - -// Get retrieves the ClusterCIDR from the index for a given name. -func (s *clusterCIDRLister) Get(name string) (*v1alpha1.ClusterCIDR, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clustercidr"), name) - } - return obj.(*v1alpha1.ClusterCIDR), nil -} diff --git a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go index d57b71b0059c5..afa9aabcdcdda 100644 --- a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go @@ -18,10 +18,6 @@ limitations under the License. package v1alpha1 -// ClusterCIDRListerExpansion allows custom methods to be added to -// ClusterCIDRLister. -type ClusterCIDRListerExpansion interface{} - // IPAddressListerExpansion allows custom methods to be added to // IPAddressLister. type IPAddressListerExpansion interface{} diff --git a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/api__v1_openapi.json b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/api__v1_openapi.json index 915b13a9f3c9d..d039c7a6f8582 100644 --- a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/api__v1_openapi.json +++ b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/api__v1_openapi.json @@ -5617,7 +5617,7 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.GRPCAction" } ], - "description": "GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate." + "description": "GRPC specifies an action involving a GRPC port." 
}, "httpGet": { "allOf": [ diff --git a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__apps__v1_openapi.json b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__apps__v1_openapi.json index 6c866dd1f8679..21c3c4c372187 100644 --- a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__apps__v1_openapi.json +++ b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__apps__v1_openapi.json @@ -3845,7 +3845,7 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.GRPCAction" } ], - "description": "GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate." + "description": "GRPC specifies an action involving a GRPC port." }, "httpGet": { "allOf": [ diff --git a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__batch__v1_openapi.json b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__batch__v1_openapi.json index da7f7bca65856..b43168bb19bd5 100644 --- a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__batch__v1_openapi.json +++ b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__batch__v1_openapi.json @@ -3019,7 +3019,7 @@ "$ref": "#/components/schemas/io.k8s.api.core.v1.GRPCAction" } ], - "description": "GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate." + "description": "GRPC specifies an action involving a GRPC port." }, "httpGet": { "allOf": [ diff --git a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__discovery.k8s.io__v1_openapi.json b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__discovery.k8s.io__v1_openapi.json index b0ee246407f6e..4e31acace454e 100644 --- a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__discovery.k8s.io__v1_openapi.json +++ b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__discovery.k8s.io__v1_openapi.json @@ -144,7 +144,7 @@ "type": "string" }, "name": { - "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. 
Default is empty string.", "type": "string" }, "port": { diff --git a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__networking.k8s.io__v1alpha1_openapi.json b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__networking.k8s.io__v1alpha1_openapi.json index 9d36d38a155ed..51b5f5d1ff9d3 100644 --- a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__networking.k8s.io__v1alpha1_openapi.json +++ b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__networking.k8s.io__v1alpha1_openapi.json @@ -82,123 +82,6 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, - "io.k8s.api.networking.v1alpha1.ClusterCIDR": { - "description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" - } - ], - "default": {}, - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - }, - "spec": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec" - } - ], - "default": {}, - "description": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" - } - }, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRList": { - "description": "ClusterCIDRList contains a list of ClusterCIDR.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "items": { - "description": "items is the list of ClusterCIDRs.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - ], - "default": {} - }, - "type": "array" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" - } - ], - "default": {}, - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - } - }, - "required": [ - "items" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDRList", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": { - "description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "properties": { - "ipv4": { - "default": "", - "description": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "ipv6": { - "default": "", - "description": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "nodeSelector": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelector" - } - ], - "description": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable." - }, - "perNodeHostBits": { - "default": 0, - "description": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "format": "int32", - "type": "integer" - } - }, - "required": [ - "perNodeHostBits" - ], - "type": "object" - }, "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": { "description": "APIResource specifies the name of a resource and whether it is namespaced.", "properties": { @@ -1361,1090 +1244,6 @@ "networking_v1alpha1" ] } - }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs": { - "delete": { - "description": "delete collection of ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR", - "parameters": [ - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "description": "list or watch objects of kind ClusterCIDR", - "operationId": "listNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "post": { - "description": "create a ClusterCIDR", - "operationId": "createNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": { - "delete": { - "description": "delete a ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "description": "read the specified ClusterCIDR", - "operationId": "readNetworkingV1alpha1ClusterCIDR", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - 
"description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "patch": { - "description": "partially update the specified ClusterCIDR", - "operationId": "patchNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", - "in": "query", - "name": "force", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "application/apply-patch+yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/json-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/strategic-merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "put": { - "description": "replace the specified ClusterCIDR", - "operationId": "replaceNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. 
This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": { - "get": { - "description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchNetworkingV1alpha1ClusterCIDRList", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": { - "get": { - "description": "watch changes to an object of kind ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchNetworkingV1alpha1ClusterCIDR", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] } } } diff --git a/staging/src/k8s.io/client-go/restmapper/shortcut.go b/staging/src/k8s.io/client-go/restmapper/shortcut.go index 7ab3cd46fe38c..ca517a01d4dc5 100644 --- a/staging/src/k8s.io/client-go/restmapper/shortcut.go +++ b/staging/src/k8s.io/client-go/restmapper/shortcut.go @@ -17,6 +17,7 @@ limitations under the License. 
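The `limit`/`continue` descriptions repeated throughout the removed ClusterCIDR spec above are the generic chunked-list contract shared by every Kubernetes list endpoint. As a rough illustration only (the resource, namespace, kubeconfig loading, and page size below are placeholders chosen for the sketch, not taken from this patch), a client-go consumer pages through results along these lines:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig loading; any rest.Config would do here.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	opts := metav1.ListOptions{Limit: 500} // ask the server for chunks of at most 500 items
	for {
		pods, err := client.CoreV1().Pods("").List(context.TODO(), opts)
		if err != nil {
			// A 410 ResourceExpired means the continue token aged out; per the spec text
			// above, a client that needs a consistent view must restart the list without it.
			panic(err)
		}
		fmt.Printf("got %d items in this chunk\n", len(pods.Items))

		// Only an empty continue token signals that no more results are available.
		if pods.Continue == "" {
			break
		}
		opts.Continue = pods.Continue
	}
}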
package restmapper import ( + "fmt" "strings" "k8s.io/klog/v2" @@ -32,13 +33,15 @@ type shortcutExpander struct { RESTMapper meta.RESTMapper discoveryClient discovery.DiscoveryInterface + + warningHandler func(string) } var _ meta.ResettableRESTMapper = shortcutExpander{} // NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery -func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { - return shortcutExpander{RESTMapper: delegate, discoveryClient: client} +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, warningHandler func(string)) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client, warningHandler: warningHandler} } // KindFor fulfills meta.RESTMapper @@ -145,16 +148,37 @@ func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionRes } } + found := false + var rsc schema.GroupVersionResource + warnedAmbiguousShortcut := make(map[schema.GroupResource]bool) for _, item := range shortcutResources { if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { continue } if resource.Resource == item.ShortForm.Resource { - resource.Resource = item.LongForm.Resource - resource.Group = item.LongForm.Group - return resource + if found { + if item.LongForm.Group == rsc.Group && item.LongForm.Resource == rsc.Resource { + // It is common and acceptable that group/resource has multiple + // versions registered in cluster. This does not introduce ambiguity + // in terms of shortname usage. + continue + } + if !warnedAmbiguousShortcut[item.LongForm] { + if e.warningHandler != nil { + e.warningHandler(fmt.Sprintf("short name %q could also match lower priority resource %s", resource.Resource, item.LongForm.String())) + } + warnedAmbiguousShortcut[item.LongForm] = true + } + continue + } + rsc.Resource = item.LongForm.Resource + rsc.Group = item.LongForm.Group + found = true } } + if found { + return rsc + } // we didn't find exact match so match on group prefixing. 
This allows autoscal to match autoscaling if len(resource.Group) == 0 { diff --git a/staging/src/k8s.io/client-go/restmapper/shortcut_test.go b/staging/src/k8s.io/client-go/restmapper/shortcut_test.go index fa2355d5f1ded..e1f87ae26c0e2 100644 --- a/staging/src/k8s.io/client-go/restmapper/shortcut_test.go +++ b/staging/src/k8s.io/client-go/restmapper/shortcut_test.go @@ -133,7 +133,7 @@ func TestReplaceAliases(t *testing.T) { ds.serverResourcesHandler = func() ([]*metav1.APIResourceList, error) { return test.srvRes, nil } - mapper := NewShortcutExpander(&fakeRESTMapper{}, ds).(shortcutExpander) + mapper := NewShortcutExpander(&fakeRESTMapper{}, ds, nil).(shortcutExpander) actual := mapper.expandResourceShortcut(schema.GroupVersionResource{Resource: test.arg}) if actual != test.expected { @@ -187,7 +187,9 @@ func TestKindFor(t *testing.T) { } delegate := &fakeRESTMapper{} - mapper := NewShortcutExpander(delegate, ds) + mapper := NewShortcutExpander(delegate, ds, func(a string) { + t.Fatalf("unexpected warning message %s", a) + }) mapper.KindFor(test.in) if delegate.kindForInput != test.expected { @@ -242,7 +244,9 @@ func TestKindForWithNewCRDs(t *testing.T) { // will answer the initial request, only failure to match will trigger // the cache invalidation and live discovery call delegate := NewDeferredDiscoveryRESTMapper(fakeCachedDiscovery) - mapper := NewShortcutExpander(delegate, fakeCachedDiscovery) + mapper := NewShortcutExpander(delegate, fakeCachedDiscovery, func(a string) { + t.Fatalf("unexpected warning message %s", a) + }) gvk, err := mapper.KindFor(test.in) if err != nil { @@ -255,6 +259,201 @@ func TestKindForWithNewCRDs(t *testing.T) { } } +func TestWarnAmbigious(t *testing.T) { + tests := []struct { + name string + arg string + expected schema.GroupVersionResource + expectedWarningLogs []string + srvRes []*metav1.APIResourceList + }{ + { + name: "warn ambiguity", + arg: "hpa", + expected: schema.GroupVersionResource{Resource: "superhorizontalpodautoscalers", Group: "autoscaling"}, + expectedWarningLogs: []string{`short name "hpa" could also match lower priority resource horizontalpodautoscalers.autoscaling`}, + srvRes: []*metav1.APIResourceList{ + { + GroupVersion: "autoscaling/v1", + APIResources: []metav1.APIResource{ + { + Name: "superhorizontalpodautoscalers", + ShortNames: []string{"hpa"}, + }, + }, + }, + { + GroupVersion: "autoscaling/v1", + APIResources: []metav1.APIResource{ + { + Name: "horizontalpodautoscalers", + ShortNames: []string{"hpa"}, + }, + }, + }, + }, + }, + { + name: "warn-builtin-shortname-ambugity", + arg: "po", + expected: schema.GroupVersionResource{Resource: "pods", Group: ""}, + expectedWarningLogs: []string{`short name "po" could also match lower priority resource poddlers.acme.com`}, + srvRes: []*metav1.APIResourceList{ + { + GroupVersion: "v1", + APIResources: []metav1.APIResource{{Name: "pods", SingularName: "pod", ShortNames: []string{"po"}}}, + }, + { + GroupVersion: "acme.com/v1", + APIResources: []metav1.APIResource{{Name: "poddlers", ShortNames: []string{"po"}}}, + }, + }, + }, + { + name: "warn-builtin-shortname-ambugity-multi-version", + arg: "po", + expected: schema.GroupVersionResource{Resource: "pods", Group: ""}, + expectedWarningLogs: []string{`short name "po" could also match lower priority resource poddlers.acme.com`}, + srvRes: []*metav1.APIResourceList{ + { + GroupVersion: "v1", + APIResources: []metav1.APIResource{{Name: "pods", SingularName: "pod", ShortNames: []string{"po"}}}, + }, + { + GroupVersion: "acme.com/v1", + 
APIResources: []metav1.APIResource{{Name: "poddlers", ShortNames: []string{"po"}}}, + }, + { + GroupVersion: "acme.com/v1beta1", + APIResources: []metav1.APIResource{{Name: "poddlers", ShortNames: []string{"po"}}}, + }, + }, + }, + { + name: "resource-match-singular-preferred", + arg: "pod", + expected: schema.GroupVersionResource{Resource: "pod", Group: ""}, + srvRes: []*metav1.APIResourceList{ + { + GroupVersion: "v1", + APIResources: []metav1.APIResource{{Name: "pods", SingularName: "pod"}}, + }, + { + GroupVersion: "acme.com/v1", + APIResources: []metav1.APIResource{{Name: "poddlers", ShortNames: []string{"pods", "pod"}}}, + }, + }, + }, + { + name: "resource-multiple-versions-shortform", + arg: "hpa", + expected: schema.GroupVersionResource{Resource: "horizontalpodautoscalers", Group: "autoscaling"}, + expectedWarningLogs: []string{}, + srvRes: []*metav1.APIResourceList{ + { + GroupVersion: "autoscaling/v1alphav1", + APIResources: []metav1.APIResource{ + { + Name: "horizontalpodautoscalers", + ShortNames: []string{"hpa"}, + }, + }, + }, + { + GroupVersion: "autoscaling/v1", + APIResources: []metav1.APIResource{ + { + Name: "horizontalpodautoscalers", + ShortNames: []string{"hpa"}, + }, + }, + }, + }, + }, + { + name: "multi-resource-multiple-versions-shortform", + arg: "hpa", + expected: schema.GroupVersionResource{Resource: "horizontalpodautoscalers", Group: "autoscaling"}, + expectedWarningLogs: []string{ + `short name "hpa" could also match lower priority resource foo.foo`, + `short name "hpa" could also match lower priority resource bar.bar`, + }, + srvRes: []*metav1.APIResourceList{ + { + GroupVersion: "autoscaling/v1alphav1", + APIResources: []metav1.APIResource{ + { + Name: "horizontalpodautoscalers", + ShortNames: []string{"hpa"}, + }, + }, + }, + { + GroupVersion: "autoscaling/v1", + APIResources: []metav1.APIResource{ + { + Name: "horizontalpodautoscalers", + ShortNames: []string{"hpa"}, + }, + }, + }, + { + GroupVersion: "foo/v1", + APIResources: []metav1.APIResource{ + { + Name: "foo", + ShortNames: []string{"hpa"}, + }, + }, + }, + { + GroupVersion: "foo/v1beta1", + APIResources: []metav1.APIResource{ + { + Name: "foo", + ShortNames: []string{"hpa"}, + }, + }, + }, + { + GroupVersion: "bar/v1", + APIResources: []metav1.APIResource{ + { + Name: "bar", + ShortNames: []string{"hpa"}, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + ds := &fakeDiscoveryClient{} + ds.serverResourcesHandler = func() ([]*metav1.APIResourceList, error) { + return test.srvRes, nil + } + + var actualWarnings []string + mapper := NewShortcutExpander(&fakeRESTMapper{}, ds, func(a string) { + actualWarnings = append(actualWarnings, a) + }).(shortcutExpander) + + actual := mapper.expandResourceShortcut(schema.GroupVersionResource{Resource: test.arg}) + if actual != test.expected { + t.Errorf("%s: unexpected argument: expected %s, got %s", test.name, test.expected, actual) + } + + if len(actualWarnings) == 0 && len(test.expectedWarningLogs) == 0 { + continue + } + + if !cmp.Equal(test.expectedWarningLogs, actualWarnings) { + t.Fatalf("expected warning message %s but got %s", test.expectedWarningLogs, actualWarnings) + } + } +} + type fakeRESTMapper struct { kindForInput schema.GroupVersionResource } diff --git a/staging/src/k8s.io/client-go/tools/cache/heap_test.go b/staging/src/k8s.io/client-go/tools/cache/heap_test.go index c2e476988f708..ed50ee01351ce 100644 --- a/staging/src/k8s.io/client-go/tools/cache/heap_test.go +++ b/staging/src/k8s.io/client-go/tools/cache/heap_test.go @@ -264,7 
+264,7 @@ func TestHeap_Get(t *testing.T) { } // Get non-existing object. _, exists, err = h.Get(mkHeapObj("non-existing", 0)) - if err != nil || exists == true { + if err != nil || exists { t.Fatalf("didn't expect to get any object") } } @@ -283,7 +283,7 @@ func TestHeap_GetByKey(t *testing.T) { } // Get non-existing object. _, exists, err = h.GetByKey("non-existing") - if err != nil || exists == true { + if err != nil || exists { t.Fatalf("didn't expect to get any object") } } diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector.go b/staging/src/k8s.io/client-go/tools/cache/reflector.go index 119f80ac45f54..c1ea13de574be 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector.go @@ -334,12 +334,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return nil } if err != nil { - if !apierrors.IsInvalid(err) { - return err - } - klog.Warning("The watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantics") + klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err) fallbackToList = true - // Ensure that we won't accidentally pass some garbage down the watch. + // ensure that we won't accidentally pass some garbage down the watch. w = nil } } @@ -397,6 +394,11 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { case <-stopCh: + // we can only end up here when the stopCh + // was closed after a successful watchlist or list request + if w != nil { + w.Stop() + } return nil default: } @@ -672,6 +674,12 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // "k8s.io/initial-events-end" bookmark. initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) r.setIsLastSyncResourceVersionUnavailable(false) + + // we utilize the temporaryStore to ensure independence from the current store implementation. + // as of today, the store is implemented as a queue and will be drained by the higher-level + // component as soon as it finishes replacing the content. + checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore) + if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { return nil, fmt.Errorf("unable to sync watch-list result: %v", err) } @@ -764,7 +772,7 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion - if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok { + if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" { if exitOnInitialEventsEndBookmark != nil { *exitOnInitialEventsEndBookmark = true } diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/staging/src/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go new file mode 100644 index 0000000000000..aa3027d714e95 --- /dev/null +++ b/staging/src/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -0,0 +1,119 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "os" + "sort" + "strconv" + "time" + + "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +var dataConsistencyDetectionEnabled = false + +func init() { + dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) +} + +// checkWatchListConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { + if !dataConsistencyDetectionEnabled { + return + } + checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store) +} + +// checkWatchListConsistency exists solely for testing purposes. +// we cannot use checkWatchListConsistencyIfRequested because +// it is guarded by an environmental variable. +// we cannot manipulate the environmental variable because +// it will affect other tests in this package. 
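Because the detector added here is gated on an environment variable that is parsed once in init(), it has to be present in the process environment before client-go's cache package initializes. A minimal sketch of how a CI harness might opt in follows; the test binary name and filter are hypothetical, only the variable name comes from this patch.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Hypothetical test binary and run filter; the real e2e wiring is not part of this patch.
	cmd := exec.Command("./reflector.test", "-test.run", "TestWatchList")
	// KUBE_WATCHLIST_INCONSISTENCY_DETECTOR is read with strconv.ParseBool at package init,
	// so it must be exported before the process starts, not set from inside a running test.
	cmd.Env = append(os.Environ(), "KUBE_WATCHLIST_INCONSISTENCY_DETECTOR=true")
	out, err := cmd.CombinedOutput()
	fmt.Println(string(out))
	if err != nil {
		os.Exit(1)
	}
}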
+func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { + klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity) + opts := metav1.ListOptions{ + ResourceVersion: lastSyncedResourceVersion, + ResourceVersionMatch: metav1.ResourceVersionMatchExact, + } + var list runtime.Object + err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) { + list, err = listerWatcher.List(opts) + if err != nil { + // the consistency check will only be enabled in the CI + // and LIST calls in general will be retired by the client-go library + // if we fail simply log and retry + klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) + return false, nil + } + return true, nil + }) + if err != nil { + klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err) + return + } + + rawListItems, err := meta.ExtractListWithAlloc(list) + if err != nil { + panic(err) // this should never happen + } + + listItems := toMetaObjectSliceOrDie(rawListItems) + storeItems := toMetaObjectSliceOrDie(store.List()) + + sort.Sort(byUID(listItems)) + sort.Sort(byUID(storeItems)) + + if !cmp.Equal(listItems, storeItems) { + klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems)) + msg := "data inconsistency detected for the watch-list feature, panicking!" + panic(msg) + } +} + +type byUID []metav1.Object + +func (a byUID) Len() int { return len(a) } +func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } +func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { + result := make([]metav1.Object, len(s)) + for i, v := range s { + m, err := meta.Accessor(v) + if err != nil { + panic(err) + } + result[i] = m + } + return result +} diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector_data_consistency_detector_test.go b/staging/src/k8s.io/client-go/tools/cache/reflector_data_consistency_detector_test.go new file mode 100644 index 0000000000000..3c7eda7def834 --- /dev/null +++ b/staging/src/k8s.io/client-go/tools/cache/reflector_data_consistency_detector_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" +) + +func TestWatchListConsistency(t *testing.T) { + scenarios := []struct { + name string + + podList *v1.PodList + storeContent []*v1.Pod + + expectedRequestOptions []metav1.ListOptions + expectedListRequests int + expectPanic bool + }{ + { + name: "watchlist consistency check won't panic when data is consistent", + podList: &v1.PodList{ + ListMeta: metav1.ListMeta{ResourceVersion: "2"}, + Items: []v1.Pod{*makePod("p1", "1"), *makePod("p2", "2")}, + }, + storeContent: []*v1.Pod{makePod("p1", "1"), makePod("p2", "2")}, + expectedListRequests: 1, + expectedRequestOptions: []metav1.ListOptions{ + { + ResourceVersion: "2", + ResourceVersionMatch: metav1.ResourceVersionMatchExact, + }, + }, + }, + + { + name: "watchlist consistency check won't panic when there is no data", + podList: &v1.PodList{ + ListMeta: metav1.ListMeta{ResourceVersion: "2"}, + }, + expectedListRequests: 1, + expectedRequestOptions: []metav1.ListOptions{ + { + ResourceVersion: "2", + ResourceVersionMatch: metav1.ResourceVersionMatchExact, + }, + }, + }, + + { + name: "watchlist consistency panics when data is inconsistent", + podList: &v1.PodList{ + ListMeta: metav1.ListMeta{ResourceVersion: "2"}, + Items: []v1.Pod{*makePod("p1", "1"), *makePod("p2", "2"), *makePod("p3", "3")}, + }, + storeContent: []*v1.Pod{makePod("p1", "1"), makePod("p2", "2")}, + expectedListRequests: 1, + expectedRequestOptions: []metav1.ListOptions{ + { + ResourceVersion: "2", + ResourceVersionMatch: metav1.ResourceVersionMatchExact, + }, + }, + expectPanic: true, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + listWatcher, store, _, stopCh := testData() + for _, obj := range scenario.storeContent { + require.NoError(t, store.Add(obj)) + } + listWatcher.customListResponse = scenario.podList + + if scenario.expectPanic { + require.Panics(t, func() { checkWatchListConsistency(stopCh, "", scenario.podList.ResourceVersion, listWatcher, store) }) + } else { + checkWatchListConsistency(stopCh, "", scenario.podList.ResourceVersion, listWatcher, store) + } + + verifyListCounter(t, listWatcher, scenario.expectedListRequests) + verifyRequestOptions(t, listWatcher, scenario.expectedRequestOptions) + }) + } +} + +func TestDriveWatchLisConsistencyIfRequired(t *testing.T) { + stopCh := make(chan struct{}) + defer close(stopCh) + checkWatchListConsistencyIfRequested(stopCh, "", "", nil, nil) +} + +func TestWatchListConsistencyRetry(t *testing.T) { + store := NewStore(MetaNamespaceKeyFunc) + stopCh := make(chan struct{}) + defer close(stopCh) + + stopListErrorAfter := 5 + errLister := &errorLister{stopErrorAfter: stopListErrorAfter} + + checkWatchListConsistency(stopCh, "", "", errLister, store) + require.Equal(t, errLister.listCounter, errLister.stopErrorAfter) +} + +type errorLister struct { + listCounter int + stopErrorAfter int +} + +func (lw *errorLister) List(_ metav1.ListOptions) (runtime.Object, error) { + lw.listCounter++ + if lw.listCounter == lw.stopErrorAfter { + return &v1.PodList{}, nil + } + return nil, fmt.Errorf("nasty error") +} + +func (lw *errorLister) Watch(_ metav1.ListOptions) (watch.Interface, error) { + panic("not implemented") +} diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector_test.go 
b/staging/src/k8s.io/client-go/tools/cache/reflector_test.go index b26fe345d8c44..611357b7d516c 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector_test.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector_test.go @@ -28,6 +28,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -124,6 +126,26 @@ func TestReflectorResyncChan(t *testing.T) { } } +// TestEstablishedWatchStoppedAfterStopCh ensures that +// an established watch will be closed right after +// the StopCh was also closed. +func TestEstablishedWatchStoppedAfterStopCh(t *testing.T) { + ctx, ctxCancel := context.WithCancel(context.TODO()) + ctxCancel() + w := watch.NewFake() + require.False(t, w.IsStopped()) + + // w is stopped when the stopCh is closed + target := NewReflector(nil, &v1.Pod{}, nil, 0) + err := target.watch(w, ctx.Done(), nil) + require.NoError(t, err) + require.True(t, w.IsStopped()) + + // noop when the w is nil and the ctx is closed + err = target.watch(nil, ctx.Done(), nil) + require.NoError(t, err) +} + func BenchmarkReflectorResyncChanMany(b *testing.B) { s := NewStore(MetaNamespaceKeyFunc) g := NewReflector(&testLW{}, &v1.Pod{}, s, 25*time.Millisecond) diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector_watchlist_test.go b/staging/src/k8s.io/client-go/tools/cache/reflector_watchlist_test.go index ae1750c7bb7a6..c43db073c5af8 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector_watchlist_test.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector_watchlist_test.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/utils/pointer" ) @@ -94,18 +95,39 @@ func TestWatchList(t *testing.T) { expectedStoreContent: []v1.Pod{*makePod("p1", "1")}, }, { - name: "returning any other error than apierrors.NewInvalid stops the reflector and reports the error", + name: "returning any other error than apierrors.NewInvalid forces fallback", watchOptionsPredicate: func(options metav1.ListOptions) error { - return fmt.Errorf("dummy error") + if options.SendInitialEvents != nil && *options.SendInitialEvents { + return fmt.Errorf("dummy error") + } + return nil + }, + podList: &v1.PodList{ + ListMeta: metav1.ListMeta{ResourceVersion: "1"}, + Items: []v1.Pod{*makePod("p1", "1")}, + }, + closeAfterWatchEvents: 1, + watchEvents: []watch.Event{{Type: watch.Added, Object: makePod("p2", "2")}}, + expectedWatchRequests: 2, + expectedListRequests: 1, + expectedStoreContent: []v1.Pod{*makePod("p1", "1"), *makePod("p2", "2")}, + expectedRequestOptions: []metav1.ListOptions{ + { + SendInitialEvents: pointer.Bool(true), + AllowWatchBookmarks: true, + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + TimeoutSeconds: pointer.Int64(1), + }, + { + ResourceVersion: "0", + Limit: 500, + }, + { + AllowWatchBookmarks: true, + ResourceVersion: "1", + TimeoutSeconds: pointer.Int64(1), + }, }, - expectedError: fmt.Errorf("dummy error"), - expectedWatchRequests: 1, - expectedRequestOptions: []metav1.ListOptions{{ - SendInitialEvents: pointer.Bool(true), - AllowWatchBookmarks: true, - ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, - TimeoutSeconds: pointer.Int64(1), - }}, }, { name: "the reflector can fall back to old LIST/WATCH semantics when a server doesn't support streaming", @@ 
-350,6 +372,27 @@ func TestWatchList(t *testing.T) { expectedStoreContent: []v1.Pod{*makePod("p1", "1"), *makePod("p3", "3")}, expectedError: apierrors.NewResourceExpired("rv already expired"), }, + { + name: "prove that the reflector is checking the value of the initialEventsEnd annotation", + closeAfterWatchEvents: 3, + watchEvents: []watch.Event{ + {Type: watch.Added, Object: makePod("p1", "1")}, + {Type: watch.Added, Object: makePod("p2", "2")}, + {Type: watch.Bookmark, Object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: "2", + Annotations: map[string]string{"k8s.io/initial-events-end": "false"}, + }, + }}, + }, + expectedWatchRequests: 1, + expectedRequestOptions: []metav1.ListOptions{{ + SendInitialEvents: pointer.Bool(true), + AllowWatchBookmarks: true, + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + TimeoutSeconds: pointer.Int64(1), + }}, + }, } for _, s := range scenarios { t.Run(s.name, func(t *testing.T) { @@ -449,7 +492,7 @@ func verifyStore(t *testing.T, s Store, expectedPods []v1.Pod) { } func makePod(name, rv string) *v1.Pod { - return &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name, ResourceVersion: rv}} + return &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name, ResourceVersion: rv, UID: types.UID(name)}} } func testData() (*fakeListWatcher, Store, *Reflector, chan struct{}) { diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 10744156b8361..0fc2fd0a0cac1 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -49,12 +49,12 @@ type InClusterConfig interface { Possible() bool } -// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name +// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}} } -// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader +// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name and the fallback auth reader func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} } diff --git a/staging/src/k8s.io/client-go/tools/events/event_broadcaster.go b/staging/src/k8s.io/client-go/tools/events/event_broadcaster.go index e3000bf6ec10c..e0164f301ee6b 100644 --- a/staging/src/k8s.io/client-go/tools/events/event_broadcaster.go +++ b/staging/src/k8s.io/client-go/tools/events/event_broadcaster.go @@ -81,27 +81,27 @@ type EventSinkImpl struct { } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. 
-func (e *EventSinkImpl) Create(event *eventsv1.Event) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't create an event with empty namespace") } - return e.Interface.Events(event.Namespace).Create(context.TODO(), event, metav1.CreateOptions{}) + return e.Interface.Events(event.Namespace).Create(ctx, event, metav1.CreateOptions{}) } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (e *EventSinkImpl) Update(event *eventsv1.Event) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't update an event with empty namespace") } - return e.Interface.Events(event.Namespace).Update(context.TODO(), event, metav1.UpdateOptions{}) + return e.Interface.Events(event.Namespace).Update(ctx, event, metav1.UpdateOptions{}) } // Patch applies the patch and returns the patched event, and an error, if there is any. -func (e *EventSinkImpl) Patch(event *eventsv1.Event, data []byte) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Patch(ctx context.Context, event *eventsv1.Event, data []byte) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't patch an event with empty namespace") } - return e.Interface.Events(event.Namespace).Patch(context.TODO(), event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) + return e.Interface.Events(event.Namespace).Patch(ctx, event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) } // NewBroadcaster Creates a new event broadcaster. @@ -124,13 +124,13 @@ func (e *eventBroadcasterImpl) Shutdown() { } // refreshExistingEventSeries refresh events TTL -func (e *eventBroadcasterImpl) refreshExistingEventSeries() { +func (e *eventBroadcasterImpl) refreshExistingEventSeries(ctx context.Context) { // TODO: Investigate whether lock contention won't be a problem e.mu.Lock() defer e.mu.Unlock() for isomorphicKey, event := range e.eventCache { if event.Series != nil { - if recordedEvent, retry := recordEvent(e.sink, event); !retry { + if recordedEvent, retry := recordEvent(ctx, e.sink, event); !retry { if recordedEvent != nil { e.eventCache[isomorphicKey] = recordedEvent } @@ -142,7 +142,7 @@ func (e *eventBroadcasterImpl) refreshExistingEventSeries() { // finishSeries checks if a series has ended and either: // - write final count to the apiserver // - delete a singleton event (i.e. series field is nil) from the cache -func (e *eventBroadcasterImpl) finishSeries() { +func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) { // TODO: Investigate whether lock contention won't be a problem e.mu.Lock() defer e.mu.Unlock() @@ -150,7 +150,7 @@ func (e *eventBroadcasterImpl) finishSeries() { eventSerie := event.Series if eventSerie != nil { if eventSerie.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) { - if _, retry := recordEvent(e.sink, event); !retry { + if _, retry := recordEvent(ctx, e.sink, event); !retry { delete(e.eventCache, isomorphicKey) } } @@ -161,13 +161,13 @@ func (e *eventBroadcasterImpl) finishSeries() { } // NewRecorder returns an EventRecorder that records events with the given event source. 
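With EventSinkImpl's methods now taking a context, callers can bound or cancel the underlying API calls. A rough usage sketch against a fake clientset (object names and field values are illustrative only):

```go
package main

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	eventsv1 "k8s.io/api/events/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/events"
)

func main() {
	// A fake clientset stands in for a real cluster connection.
	client := fake.NewSimpleClientset()
	sink := &events.EventSinkImpl{Interface: client.EventsV1()}

	// The context now flows all the way down to the REST call.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	ev := &eventsv1.Event{
		ObjectMeta:          metav1.ObjectMeta{Name: "demo.1", Namespace: "default"},
		EventTime:           metav1.NowMicro(),
		ReportingController: "demo-controller",
		ReportingInstance:   "demo-controller-node1",
		Action:              "Create",
		Reason:              "Demo",
		Type:                corev1.EventTypeNormal,
		Regarding:           corev1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "p1"},
	}
	if _, err := sink.Create(ctx, ev); err != nil {
		panic(err)
	}
}
```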
-func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder { +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger { hostname, _ := os.Hostname() reportingInstance := reportingController + "-" + hostname - return &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}} + return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()} } -func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.Clock) { +func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1.Event, clock clock.Clock) { // Make a copy before modification, because there could be multiple listeners. eventCopy := event.DeepCopy() go func() { @@ -197,7 +197,7 @@ func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.C }() if evToRecord != nil { // TODO: Add a metric counting the number of recording attempts - e.attemptRecording(evToRecord) + e.attemptRecording(ctx, evToRecord) // We don't want the new recorded Event to be reflected in the // client's cache because server-side mutations could mess with the // aggregation mechanism used by the client. @@ -205,40 +205,45 @@ func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.C }() } -func (e *eventBroadcasterImpl) attemptRecording(event *eventsv1.Event) *eventsv1.Event { +func (e *eventBroadcasterImpl) attemptRecording(ctx context.Context, event *eventsv1.Event) { tries := 0 for { - if recordedEvent, retry := recordEvent(e.sink, event); !retry { - return recordedEvent + if _, retry := recordEvent(ctx, e.sink, event); !retry { + return } tries++ if tries >= maxTriesPerEvent { - klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) - return nil + klog.FromContext(ctx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event) + return } // Randomize sleep so that various clients won't all be - // synced up if the master goes down. - time.Sleep(wait.Jitter(e.sleepDuration, 0.25)) + // synced up if the master goes down. Give up when + // the context is canceled. + select { + case <-ctx.Done(): + return + case <-time.After(wait.Jitter(e.sleepDuration, 0.25)): + } } } -func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) { +func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) { var newEvent *eventsv1.Event var err error isEventSeries := event.Series != nil if isEventSeries { patch, patchBytesErr := createPatchBytesForSeries(event) if patchBytesErr != nil { - klog.Errorf("Unable to calculate diff, no merge is possible: %v", patchBytesErr) + klog.FromContext(ctx).Error(patchBytesErr, "Unable to calculate diff, no merge is possible") return nil, false } - newEvent, err = sink.Patch(event, patch) + newEvent, err = sink.Patch(ctx, event, patch) } // Update can fail because the event may have been removed and it no longer exists. 
if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) { // Making sure that ResourceVersion is empty on creation event.ResourceVersion = "" - newEvent, err = sink.Create(event) + newEvent, err = sink.Create(ctx, event) } if err == nil { return newEvent, false @@ -248,7 +253,7 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event) return nil, false case *errors.StatusError: if errors.IsAlreadyExists(err) { @@ -260,9 +265,9 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) if isEventSeries { return nil, true } - klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err) } else { - klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event) } return nil, false case *errors.UnexpectedObjectError: @@ -271,7 +276,7 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) default: // This case includes actual http transport errors. Go ahead and retry. } - klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)") return nil, true } @@ -307,21 +312,31 @@ func getKey(event *eventsv1.Event) eventKey { // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. // The return value can be ignored or used to stop recording, if desired. // TODO: this function should also return an error. +// +// Deprecated: use StartLogging instead. func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() { - stopWatcher, err := e.StartEventWatcher( + logger := klog.Background().V(int(verbosity)) + stopWatcher, err := e.StartLogging(logger) + if err != nil { + logger.Error(err, "Failed to start event watcher") + return func() {} + } + return stopWatcher +} + +// StartLogging starts sending events received from this EventBroadcaster to the structured logger. +// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`). +// The returned function can be ignored or used to stop recording, if desired. 
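The switch above only decides whether a failed write is retryable; the loop in attemptRecording then sleeps with jitter between attempts and, with this change, gives up as soon as the context is canceled. The pattern in isolation (trySend is a hypothetical stand-in for the sink call):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// retryWithJitter keeps calling trySend until it succeeds, the retry budget is
// exhausted, or the context is canceled.
func retryWithJitter(ctx context.Context, maxTries int, sleep time.Duration, trySend func() error) error {
	for tries := 1; ; tries++ {
		if err := trySend(); err == nil {
			return nil
		}
		if tries >= maxTries {
			return errors.New("retry limit exceeded")
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(wait.Jitter(sleep, 0.25)):
			// Jitter keeps many clients from retrying in lockstep after an apiserver outage.
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := retryWithJitter(ctx, 12, 10*time.Second, func() error {
		return errors.New("apiserver unreachable")
	})
	fmt.Println(err) // context deadline exceeded: cancellation ends the retries early
}
```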
+func (e *eventBroadcasterImpl) StartLogging(logger klog.Logger) (func(), error) { + return e.StartEventWatcher( func(obj runtime.Object) { event, ok := obj.(*eventsv1.Event) if !ok { - klog.Errorf("unexpected type, expected eventsv1.Event") + logger.Error(nil, "unexpected type, expected eventsv1.Event") return } - klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note) + logger.Info("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note) }) - if err != nil { - klog.Errorf("failed to start event watcher: '%v'", err) - return func() {} - } - return stopWatcher } // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. @@ -329,7 +344,6 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error) { watcher, err := e.Watch() if err != nil { - klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err) return nil, err } go func() { @@ -345,37 +359,42 @@ func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime return watcher.Stop, nil } -func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) error { +func (e *eventBroadcasterImpl) startRecordingEvents(ctx context.Context) error { eventHandler := func(obj runtime.Object) { event, ok := obj.(*eventsv1.Event) if !ok { - klog.Errorf("unexpected type, expected eventsv1.Event") + klog.FromContext(ctx).Error(nil, "unexpected type, expected eventsv1.Event") return } - e.recordToSink(event, clock.RealClock{}) + e.recordToSink(ctx, event, clock.RealClock{}) } stopWatcher, err := e.StartEventWatcher(eventHandler) if err != nil { return err } go func() { - <-stopCh + <-ctx.Done() stopWatcher() }() return nil } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// Deprecated: use StartRecordingToSinkWithContext instead. func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) { - go wait.Until(e.refreshExistingEventSeries, refreshTime, stopCh) - go wait.Until(e.finishSeries, finishTime, stopCh) - err := e.startRecordingEvents(stopCh) + err := e.StartRecordingToSinkWithContext(wait.ContextForChannel(stopCh)) if err != nil { - klog.Errorf("unexpected type, expected eventsv1.Event") - return + klog.Background().Error(err, "Failed to start recording to sink") } } +// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster to the given sink. 
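Taken together, the context-based entry points let a caller wire up the events broadcaster without a stop channel. A hedged usage sketch, assuming the existing events.NewBroadcaster(sink) constructor:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/events"
	"k8s.io/klog/v2"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	client := fake.NewSimpleClientset()
	broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
	defer broadcaster.Shutdown()

	// New style: recording stops when ctx is canceled, and setup errors are returned.
	if err := broadcaster.StartRecordingToSinkWithContext(ctx); err != nil {
		panic(err)
	}
	// New style: hand in a logger; verbosity is controlled via logger.V(...).
	stopLogging, err := broadcaster.StartLogging(klog.Background().V(3))
	if err != nil {
		panic(err)
	}
	defer stopLogging()

	recorder := broadcaster.NewRecorder(scheme.Scheme, "demo-controller")
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	recorder.Eventf(pod, nil, corev1.EventTypeNormal, "Demo", "Create", "created %q", "something")
}
```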
+func (e *eventBroadcasterImpl) StartRecordingToSinkWithContext(ctx context.Context) error { + go wait.UntilWithContext(ctx, e.refreshExistingEventSeries, refreshTime) + go wait.UntilWithContext(ctx, e.finishSeries, finishTime) + return e.startRecordingEvents(ctx) +} + type eventBroadcasterAdapterImpl struct { coreClient typedv1core.EventsGetter coreBroadcaster record.EventBroadcaster @@ -409,14 +428,14 @@ func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{ } } -func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorder { +func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorderLogger { if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name) } return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name)) } -func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorder { +func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger { return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name}) } diff --git a/staging/src/k8s.io/client-go/tools/events/event_broadcaster_test.go b/staging/src/k8s.io/client-go/tools/events/event_broadcaster_test.go index ac7f7abe80f4b..f765c2fced5b9 100644 --- a/staging/src/k8s.io/client-go/tools/events/event_broadcaster_test.go +++ b/staging/src/k8s.io/client-go/tools/events/event_broadcaster_test.go @@ -25,6 +25,7 @@ import ( eventsv1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" + "k8s.io/klog/v2/ktesting" ) func TestRecordEventToSink(t *testing.T) { @@ -78,11 +79,12 @@ func TestRecordEventToSink(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) kubeClient := fake.NewSimpleClientset() eventSink := &EventSinkImpl{Interface: kubeClient.EventsV1()} for _, ev := range tc.eventsToRecord { - recordEvent(eventSink, &ev) + recordEvent(ctx, eventSink, &ev) } recordedEvents, err := kubeClient.EventsV1().Events(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) diff --git a/staging/src/k8s.io/client-go/tools/events/event_recorder.go b/staging/src/k8s.io/client-go/tools/events/event_recorder.go index 17d0532715379..654317884f202 100644 --- a/staging/src/k8s.io/client-go/tools/events/event_recorder.go +++ b/staging/src/k8s.io/client-go/tools/events/event_recorder.go @@ -40,12 +40,33 @@ type recorderImpl struct { clock clock.Clock } +var _ EventRecorder = &recorderImpl{} + func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + recorder.eventf(klog.Background(), regarding, related, eventtype, reason, action, note, args...) +} + +type recorderImplLogger struct { + *recorderImpl + logger klog.Logger +} + +var _ EventRecorderLogger = &recorderImplLogger{} + +func (recorder *recorderImplLogger) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + recorder.eventf(recorder.logger, regarding, related, eventtype, reason, action, note, args...) 
+} + +func (recorder *recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger { + return &recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger} +} + +func (recorder *recorderImpl) eventf(logger klog.Logger, regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { timestamp := metav1.MicroTime{Time: time.Now()} message := fmt.Sprintf(note, args...) refRegarding, err := reference.GetReference(recorder.scheme, regarding) if err != nil { - klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", regarding, err, eventtype, reason, message) + logger.Error(err, "Could not construct reference, will not report event", "object", regarding, "eventType", eventtype, "reason", reason, "message", message) return } @@ -53,11 +74,11 @@ func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.O if related != nil { refRelated, err = reference.GetReference(recorder.scheme, related) if err != nil { - klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err) + logger.V(9).Info("Could not construct reference", "object", related, "err", err) } } if !util.ValidateEventType(eventtype) { - klog.Errorf("Unsupported event type: '%v'", eventtype) + logger.Error(nil, "Unsupported event type", "eventType", eventtype) return } event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action) diff --git a/staging/src/k8s.io/client-go/tools/events/eventseries_test.go b/staging/src/k8s.io/client-go/tools/events/eventseries_test.go index 2b9be92a69e1b..526101ca648c3 100644 --- a/staging/src/k8s.io/client-go/tools/events/eventseries_test.go +++ b/staging/src/k8s.io/client-go/tools/events/eventseries_test.go @@ -34,6 +34,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" restclient "k8s.io/client-go/rest" ref "k8s.io/client-go/tools/reference" + "k8s.io/klog/v2/ktesting" ) type testEventSeriesSink struct { @@ -43,7 +44,7 @@ type testEventSeriesSink struct { } // Create records the event for testing. -func (t *testEventSeriesSink) Create(e *eventsv1.Event) (*eventsv1.Event, error) { +func (t *testEventSeriesSink) Create(ctx context.Context, e *eventsv1.Event) (*eventsv1.Event, error) { if t.OnCreate != nil { return t.OnCreate(e) } @@ -51,7 +52,7 @@ func (t *testEventSeriesSink) Create(e *eventsv1.Event) (*eventsv1.Event, error) } // Update records the event for testing. -func (t *testEventSeriesSink) Update(e *eventsv1.Event) (*eventsv1.Event, error) { +func (t *testEventSeriesSink) Update(ctx context.Context, e *eventsv1.Event) (*eventsv1.Event, error) { if t.OnUpdate != nil { return t.OnUpdate(e) } @@ -59,7 +60,7 @@ func (t *testEventSeriesSink) Update(e *eventsv1.Event) (*eventsv1.Event, error) } // Patch records the event for testing. 
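The recorderImplLogger type is what makes WithLogger cheap: it swaps only the logger while sharing the underlying recorder. In practice it is combined with a per-request contextual logger, roughly like this (sketch; the recorder is assumed to come from NewRecorder):

```go
package demo

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/events"
	"k8s.io/klog/v2"
)

// emitEvent routes the recorder's own error/debug output through the
// per-request logger instead of the global default.
func emitEvent(ctx context.Context, recorder events.EventRecorderLogger, pod *corev1.Pod) {
	logger := klog.FromContext(ctx)
	recorder.WithLogger(logger).Eventf(pod, nil, corev1.EventTypeNormal, "Demo", "Reconcile", "reconciled %s", pod.Name)
}
```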
-func (t *testEventSeriesSink) Patch(e *eventsv1.Event, p []byte) (*eventsv1.Event, error) { +func (t *testEventSeriesSink) Patch(ctx context.Context, e *eventsv1.Event, p []byte) (*eventsv1.Event, error) { if t.OnPatch != nil { return t.OnPatch(e, p) } @@ -135,7 +136,9 @@ func TestEventSeriesf(t *testing.T) { }, } - stopCh := make(chan struct{}) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() createEvent := make(chan *eventsv1.Event) updateEvent := make(chan *eventsv1.Event) @@ -163,7 +166,7 @@ func TestEventSeriesf(t *testing.T) { // Don't call StartRecordingToSink, as we don't need neither refreshing event // series nor finishing them in this tests and additional events updated would // race with our expected ones. - err = broadcaster.startRecordingEvents(stopCh) + err = broadcaster.startRecordingEvents(ctx) if err != nil { t.Fatal(err) } @@ -184,7 +187,6 @@ func TestEventSeriesf(t *testing.T) { validateEvent(strconv.Itoa(index), false, actualEvent, item.expect, t) } } - close(stopCh) } // TestEventSeriesWithEventSinkImplRace verifies that when Events are emitted to @@ -256,6 +258,7 @@ func validateEvent(messagePrefix string, expectedUpdate bool, actualEvent *event } func TestFinishSeries(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) hostname, _ := os.Hostname() testPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -295,7 +298,7 @@ func TestFinishSeries(t *testing.T) { } cache := map[eventKey]*eventsv1.Event{} eventBroadcaster := newBroadcaster(&testEvents, 0, cache).(*eventBroadcasterImpl) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, "k8s.io/kube-foo").(*recorderImpl) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, "k8s.io/kube-foo").(*recorderImplLogger) cachedEvent := recorder.makeEvent(regarding, related, metav1.MicroTime{Time: time.Now()}, v1.EventTypeNormal, "test", "some verbose message: 1", "eventTest", "eventTest-"+hostname, "started") nonFinishedEvent := cachedEvent.DeepCopy() nonFinishedEvent.ReportingController = "nonFinished-controller" @@ -305,7 +308,7 @@ func TestFinishSeries(t *testing.T) { } cache[getKey(cachedEvent)] = cachedEvent cache[getKey(nonFinishedEvent)] = nonFinishedEvent - eventBroadcaster.finishSeries() + eventBroadcaster.finishSeries(ctx) select { case actualEvent := <-patchEvent: t.Logf("validating event affected by patch request") @@ -327,6 +330,7 @@ func TestFinishSeries(t *testing.T) { } func TestRefreshExistingEventSeries(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) hostname, _ := os.Hostname() testPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -381,7 +385,7 @@ func TestRefreshExistingEventSeries(t *testing.T) { } cache := map[eventKey]*eventsv1.Event{} eventBroadcaster := newBroadcaster(&testEvents, 0, cache).(*eventBroadcasterImpl) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, "k8s.io/kube-foo").(*recorderImpl) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, "k8s.io/kube-foo").(*recorderImplLogger) cachedEvent := recorder.makeEvent(regarding, related, metav1.MicroTime{Time: time.Now()}, v1.EventTypeNormal, "test", "some verbose message: 1", "eventTest", "eventTest-"+hostname, "started") cachedEvent.Series = &eventsv1.EventSeries{ Count: 10, @@ -390,7 +394,7 @@ func TestRefreshExistingEventSeries(t *testing.T) { cacheKey := getKey(cachedEvent) cache[cacheKey] = cachedEvent - eventBroadcaster.refreshExistingEventSeries() + eventBroadcaster.refreshExistingEventSeries(ctx) select { case <-patchEvent: t.Logf("validating event affected by 
patch request") diff --git a/staging/src/k8s.io/client-go/tools/events/fake.go b/staging/src/k8s.io/client-go/tools/events/fake.go index d572e0d3e173a..e26826d6c8358 100644 --- a/staging/src/k8s.io/client-go/tools/events/fake.go +++ b/staging/src/k8s.io/client-go/tools/events/fake.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" ) // FakeRecorder is used as a fake during tests. It is thread safe. It is usable @@ -29,6 +30,8 @@ type FakeRecorder struct { Events chan string } +var _ EventRecorderLogger = &FakeRecorder{} + // Eventf emits an event func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { if f.Events != nil { @@ -36,6 +39,10 @@ func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, } } +func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger { + return f +} + // NewFakeRecorder creates new fake event recorder with event channel with // buffer of given size. func NewFakeRecorder(bufferSize int) *FakeRecorder { diff --git a/staging/src/k8s.io/client-go/tools/events/interfaces.go b/staging/src/k8s.io/client-go/tools/events/interfaces.go index 20f8ca05daad3..bb6109f623493 100644 --- a/staging/src/k8s.io/client-go/tools/events/interfaces.go +++ b/staging/src/k8s.io/client-go/tools/events/interfaces.go @@ -17,39 +17,30 @@ limitations under the License. package events import ( + "context" + eventsv1 "k8s.io/api/events/v1" "k8s.io/apimachinery/pkg/runtime" + internalevents "k8s.io/client-go/tools/internal/events" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" ) -// EventRecorder knows how to record events on behalf of an EventSource. -type EventRecorder interface { - // Eventf constructs an event from the given information and puts it in the queue for sending. - // 'regarding' is the object this event is about. Event will make a reference-- or you may also - // pass a reference to the object directly. - // 'related' is the secondary object for more complex actions. E.g. when regarding object triggers - // a creation or deletion of related object. - // 'type' of this event, and can be one of Normal, Warning. New types could be added in future - // 'reason' is the reason this event is generated. 'reason' should be short and unique; it - // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used - // to automate handling of events, so imagine people writing switch statements to handle them. - // You want to make that easy. - // 'action' explains what happened with regarding/what action did the ReportingController - // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) - // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). - // 'note' is intended to be human readable. - Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) -} +type EventRecorder = internalevents.EventRecorder +type EventRecorderLogger = internalevents.EventRecorderLogger // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. type EventBroadcaster interface { // StartRecordingToSink starts sending events received from the specified eventBroadcaster. + // Deprecated: use StartRecordingToSinkWithContext instead. 
StartRecordingToSink(stopCh <-chan struct{}) + // StartRecordingToSink starts sending events received from the specified eventBroadcaster. + StartRecordingToSinkWithContext(ctx context.Context) error + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster // with the event source set to the given event source. - NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder + NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger // StartEventWatcher enables you to watch for emitted events without usage // of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests). @@ -59,8 +50,14 @@ type EventBroadcaster interface { // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured // logging function. The return value can be ignored or used to stop recording, if desired. + // Deprecated: use StartLogging instead. StartStructuredLogging(verbosity klog.Level) func() + // StartLogging starts sending events received from this EventBroadcaster to the structured logger. + // To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`). + // The returned function can be ignored or used to stop recording, if desired. + StartLogging(logger klog.Logger) (func(), error) + // Shutdown shuts down the broadcaster Shutdown() } @@ -70,9 +67,9 @@ type EventBroadcaster interface { // It is assumed that EventSink will return the same sorts of errors as // client-go's REST client. type EventSink interface { - Create(event *eventsv1.Event) (*eventsv1.Event, error) - Update(event *eventsv1.Event) (*eventsv1.Event, error) - Patch(oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) + Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) + Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) + Patch(ctx context.Context, oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) } // EventBroadcasterAdapter is a auxiliary interface to simplify migration to @@ -85,10 +82,10 @@ type EventBroadcasterAdapter interface { StartRecordingToSink(stopCh <-chan struct{}) // NewRecorder creates a new Event Recorder with specified name. - NewRecorder(name string) EventRecorder + NewRecorder(name string) EventRecorderLogger // DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name. - DeprecatedNewLegacyRecorder(name string) record.EventRecorder + DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger // Shutdown shuts down the broadcaster. Shutdown() diff --git a/staging/src/k8s.io/client-go/tools/internal/events/interfaces.go b/staging/src/k8s.io/client-go/tools/internal/events/interfaces.go new file mode 100644 index 0000000000000..be6261b531f2b --- /dev/null +++ b/staging/src/k8s.io/client-go/tools/internal/events/interfaces.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
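Because EventSink now threads a context through Create, Update, and Patch, out-of-tree sinks have to adopt the new signatures. A minimal in-memory sink satisfying the updated interface could look like this (illustrative only, not part of the patch):

```go
package demo

import (
	"context"
	"sync"

	eventsv1 "k8s.io/api/events/v1"
	"k8s.io/client-go/tools/events"
)

// memorySink records events in memory; handy for tests of code that only
// needs something satisfying the context-aware EventSink interface.
type memorySink struct {
	mu     sync.Mutex
	events []*eventsv1.Event
}

var _ events.EventSink = &memorySink{}

func (s *memorySink) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
	return s.store(ctx, event)
}

func (s *memorySink) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
	return s.store(ctx, event)
}

func (s *memorySink) Patch(ctx context.Context, event *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
	// A real sink would apply the strategic merge patch; here we just keep the event.
	return s.store(ctx, event)
}

func (s *memorySink) store(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
	if err := ctx.Err(); err != nil {
		return nil, err // respect cancellation the way a real API call would
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.events = append(s.events, event.DeepCopy())
	return event, nil
}
```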
+*/ + +// Package internal is needed to break an import cycle: record.EventRecorderAdapter +// needs this interface definition to implement it, but event.NewEventBroadcasterAdapter +// needs record.NewBroadcaster. Therefore this interface cannot be in event/interfaces.go. +package internal + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" +) + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Eventf constructs an event from the given information and puts it in the queue for sending. + // 'regarding' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'related' is the secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'action' explains what happened with regarding/what action did the ReportingController + // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) + // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). + // 'note' is intended to be human readable. + Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) +} + +// EventRecorderLogger extends EventRecorder such that a logger can +// be set for methods in EventRecorder. Normally, those methods +// uses the global default logger to record errors and debug messages. +// If that is not desired, use WithLogger to provide a logger instance. +type EventRecorderLogger interface { + EventRecorder + + // WithLogger replaces the context used for logging. This is a cheap call + // and meant to be used for contextual logging: + // recorder := ... + // logger := klog.FromContext(ctx) + // recorder.WithLogger(logger).Eventf(...) + WithLogger(logger klog.Logger) EventRecorderLogger +} diff --git a/staging/src/k8s.io/client-go/tools/record/event.go b/staging/src/k8s.io/client-go/tools/record/event.go index f176167dc80ef..d1511696d0fb1 100644 --- a/staging/src/k8s.io/client-go/tools/record/event.go +++ b/staging/src/k8s.io/client-go/tools/record/event.go @@ -29,6 +29,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" + internalevents "k8s.io/client-go/tools/internal/events" "k8s.io/client-go/tools/record/util" ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" @@ -110,6 +111,21 @@ type EventRecorder interface { AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) } +// EventRecorderLogger extends EventRecorder such that a logger can +// be set for methods in EventRecorder. Normally, those methods +// uses the global default logger to record errors and debug messages. +// If that is not desired, use WithLogger to provide a logger instance. +type EventRecorderLogger interface { + EventRecorder + + // WithLogger replaces the context used for logging. 
This is a cheap call + // and meant to be used for contextual logging: + // recorder := ... + // logger := klog.FromContext(ctx) + // recorder.WithLogger(logger).Eventf(...) + WithLogger(logger klog.Logger) EventRecorderLogger +} + // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. type EventBroadcaster interface { // StartEventWatcher starts sending events received from this EventBroadcaster to the given @@ -131,7 +147,7 @@ type EventBroadcaster interface { // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster // with the event source set to the given event source. - NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger // Shutdown shuts down the broadcaster. Once the broadcaster is shut // down, it will only try to record an event in a sink once before @@ -142,12 +158,14 @@ type EventBroadcaster interface { // EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder // implementing the new "k8s.io/client-go/tools/events".EventRecorder interface. type EventRecorderAdapter struct { - recorder EventRecorder + recorder EventRecorderLogger } +var _ internalevents.EventRecorder = &EventRecorderAdapter{} + // NewEventRecorderAdapter returns an adapter implementing the new // "k8s.io/client-go/tools/events".EventRecorder interface. -func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter { +func NewEventRecorderAdapter(recorder EventRecorderLogger) *EventRecorderAdapter { return &EventRecorderAdapter{ recorder: recorder, } @@ -158,28 +176,76 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re a.recorder.Eventf(regarding, eventtype, reason, note, args...) } +func (a *EventRecorderAdapter) WithLogger(logger klog.Logger) internalevents.EventRecorderLogger { + return &EventRecorderAdapter{ + recorder: a.recorder.WithLogger(logger), + } +} + // Creates a new event broadcaster. -func NewBroadcaster() EventBroadcaster { - return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) +func NewBroadcaster(opts ...BroadcasterOption) EventBroadcaster { + c := config{ + sleepDuration: defaultSleepDuration, + } + for _, opt := range opts { + opt(&c) + } + eventBroadcaster := &eventBroadcasterImpl{ + Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), + sleepDuration: c.sleepDuration, + options: c.CorrelatorOptions, + } + ctx := c.Context + if ctx == nil { + ctx = context.Background() + } else { + // Calling Shutdown is not required when a context was provided: + // when the context is canceled, this goroutine will shut down + // the broadcaster. 
+ go func() { + <-ctx.Done() + eventBroadcaster.Broadcaster.Shutdown() + }() + } + eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx) + return eventBroadcaster } func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration) + return NewBroadcaster(WithSleepDuration(sleepDuration)) } func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { - eventBroadcaster := newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) - eventBroadcaster.options = options - return eventBroadcaster + return NewBroadcaster(WithCorrelatorOptions(options)) } -func newEventBroadcaster(broadcaster *watch.Broadcaster, sleepDuration time.Duration) *eventBroadcasterImpl { - eventBroadcaster := &eventBroadcasterImpl{ - Broadcaster: broadcaster, - sleepDuration: sleepDuration, +func WithCorrelatorOptions(options CorrelatorOptions) BroadcasterOption { + return func(c *config) { + c.CorrelatorOptions = options + } +} + +// WithContext sets a context for the broadcaster. Canceling the context will +// shut down the broadcaster, Shutdown doesn't need to be called. The context +// can also be used to provide a logger. +func WithContext(ctx context.Context) BroadcasterOption { + return func(c *config) { + c.Context = ctx } - eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(context.Background()) - return eventBroadcaster +} + +func WithSleepDuration(sleepDuration time.Duration) BroadcasterOption { + return func(c *config) { + c.sleepDuration = sleepDuration + } +} + +type BroadcasterOption func(*config) + +type config struct { + CorrelatorOptions + context.Context + sleepDuration time.Duration } type eventBroadcasterImpl struct { @@ -220,12 +286,12 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve } tries := 0 for { - if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + if recordEvent(e.cancelationCtx, sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { break } tries++ if tries >= maxTriesPerEvent { - klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event) break } @@ -237,7 +303,7 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve } select { case <-e.cancelationCtx.Done(): - klog.Errorf("Unable to write event '%#v' (broadcaster is shut down)", event) + klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (broadcaster is shut down)", "event", event) return case <-time.After(delay): } @@ -248,7 +314,7 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve // was successfully recorded or discarded, false if it should be retried. // If updateExistingEvent is false, it creates a new event, otherwise it updates // existing event. 
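With these functional options, the legacy record broadcaster can be tied to a context for shutdown and logging instead of relying on an explicit Shutdown call. A usage sketch, assuming the options land exactly as shown in this hunk:

```go
package demo

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

func newRecorder(ctx context.Context) record.EventRecorderLogger {
	// Canceling ctx shuts the broadcaster down; the logger attached to ctx (if any)
	// is what StartStructuredLogging and the retry error messages will use.
	broadcaster := record.NewBroadcaster(
		record.WithContext(ctx),
		record.WithSleepDuration(10*time.Second),
	)
	broadcaster.StartStructuredLogging(3)
	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "demo-controller"})
}
```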
-func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { +func recordEvent(ctx context.Context, sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { var newEvent *v1.Event var err error if updateExistingEvent { @@ -271,13 +337,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event) return true case *errors.StatusError: if errors.IsAlreadyExists(err) || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) { - klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err) } else { - klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event) } return true case *errors.UnexpectedObjectError: @@ -286,7 +352,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv default: // This case includes actual http transport errors. Go ahead and retry. } - klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err) + klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)", "event", event) return false } @@ -299,12 +365,15 @@ func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...int }) } -// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. +// StartStructuredLogging starts sending events received from this EventBroadcaster to a structured logger. +// The logger is retrieved from a context if the broadcaster was constructed with a context, otherwise +// the global default is used. // The return value can be ignored or used to stop recording, if desired. 
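All of the converted call sites obtain their logger via klog.FromContext, so whoever builds the context controls where these messages go; the tests in this patch do it with ktesting.NewTestContext, and production code can attach a named logger. A small sketch (the names are illustrative):

```go
package demo

import (
	"context"

	"k8s.io/klog/v2"
)

// withNamedLogger returns a context whose logger carries a name and extra
// key/value pairs; anything later logged via klog.FromContext(ctx) inherits them.
func withNamedLogger(ctx context.Context) context.Context {
	logger := klog.Background().WithName("event-broadcaster").WithValues("component", "demo")
	return klog.NewContext(ctx, logger)
}
```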
func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface { + loggerV := klog.FromContext(e.cancelationCtx).V(int(verbosity)) return e.StartEventWatcher( func(e *v1.Event) { - klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message) + loggerV.Info("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message) }) } @@ -313,26 +382,32 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { watcher, err := e.Watch() if err != nil { - klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err) + klog.FromContext(e.cancelationCtx).Error(err, "Unable start event watcher (will not retry!)") } go func() { defer utilruntime.HandleCrash() - for watchEvent := range watcher.ResultChan() { - event, ok := watchEvent.Object.(*v1.Event) - if !ok { - // This is all local, so there's no reason this should - // ever happen. - continue + for { + select { + case <-e.cancelationCtx.Done(): + watcher.Stop() + return + case watchEvent := <-watcher.ResultChan(): + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) } - eventHandler(event) } }() return watcher } // NewRecorder returns an EventRecorder that records events with the given event source. -func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { - return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}} +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger { + return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()} } type recorderImpl struct { @@ -342,15 +417,17 @@ type recorderImpl struct { clock clock.PassiveClock } -func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, eventtype, reason, message string) { +var _ EventRecorder = &recorderImpl{} + +func (recorder *recorderImpl) generateEvent(logger klog.Logger, object runtime.Object, annotations map[string]string, eventtype, reason, message string) { ref, err := ref.GetReference(recorder.scheme, object) if err != nil { - klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + logger.Error(err, "Could not construct reference, will not report event", "object", object, "eventType", eventtype, "reason", reason, "message", message) return } if !util.ValidateEventType(eventtype) { - klog.Errorf("Unsupported event type: '%v'", eventtype) + logger.Error(nil, "Unsupported event type", "eventType", eventtype) return } @@ -367,16 +444,16 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m // outgoing events anyway). 
sent, err := recorder.ActionOrDrop(watch.Added, event) if err != nil { - klog.Errorf("unable to record event: %v (will not retry!)", err) + logger.Error(err, "Unable to record event (will not retry!)") return } if !sent { - klog.Errorf("unable to record event: too many queued events, dropped event %#v", event) + logger.Error(nil, "Unable to record event: too many queued events, dropped event", "event", event) } } func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { - recorder.generateEvent(object, nil, eventtype, reason, message) + recorder.generateEvent(klog.Background(), object, nil, eventtype, reason, message) } func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { @@ -384,7 +461,7 @@ func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, m } func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) + recorder.generateEvent(klog.Background(), object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) } func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { @@ -408,3 +485,26 @@ func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map Type: eventtype, } } + +type recorderImplLogger struct { + *recorderImpl + logger klog.Logger +} + +var _ EventRecorderLogger = &recorderImplLogger{} + +func (recorder recorderImplLogger) Event(object runtime.Object, eventtype, reason, message string) { + recorder.recorderImpl.generateEvent(recorder.logger, object, nil, eventtype, reason, message) +} + +func (recorder recorderImplLogger) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder recorderImplLogger) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(recorder.logger, object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger { + return recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger} +} diff --git a/staging/src/k8s.io/client-go/tools/record/event_test.go b/staging/src/k8s.io/client-go/tools/record/event_test.go index fb68817ce9ad2..f1bdef78e3a00 100644 --- a/staging/src/k8s.io/client-go/tools/record/event_test.go +++ b/staging/src/k8s.io/client-go/tools/record/event_test.go @@ -112,7 +112,7 @@ func TestNonRacyShutdown(t *testing.T) { caster := NewBroadcasterForTests(0) clock := testclocks.NewFakeClock(time.Now()) - recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, caster, clock) + recorder := recorderWithFakeClock(t, v1.EventSource{Component: "eventTest"}, caster, clock) var wg sync.WaitGroup wg.Add(100) @@ -381,7 +381,7 @@ func TestEventf(t *testing.T) { sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) clock := testclocks.NewFakeClock(time.Now()) - recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock) + recorder := recorderWithFakeClock(t, v1.EventSource{Component: "eventTest"}, 
eventBroadcaster, clock) for index, item := range table { clock.Step(1 * time.Second) logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) { @@ -407,7 +407,7 @@ func TestEventf(t *testing.T) { sinkWatcher.Stop() } -func recorderWithFakeClock(eventSource v1.EventSource, eventBroadcaster EventBroadcaster, clock clock.Clock) EventRecorder { +func recorderWithFakeClock(t *testing.T, eventSource v1.EventSource, eventBroadcaster EventBroadcaster, clock clock.Clock) EventRecorder { return &recorderImpl{scheme.Scheme, eventSource, eventBroadcaster.(*eventBroadcasterImpl).Broadcaster, clock} } @@ -662,7 +662,7 @@ func TestEventfNoNamespace(t *testing.T) { sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) clock := testclocks.NewFakeClock(time.Now()) - recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock) + recorder := recorderWithFakeClock(t, v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock) for index, item := range table { clock.Step(1 * time.Second) @@ -955,7 +955,7 @@ func TestMultiSinkCache(t *testing.T) { eventBroadcaster := NewBroadcasterForTests(0) clock := testclocks.NewFakeClock(time.Now()) - recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock) + recorder := recorderWithFakeClock(t, v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock) sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) for index, item := range table { diff --git a/staging/src/k8s.io/client-go/tools/record/fake.go b/staging/src/k8s.io/client-go/tools/record/fake.go index fda4ad8ff8ad3..67eac481712ad 100644 --- a/staging/src/k8s.io/client-go/tools/record/fake.go +++ b/staging/src/k8s.io/client-go/tools/record/fake.go @@ -20,6 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" ) // FakeRecorder is used as a fake during tests. It is thread safe. It is usable @@ -31,6 +32,8 @@ type FakeRecorder struct { IncludeObject bool } +var _ EventRecorderLogger = &FakeRecorder{} + func objectString(object runtime.Object, includeObject bool) string { if !includeObject { return "" @@ -68,6 +71,10 @@ func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[st f.writeEvent(object, annotations, eventtype, reason, messageFmt, args...) } +func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger { + return f +} + // NewFakeRecorder creates new fake event recorder with event channel with // buffer of given size. func NewFakeRecorder(bufferSize int) *FakeRecorder { diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/fallback.go b/staging/src/k8s.io/client-go/tools/remotecommand/fallback.go new file mode 100644 index 0000000000000..4846cdb55097a --- /dev/null +++ b/staging/src/k8s.io/client-go/tools/remotecommand/fallback.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
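Since FakeRecorder now also satisfies EventRecorderLogger, with WithLogger simply returning the same fake, existing tests that assert on emitted events keep working. A tiny illustration (hypothetical test, not part of the patch):

```go
package demo

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
)

func TestFakeRecorderStillBuffersEvents(t *testing.T) {
	recorder := record.NewFakeRecorder(10)
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}

	// WithLogger returns the same fake, so chaining it is harmless in tests.
	recorder.WithLogger(klog.Background()).Eventf(pod, corev1.EventTypeWarning, "Failed", "something broke: %v", "timeout")

	select {
	case msg := <-recorder.Events:
		t.Log(msg)
	default:
		t.Fatal("expected an event to be recorded")
	}
}
```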
+*/ + +package remotecommand + +import ( + "context" +) + +var _ Executor = &fallbackExecutor{} + +type fallbackExecutor struct { + primary Executor + secondary Executor + shouldFallback func(error) bool +} + +// NewFallbackExecutor creates an Executor that first attempts to use the +// WebSocketExecutor, falling back to the legacy SPDYExecutor if the initial +// websocket "StreamWithContext" call fails. +// func NewFallbackExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { +func NewFallbackExecutor(primary, secondary Executor, shouldFallback func(error) bool) (Executor, error) { + return &fallbackExecutor{ + primary: primary, + secondary: secondary, + shouldFallback: shouldFallback, + }, nil +} + +// Stream is deprecated. Please use "StreamWithContext". +func (f *fallbackExecutor) Stream(options StreamOptions) error { + return f.StreamWithContext(context.Background(), options) +} + +// StreamWithContext initially attempts to call "StreamWithContext" using the +// primary executor, falling back to calling the secondary executor if the +// initial primary call to upgrade to a websocket connection fails. +func (f *fallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + err := f.primary.StreamWithContext(ctx, options) + if f.shouldFallback(err) { + return f.secondary.StreamWithContext(ctx, options) + } + return err +} diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/fallback_test.go b/staging/src/k8s.io/client-go/tools/remotecommand/fallback_test.go new file mode 100644 index 0000000000000..70049857050b1 --- /dev/null +++ b/staging/src/k8s.io/client-go/tools/remotecommand/fallback_test.go @@ -0,0 +1,227 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "bytes" + "context" + "crypto/rand" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" +) + +func TestFallbackClient_WebSocketPrimarySucceeds(t *testing.T) { + // Create fake WebSocket server. Copy received STDIN data back onto STDOUT stream. + websocketServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + conns, err := webSocketServerStreams(req, w, streamOptionsFromRequest(req)) + if err != nil { + w.WriteHeader(http.StatusForbidden) + return + } + defer conns.conn.Close() + // Loopback the STDIN stream onto the STDOUT stream. + _, err = io.Copy(conns.stdoutStream, conns.stdinStream) + require.NoError(t, err) + })) + defer websocketServer.Close() + + // Now create the fallback client (executor), and point it to the "websocketServer". + // Must add STDIN and STDOUT query params for the client request. + websocketServer.URL = websocketServer.URL + "?" 
+ "stdin=true" + "&" + "stdout=true" + websocketLocation, err := url.Parse(websocketServer.URL) + require.NoError(t, err) + websocketExecutor, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) + require.NoError(t, err) + spdyExecutor, err := NewSPDYExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketLocation) + require.NoError(t, err) + // Never fallback, so always use the websocketExecutor, which succeeds against websocket server. + exec, err := NewFallbackExecutor(websocketExecutor, spdyExecutor, func(error) bool { return false }) + require.NoError(t, err) + // Generate random data, and set it up to stream on STDIN. The data will be + // returned on the STDOUT buffer. + randomSize := 1024 * 1024 + randomData := make([]byte, randomSize) + if _, err := rand.Read(randomData); err != nil { + t.Errorf("unexpected error reading random data: %v", err) + } + var stdout bytes.Buffer + options := &StreamOptions{ + Stdin: bytes.NewReader(randomData), + Stdout: &stdout, + } + errorChan := make(chan error) + go func() { + // Start the streaming on the WebSocket "exec" client. + errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + if err != nil { + t.Errorf("unexpected error") + } + } + + data, err := io.ReadAll(bytes.NewReader(stdout.Bytes())) + if err != nil { + t.Errorf("error reading the stream: %v", err) + return + } + // Check the random data sent on STDIN was the same returned on STDOUT. + if !bytes.Equal(randomData, data) { + t.Errorf("unexpected data received: %d sent: %d", len(data), len(randomData)) + } +} + +func TestFallbackClient_SPDYSecondarySucceeds(t *testing.T) { + // Create fake SPDY server. Copy received STDIN data back onto STDOUT stream. + spdyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var stdin, stdout bytes.Buffer + ctx, err := createHTTPStreams(w, req, &StreamOptions{ + Stdin: &stdin, + Stdout: &stdout, + }) + if err != nil { + w.WriteHeader(http.StatusForbidden) + return + } + defer ctx.conn.Close() + _, err = io.Copy(ctx.stdoutStream, ctx.stdinStream) + if err != nil { + t.Fatalf("error copying STDIN to STDOUT: %v", err) + } + })) + defer spdyServer.Close() + + spdyLocation, err := url.Parse(spdyServer.URL) + require.NoError(t, err) + websocketExecutor, err := NewWebSocketExecutor(&rest.Config{Host: spdyLocation.Host}, "GET", spdyServer.URL) + require.NoError(t, err) + spdyExecutor, err := NewSPDYExecutor(&rest.Config{Host: spdyLocation.Host}, "POST", spdyLocation) + require.NoError(t, err) + // Always fallback to spdyExecutor, and spdyExecutor succeeds against fake spdy server. + exec, err := NewFallbackExecutor(websocketExecutor, spdyExecutor, func(error) bool { return true }) + require.NoError(t, err) + // Generate random data, and set it up to stream on STDIN. The data will be + // returned on the STDOUT buffer. 
+ randomSize := 1024 * 1024 + randomData := make([]byte, randomSize) + if _, err := rand.Read(randomData); err != nil { + t.Errorf("unexpected error reading random data: %v", err) + } + var stdout bytes.Buffer + options := &StreamOptions{ + Stdin: bytes.NewReader(randomData), + Stdout: &stdout, + } + errorChan := make(chan error) + go func() { + errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + if err != nil { + t.Errorf("unexpected error") + } + } + + data, err := io.ReadAll(bytes.NewReader(stdout.Bytes())) + if err != nil { + t.Errorf("error reading the stream: %v", err) + return + } + // Check the random data sent on STDIN was the same returned on STDOUT. + if !bytes.Equal(randomData, data) { + t.Errorf("unexpected data received: %d sent: %d", len(data), len(randomData)) + } +} + +func TestFallbackClient_PrimaryAndSecondaryFail(t *testing.T) { + // Create fake WebSocket server. Copy received STDIN data back onto STDOUT stream. + websocketServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + conns, err := webSocketServerStreams(req, w, streamOptionsFromRequest(req)) + if err != nil { + w.WriteHeader(http.StatusForbidden) + return + } + defer conns.conn.Close() + // Loopback the STDIN stream onto the STDOUT stream. + _, err = io.Copy(conns.stdoutStream, conns.stdinStream) + require.NoError(t, err) + })) + defer websocketServer.Close() + + // Now create the fallback client (executor), and point it to the "websocketServer". + // Must add STDIN and STDOUT query params for the client request. + websocketServer.URL = websocketServer.URL + "?" + "stdin=true" + "&" + "stdout=true" + websocketLocation, err := url.Parse(websocketServer.URL) + require.NoError(t, err) + websocketExecutor, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) + require.NoError(t, err) + spdyExecutor, err := NewSPDYExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketLocation) + require.NoError(t, err) + // Always fallback to spdyExecutor, but spdyExecutor fails against websocket server. + exec, err := NewFallbackExecutor(websocketExecutor, spdyExecutor, func(error) bool { return true }) + require.NoError(t, err) + // Update the websocket executor to request remote command v4, which is unsupported. + fallbackExec, ok := exec.(*fallbackExecutor) + assert.True(t, ok, "error casting executor as fallbackExecutor") + websocketExec, ok := fallbackExec.primary.(*wsStreamExecutor) + assert.True(t, ok, "error casting executor as websocket executor") + // Set the attempted subprotocol version to V4; websocket server only accepts V5. + websocketExec.protocols = []string{remotecommand.StreamProtocolV4Name} + + // Generate random data, and set it up to stream on STDIN. The data will be + // returned on the STDOUT buffer. 
+ randomSize := 1024 * 1024 + randomData := make([]byte, randomSize) + if _, err := rand.Read(randomData); err != nil { + t.Errorf("unexpected error reading random data: %v", err) + } + var stdout bytes.Buffer + options := &StreamOptions{ + Stdin: bytes.NewReader(randomData), + Stdout: &stdout, + } + errorChan := make(chan error) + go func() { + errorChan <- exec.StreamWithContext(context.Background(), *options) + }() + + select { + case <-time.After(wait.ForeverTestTimeout): + t.Fatalf("expect stream to be closed after connection is closed.") + case err := <-errorChan: + // Ensure secondary executor returned an error. + require.Error(t, err) + } +} diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/spdy.go b/staging/src/k8s.io/client-go/tools/remotecommand/spdy.go index 76ea946b53573..c2bfcf8a6541e 100644 --- a/staging/src/k8s.io/client-go/tools/remotecommand/spdy.go +++ b/staging/src/k8s.io/client-go/tools/remotecommand/spdy.go @@ -34,9 +34,10 @@ type spdyStreamExecutor struct { upgrader spdy.Upgrader transport http.RoundTripper - method string - url *url.URL - protocols []string + method string + url *url.URL + protocols []string + rejectRedirects bool // if true, receiving redirect from upstream is an error } // NewSPDYExecutor connects to the provided server and upgrades the connection to @@ -49,6 +50,20 @@ func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Ex return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) } +// NewSPDYExecutorRejectRedirects returns an Executor that will upgrade the future +// connection to a SPDY bi-directional streaming connection when calling "Stream" (deprecated) +// or "StreamWithContext" (preferred). Additionally, if the upstream server returns a redirect +// during the attempted upgrade in these "Stream" calls, an error is returned. +func NewSPDYExecutorRejectRedirects(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + executor, err := NewSPDYExecutorForTransports(transport, upgrader, method, url) + if err != nil { + return nil, err + } + spdyExecutor := executor.(*spdyStreamExecutor) + spdyExecutor.rejectRedirects = true + return spdyExecutor, nil +} + // NewSPDYExecutorForTransports connects to the provided server using the given transport, // upgrades the response using the given upgrader to multiplexed bidirectional streams. 
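For callers that build the SPDY transport themselves, the redirect-rejecting constructor added below can be wired up as sketched here; the newRedirectRejectingExecutor helper is hypothetical and simply mirrors how transport and upgrader are obtained from k8s.io/client-go/transport/spdy.

package example

import (
	"net/url"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
	transportspdy "k8s.io/client-go/transport/spdy"
)

// newRedirectRejectingExecutor builds an SPDY executor that treats any upstream
// redirect during the upgrade as an error. config and execURL are assumed inputs.
func newRedirectRejectingExecutor(config *rest.Config, execURL *url.URL) (remotecommand.Executor, error) {
	wrapper, upgrader, err := transportspdy.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	return remotecommand.NewSPDYExecutorRejectRedirects(wrapper, upgrader, "POST", execURL)
}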
func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { @@ -88,9 +103,15 @@ func (e *spdyStreamExecutor) newConnectionAndStream(ctx context.Context, options return nil, nil, fmt.Errorf("error creating request: %v", err) } + client := http.Client{Transport: e.transport} + if e.rejectRedirects { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return fmt.Errorf("redirect not allowed") + } + } conn, protocol, err := spdy.Negotiate( e.upgrader, - &http.Client{Transport: e.transport}, + &client, req, e.protocols..., ) diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/spdy_test.go b/staging/src/k8s.io/client-go/tools/remotecommand/spdy_test.go index c11177a047ff3..1b1cf7491d263 100644 --- a/staging/src/k8s.io/client-go/tools/remotecommand/spdy_test.go +++ b/staging/src/k8s.io/client-go/tools/remotecommand/spdy_test.go @@ -183,6 +183,7 @@ func TestSPDYExecutorStream(t *testing.T) { } func newTestHTTPServer(f AttachFunc, options *StreamOptions) *httptest.Server { + //nolint:errcheck server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { ctx, err := createHTTPStreams(writer, request, options) if err != nil { @@ -381,7 +382,7 @@ func TestStreamRandomData(t *testing.T) { } defer ctx.conn.Close() - io.Copy(ctx.stdoutStream, ctx.stdinStream) + io.Copy(ctx.stdoutStream, ctx.stdinStream) //nolint:errcheck })) defer server.Close() diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/websocket.go b/staging/src/k8s.io/client-go/tools/remotecommand/websocket.go index 9230027c0390d..a60986decca89 100644 --- a/staging/src/k8s.io/client-go/tools/remotecommand/websocket.go +++ b/staging/src/k8s.io/client-go/tools/remotecommand/websocket.go @@ -18,8 +18,10 @@ package remotecommand import ( "context" + "errors" "fmt" "io" + "net" "net/http" "sync" "time" @@ -83,22 +85,26 @@ type wsStreamExecutor struct { heartbeatDeadline time.Duration } -// NewWebSocketExecutor allows to execute commands via a WebSocket connection. func NewWebSocketExecutor(config *restclient.Config, method, url string) (Executor, error) { + // Only supports V5 protocol for correct version skew functionality. + // Previous api servers will proxy upgrade requests to legacy websocket + // servers on container runtimes which support V1-V4. These legacy + // websocket servers will not handle the new CLOSE signal. + return NewWebSocketExecutorForProtocols(config, method, url, remotecommand.StreamProtocolV5Name) +} + +// NewWebSocketExecutorForProtocols allows to execute commands via a WebSocket connection. +func NewWebSocketExecutorForProtocols(config *restclient.Config, method, url string, protocols ...string) (Executor, error) { transport, upgrader, err := websocket.RoundTripperFor(config) if err != nil { return nil, fmt.Errorf("error creating websocket transports: %v", err) } return &wsStreamExecutor{ - transport: transport, - upgrader: upgrader, - method: method, - url: url, - // Only supports V5 protocol for correct version skew functionality. - // Previous api servers will proxy upgrade requests to legacy websocket - // servers on container runtimes which support V1-V4. These legacy - // websocket servers will not handle the new CLOSE signal. 
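The split between the two websocket constructors can be summarized with a short sketch: NewWebSocketExecutor pins the V5 subprotocol, while NewWebSocketExecutorForProtocols accepts an explicit list (used by the tests to force an older version). The helper name and the V5-then-V4 ordering here are illustrative only.

package example

import (
	"k8s.io/apimachinery/pkg/util/remotecommand"
	"k8s.io/client-go/rest"
	clientremotecommand "k8s.io/client-go/tools/remotecommand"
)

// newExecutors shows the difference between the two constructors: the first
// only offers the V5 subprotocol, the second offers an explicit list.
func newExecutors(config *rest.Config, execURL string) (clientremotecommand.Executor, clientremotecommand.Executor, error) {
	v5Only, err := clientremotecommand.NewWebSocketExecutor(config, "GET", execURL)
	if err != nil {
		return nil, nil, err
	}
	// Offering V5 first and V4 as a fallback is illustrative; production callers
	// should normally use NewWebSocketExecutor so only V5 is negotiated.
	multi, err := clientremotecommand.NewWebSocketExecutorForProtocols(config, "GET", execURL,
		remotecommand.StreamProtocolV5Name, remotecommand.StreamProtocolV4Name)
	if err != nil {
		return nil, nil, err
	}
	return v5Only, multi, nil
}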
- protocols: []string{remotecommand.StreamProtocolV5Name}, + transport: transport, + upgrader: upgrader, + method: method, + url: url, + protocols: protocols, heartbeatPeriod: pingPeriod, heartbeatDeadline: pingReadDeadline, }, nil @@ -175,10 +181,12 @@ func (e *wsStreamExecutor) StreamWithContext(ctx context.Context, options Stream } type wsStreamCreator struct { - conn *gwebsocket.Conn + conn *gwebsocket.Conn + // Protects writing to websocket connection; reading is lock-free connWriteLock sync.Mutex - streams map[byte]*stream - streamsMu sync.Mutex + // Map of stream id to stream; multiple streams read/write the connection + streams map[byte]*stream + streamsMu sync.Mutex } func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator { @@ -224,7 +232,7 @@ func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, return s, nil } -// readDemuxLoop is the reading processor for this endpoint of the websocket +// readDemuxLoop is the lock-free reading processor for this endpoint of the websocket // connection. This loop reads the connection, and demultiplexes the data // into one of the individual stream pipes (by checking the stream id). This // loop can *not* be run concurrently, because there can only be one websocket @@ -476,9 +484,18 @@ func (h *heartbeat) start() { klog.V(8).Infof("Websocket Ping succeeded") } else { klog.Errorf("Websocket Ping failed: %v", err) - // Continue, in case this is a transient failure. - // c.conn.CloseChan above will tell us when the connection is - // actually closed. + if errors.Is(err, gwebsocket.ErrCloseSent) { + // We continue because c.conn.CloseChan will already manage closing the connection. + continue + } else if e, ok := err.(net.Error); ok && e.Timeout() { + // Continue, in case this is a transient failure. + // c.conn.CloseChan above will tell us when the connection is + // actually closed. + // If the Temporary() method hadn't been deprecated, we would have used it. + // But most temporary errors are timeout errors anyway.
+ continue + } + return } } } diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/websocket_test.go b/staging/src/k8s.io/client-go/tools/remotecommand/websocket_test.go index 2b0be67c05505..61df2b77a4ca3 100644 --- a/staging/src/k8s.io/client-go/tools/remotecommand/websocket_test.go +++ b/staging/src/k8s.io/client-go/tools/remotecommand/websocket_test.go @@ -36,7 +36,7 @@ import ( gwebsocket "github.com/gorilla/websocket" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/httpstream/wsstream" @@ -74,7 +74,7 @@ func TestWebSocketClient_LoopbackStdinToStdout(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -149,7 +149,7 @@ func TestWebSocketClient_DifferentBufferSizes(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -223,7 +223,7 @@ func TestWebSocketClient_LoopbackStdinAsPipe(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -304,7 +304,7 @@ func TestWebSocketClient_LoopbackStdinToStderr(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -377,7 +377,7 @@ func TestWebSocketClient_MultipleReadChannels(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -479,7 +479,7 @@ func TestWebSocketClient_ErrorStream(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -637,7 +637,7 @@ func TestWebSocketClient_MultipleWriteChannels(t *testing.T) { if 
err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -723,7 +723,7 @@ func TestWebSocketClient_ProtocolVersions(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -766,11 +766,14 @@ func TestWebSocketClient_ProtocolVersions(t *testing.T) { func TestWebSocketClient_BadHandshake(t *testing.T) { // Create fake WebSocket server (supports V5 subprotocol). websocketServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - conns, err := webSocketServerStreams(req, w, streamOptionsFromRequest(req)) - if err != nil { - t.Fatalf("error on webSocketServerStreams: %v", err) + // Bad handshake means websocket server will not completely initialize. + _, err := webSocketServerStreams(req, w, streamOptionsFromRequest(req)) + if err == nil { + t.Fatalf("expected error, but received none.") + } + if !strings.Contains(err.Error(), "websocket server finished before becoming ready") { + t.Errorf("expected websocket server error, but got: %v", err) } - defer conns.conn.Close() })) defer websocketServer.Close() @@ -779,7 +782,7 @@ func TestWebSocketClient_BadHandshake(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -831,7 +834,7 @@ func TestWebSocketClient_HeartbeatTimeout(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -909,7 +912,7 @@ func TestWebSocketClient_TextMessageTypeError(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -970,7 +973,7 @@ func TestWebSocketClient_EmptyMessageHandled(t *testing.T) { if err != nil { t.Fatalf("Unable to parse WebSocket server URL: %s", websocketServer.URL) } - exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "POST", websocketServer.URL) + exec, err := NewWebSocketExecutor(&rest.Config{Host: websocketLocation.Host}, "GET", websocketServer.URL) if err 
!= nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -1009,14 +1012,14 @@ func TestWebSocketClient_ExecutorErrors(t *testing.T) { ExecProvider: &clientcmdapi.ExecConfig{}, AuthProvider: &clientcmdapi.AuthProviderConfig{}, } - _, err := NewWebSocketExecutor(&config, "POST", "http://localhost") + _, err := NewWebSocketExecutor(&config, "GET", "http://localhost") if err == nil { t.Errorf("expecting executor constructor error, but received none.") } else if !strings.Contains(err.Error(), "error creating websocket transports") { t.Errorf("expecting error creating transports, got (%s)", err.Error()) } // Verify that a nil context will cause an error in StreamWithContext - exec, err := NewWebSocketExecutor(&rest.Config{}, "POST", "http://localhost") + exec, err := NewWebSocketExecutor(&rest.Config{}, "GET", "http://localhost") if err != nil { t.Errorf("unexpected error creating websocket executor: %v", err) } @@ -1054,7 +1057,12 @@ func TestWebSocketClient_HeartbeatSucceeds(t *testing.T) { t.Fatalf("unable to upgrade to create websocket connection: %v", err) } defer conn.Close() - conn.ReadMessage() //nolint:errcheck + for { + _, _, err := conn.ReadMessage() + if err != nil { + break + } + } })) defer websocketServer.Close() // Create a raw websocket client, connecting to the websocket server. @@ -1067,8 +1075,8 @@ func TestWebSocketClient_HeartbeatSucceeds(t *testing.T) { // Create a heartbeat using the client websocket connection, and start it. // "period" is less than "deadline", so ping/pong heartbeat will succceed. var expectedMsg = "test heartbeat message" - var period = 10 * time.Millisecond - var deadline = 20 * time.Millisecond + var period = 100 * time.Millisecond + var deadline = 200 * time.Millisecond heartbeat := newHeartbeat(client, period, deadline) heartbeat.setMessage(expectedMsg) // Add a channel to the handler to retrieve the "pong" message. 
@@ -1079,7 +1087,20 @@ func TestWebSocketClient_HeartbeatSucceeds(t *testing.T) { return pongHandler(msg) }) go heartbeat.start() - go client.ReadMessage() //nolint:errcheck + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + _, _, err := client.ReadMessage() + if err != nil { + t.Logf("client err reading message: %v", err) + return + } + } + }() + select { case actualMsg := <-pongMsgCh: close(heartbeat.closer) @@ -1092,6 +1113,7 @@ func TestWebSocketClient_HeartbeatSucceeds(t *testing.T) { close(heartbeat.closer) t.Errorf("unexpected heartbeat timeout") } + wg.Wait() } func TestWebSocketClient_StreamsAndExpectedErrors(t *testing.T) { @@ -1297,7 +1319,16 @@ func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *opti resizeStream: streams[remotecommand.StreamResize], } - wsStreams.writeStatus = v4WriteStatusFunc(streams[remotecommand.StreamErr]) + wsStreams.writeStatus = func(stream io.Writer) func(status *apierrors.StatusError) error { + return func(status *apierrors.StatusError) error { + bs, err := json.Marshal(status.Status()) + if err != nil { + return err + } + _, err = stream.Write(bs) + return err + } + }(streams[remotecommand.StreamErr]) return wsStreams, nil } diff --git a/staging/src/k8s.io/client-go/transport/spdy/spdy.go b/staging/src/k8s.io/client-go/transport/spdy/spdy.go index f50b68e5ffb55..9fddc6c5f23ce 100644 --- a/staging/src/k8s.io/client-go/transport/spdy/spdy.go +++ b/staging/src/k8s.io/client-go/transport/spdy/spdy.go @@ -43,11 +43,15 @@ func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, er if config.Proxy != nil { proxy = config.Proxy } - upgradeRoundTripper := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ - TLS: tlsConfig, - Proxier: proxy, - PingPeriod: time.Second * 5, + upgradeRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ + TLS: tlsConfig, + Proxier: proxy, + PingPeriod: time.Second * 5, + UpgradeTransport: nil, }) + if err != nil { + return nil, nil, err + } wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) if err != nil { return nil, nil, err diff --git a/staging/src/k8s.io/client-go/transport/websocket/roundtripper.go b/staging/src/k8s.io/client-go/transport/websocket/roundtripper.go index e2a4a8abccf83..010f916bc7b26 100644 --- a/staging/src/k8s.io/client-go/transport/websocket/roundtripper.go +++ b/staging/src/k8s.io/client-go/transport/websocket/roundtripper.go @@ -108,10 +108,7 @@ func (rt *RoundTripper) RoundTrip(request *http.Request) (retResp *http.Response } wsConn, resp, err := dialer.DialContext(request.Context(), request.URL.String(), request.Header) if err != nil { - if err != gwebsocket.ErrBadHandshake { - return nil, err - } - return nil, fmt.Errorf("unable to upgrade connection: %v", err) + return nil, &httpstream.UpgradeFailureError{Cause: err} } rt.Conn = wsConn @@ -155,7 +152,7 @@ func Negotiate(rt http.RoundTripper, connectionInfo ConnectionHolder, req *http. 
req.Header[httpstream.HeaderProtocolVersion] = protocols resp, err := rt.RoundTrip(req) if err != nil { - return nil, fmt.Errorf("error sending request: %v", err) + return nil, err } err = resp.Body.Close() if err != nil { diff --git a/staging/src/k8s.io/client-go/transport/websocket/roundtripper_test.go b/staging/src/k8s.io/client-go/transport/websocket/roundtripper_test.go index 168d5d5509b90..16bfbf570bad9 100644 --- a/staging/src/k8s.io/client-go/transport/websocket/roundtripper_test.go +++ b/staging/src/k8s.io/client-go/transport/websocket/roundtripper_test.go @@ -49,7 +49,7 @@ func TestWebSocketRoundTripper_RoundTripperSucceeds(t *testing.T) { // Create the wrapped roundtripper and websocket upgrade roundtripper and call "RoundTrip()". websocketLocation, err := url.Parse(websocketServer.URL) require.NoError(t, err) - req, err := http.NewRequestWithContext(context.Background(), "POST", websocketServer.URL, nil) + req, err := http.NewRequestWithContext(context.Background(), "GET", websocketServer.URL, nil) require.NoError(t, err) rt, wsRt, err := RoundTripperFor(&restclient.Config{Host: websocketLocation.Host}) require.NoError(t, err) @@ -67,18 +67,17 @@ func TestWebSocketRoundTripper_RoundTripperSucceeds(t *testing.T) { func TestWebSocketRoundTripper_RoundTripperFails(t *testing.T) { // Create fake WebSocket server. websocketServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - conns, err := webSocketServerStreams(req, w) - if err != nil { - t.Fatalf("error on webSocketServerStreams: %v", err) - } - defer conns.conn.Close() + // Bad handshake means websocket server will not completely initialize. + _, err := webSocketServerStreams(req, w) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "websocket server finished before becoming ready")) })) defer websocketServer.Close() // Create the wrapped roundtripper and websocket upgrade roundtripper and call "RoundTrip()". websocketLocation, err := url.Parse(websocketServer.URL) require.NoError(t, err) - req, err := http.NewRequestWithContext(context.Background(), "POST", websocketServer.URL, nil) + req, err := http.NewRequestWithContext(context.Background(), "GET", websocketServer.URL, nil) require.NoError(t, err) rt, _, err := RoundTripperFor(&restclient.Config{Host: websocketLocation.Host}) require.NoError(t, err) @@ -105,7 +104,7 @@ func TestWebSocketRoundTripper_NegotiateCreatesConnection(t *testing.T) { // Create the websocket roundtripper and call "Negotiate" to create websocket connection. 
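The client-side flow these round-tripper tests exercise can be sketched as follows, assuming Negotiate returns the upgraded *gwebsocket.Conn as used in the tests; the dialWebSocket helper is hypothetical.

package example

import (
	"context"
	"net/http"

	gwebsocket "github.com/gorilla/websocket"
	"k8s.io/apimachinery/pkg/util/remotecommand"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/transport/websocket"
)

// dialWebSocket upgrades a GET request to a websocket connection, negotiating
// the V5 remote-command subprotocol. config and rawURL are assumed inputs.
func dialWebSocket(ctx context.Context, config *rest.Config, rawURL string) (*gwebsocket.Conn, error) {
	rt, holder, err := websocket.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, "GET", rawURL, nil)
	if err != nil {
		return nil, err
	}
	return websocket.Negotiate(rt, holder, req, remotecommand.StreamProtocolV5Name)
}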
websocketLocation, err := url.Parse(websocketServer.URL) require.NoError(t, err) - req, err := http.NewRequestWithContext(context.Background(), "POST", websocketServer.URL, nil) + req, err := http.NewRequestWithContext(context.Background(), "GET", websocketServer.URL, nil) require.NoError(t, err) rt, wsRt, err := RoundTripperFor(&restclient.Config{Host: websocketLocation.Host}) require.NoError(t, err) diff --git a/staging/src/k8s.io/client-go/util/workqueue/queue_test.go b/staging/src/k8s.io/client-go/util/workqueue/queue_test.go index de782035cc4e2..e2a33973c0cca 100644 --- a/staging/src/k8s.io/client-go/util/workqueue/queue_test.go +++ b/staging/src/k8s.io/client-go/util/workqueue/queue_test.go @@ -197,6 +197,72 @@ func TestReinsert(t *testing.T) { } } +func TestCollapse(t *testing.T) { + q := workqueue.New() + // Add a new one twice + q.Add("bar") + q.Add("bar") + + // It should get the new one + i, _ := q.Get() + if i != "bar" { + t.Errorf("Expected %v, got %v", "bar", i) + } + + // Finish that one up + q.Done(i) + + // There should be no more objects in the queue + if a := q.Len(); a != 0 { + t.Errorf("Expected queue to be empty. Has %v items", a) + } +} + +func TestCollapseWhileProcessing(t *testing.T) { + q := workqueue.New() + q.Add("foo") + + // Start processing + i, _ := q.Get() + if i != "foo" { + t.Errorf("Expected %v, got %v", "foo", i) + } + + // Add the same one twice + q.Add("foo") + q.Add("foo") + + waitCh := make(chan struct{}) + // simulate another worker consuming the queue + go func() { + defer close(waitCh) + i, _ := q.Get() + if i != "foo" { + t.Errorf("Expected %v, got %v", "foo", i) + } + // Finish that one up + q.Done(i) + }() + + // give the worker some head start to avoid races + // on the select statement that cause flakiness + time.Sleep(100 * time.Millisecond) + // Finish the first one to unblock the other worker + select { + case <-waitCh: + t.Errorf("worker should be blocked until we are done") + default: + q.Done("foo") + } + + // wait for the worker to consume the new object + // There should be no more objects in the queue + <-waitCh + if a := q.Len(); a != 0 { + t.Errorf("Expected queue to be empty. 
Has %v items", a) + } +} + func TestQueueDrainageUsingShutDownWithDrain(t *testing.T) { q := workqueue.New() diff --git a/staging/src/k8s.io/cloud-provider/app/controllermanager.go b/staging/src/k8s.io/cloud-provider/app/controllermanager.go index 75d38a4e2f652..d9069dc7d7688 100644 --- a/staging/src/k8s.io/cloud-provider/app/controllermanager.go +++ b/staging/src/k8s.io/cloud-provider/app/controllermanager.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/healthz" utilfeature "k8s.io/apiserver/pkg/util/feature" - cacheddiscovery "k8s.io/client-go/discovery/cached" + cacheddiscovery "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/informers" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/metadata" @@ -202,9 +202,9 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface // Start the controller manager HTTP server if c.SecureServing != nil { unsecuredMux := genericcontrollermanager.NewBaseHandler(&c.ComponentConfig.Generic.Debugging, healthzHandler) - if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { - slis.SLIMetricsWithReset{}.Install(unsecuredMux) - } + + slis.SLIMetricsWithReset{}.Install(unsecuredMux) + handler := genericcontrollermanager.BuildHandlerChain(unsecuredMux, &c.Authorization, &c.Authentication) // TODO: handle stoppedCh and listenerStoppedCh returned by c.SecureServing.Serve if _, _, err := c.SecureServing.Serve(handler, 0, stopCh); err != nil { diff --git a/staging/src/k8s.io/cloud-provider/cloud.go b/staging/src/k8s.io/cloud-provider/cloud.go index c9a04085f4809..482656b3703ed 100644 --- a/staging/src/k8s.io/cloud-provider/cloud.go +++ b/staging/src/k8s.io/cloud-provider/cloud.go @@ -98,6 +98,8 @@ func DefaultLoadBalancerName(service *v1.Service) string { } // GetInstanceProviderID builds a ProviderID for a node in a cloud. +// Note that if the instance does not exist, we must return ("", cloudprovider.InstanceNotFound) +// cloudprovider.InstanceNotFound should NOT be returned for instances that exist but are stopped/sleeping func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) { instances, ok := cloud.Instances() if !ok { @@ -108,8 +110,11 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types. if err == NotImplemented { return "", err } + if err == InstanceNotFound { + return "", err + } - return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err) + return "", fmt.Errorf("failed to get instance ID from cloud provider: %w", err) } return cloud.ProviderName() + "://" + instanceID, nil } diff --git a/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go b/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go index aa6601facd71a..b8a50e42cdd64 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go +++ b/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go @@ -152,7 +152,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) { // At this point the node has NotReady status, we need to check if the node has been removed // from the cloud provider. 
If node cannot be found in cloudprovider, then delete the node - exists, err := ensureNodeExistsByProviderID(ctx, c.cloud, node) + exists, err := c.ensureNodeExistsByProviderID(ctx, node) if err != nil { klog.Errorf("error checking if node %s exists: %v", node.Name, err) continue @@ -180,7 +180,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) { // Node exists. We need to check this to get taint working in similar in all cloudproviders // current problem is that shutdown nodes are not working in similar way ie. all cloudproviders // does not delete node from kubernetes cluster when instance it is shutdown see issue #46442 - shutdown, err := shutdownInCloudProvider(ctx, c.cloud, node) + shutdown, err := c.shutdownInCloudProvider(ctx, node) if err != nil { klog.Errorf("error checking if node %s is shutdown: %v", node.Name, err) } @@ -196,18 +196,49 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) { } } +// getProviderID returns the provider ID for the node. If Node CR has no provider ID, +// it will be the one from the cloud provider. +func (c *CloudNodeLifecycleController) getProviderID(ctx context.Context, node *v1.Node) (string, error) { + if node.Spec.ProviderID != "" { + return node.Spec.ProviderID, nil + } + + if instanceV2, ok := c.cloud.InstancesV2(); ok { + metadata, err := instanceV2.InstanceMetadata(ctx, node) + if err != nil { + return "", err + } + return metadata.ProviderID, nil + } + + providerID, err := cloudprovider.GetInstanceProviderID(ctx, c.cloud, types.NodeName(node.Name)) + if err != nil { + return "", err + } + + return providerID, nil +} + // shutdownInCloudProvider returns true if the node is shutdown on the cloud provider -func shutdownInCloudProvider(ctx context.Context, cloud cloudprovider.Interface, node *v1.Node) (bool, error) { - if instanceV2, ok := cloud.InstancesV2(); ok { +func (c *CloudNodeLifecycleController) shutdownInCloudProvider(ctx context.Context, node *v1.Node) (bool, error) { + if instanceV2, ok := c.cloud.InstancesV2(); ok { return instanceV2.InstanceShutdown(ctx, node) } - instances, ok := cloud.Instances() + instances, ok := c.cloud.Instances() if !ok { return false, errors.New("cloud provider does not support instances") } - shutdown, err := instances.InstanceShutdownByProviderID(ctx, node.Spec.ProviderID) + providerID, err := c.getProviderID(ctx, node) + if err != nil { + if err == cloudprovider.InstanceNotFound { + return false, nil + } + return false, err + } + + shutdown, err := instances.InstanceShutdownByProviderID(ctx, providerID) if err == cloudprovider.NotImplemented { return false, nil } @@ -216,32 +247,22 @@ func shutdownInCloudProvider(ctx context.Context, cloud cloudprovider.Interface, } // ensureNodeExistsByProviderID checks if the instance exists by the provider id, -// If provider id in spec is empty it calls instanceId with node name to get provider id -func ensureNodeExistsByProviderID(ctx context.Context, cloud cloudprovider.Interface, node *v1.Node) (bool, error) { - if instanceV2, ok := cloud.InstancesV2(); ok { +func (c *CloudNodeLifecycleController) ensureNodeExistsByProviderID(ctx context.Context, node *v1.Node) (bool, error) { + if instanceV2, ok := c.cloud.InstancesV2(); ok { return instanceV2.InstanceExists(ctx, node) } - instances, ok := cloud.Instances() + instances, ok := c.cloud.Instances() if !ok { return false, errors.New("instances interface not supported in the cloud provider") } - providerID := node.Spec.ProviderID - if providerID == "" { - var err 
error - providerID, err = instances.InstanceID(ctx, types.NodeName(node.Name)) - if err != nil { - if err == cloudprovider.InstanceNotFound { - return false, nil - } - return false, err - } - - if providerID == "" { - klog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name) + providerID, err := c.getProviderID(ctx, node) + if err != nil { + if err == cloudprovider.InstanceNotFound { return false, nil } + return false, err } return instances.InstanceExistsByProviderID(ctx, providerID) diff --git a/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller_test.go index 2f71eb2b07bd4..2132831d44a88 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller_test.go +++ b/staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller_test.go @@ -19,6 +19,7 @@ package cloud import ( "context" "errors" + "github.com/google/go-cmp/cmp" "reflect" "testing" "time" @@ -32,6 +33,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" + cloudprovider "k8s.io/cloud-provider" fakecloud "k8s.io/cloud-provider/fake" "k8s.io/klog/v2" ) @@ -598,6 +600,165 @@ func Test_NodesShutdown(t *testing.T) { ErrShutdownByProviderID: nil, }, }, + { + name: "node with empty spec providerID is not ready and was shutdown, but exists", + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), + }, + Spec: v1.NodeSpec{ + ProviderID: "", + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + }, + }, + }, + }, + expectedNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), + }, + Spec: v1.NodeSpec{ + ProviderID: "", + Taints: []v1.Taint{ + *ShutdownTaint, + }, + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + }, + }, + }, + }, + expectedDeleted: false, + fakeCloud: &fakecloud.Cloud{ + NodeShutdown: true, + ExistsByProviderID: true, + ErrShutdownByProviderID: nil, + ExtID: map[types.NodeName]string{ + types.NodeName("node0"): "foo://12345", + }, + }, + }, + { + name: "node with non-existing providerID (missing in cloud provider) gets deleted", + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), + }, + Spec: v1.NodeSpec{ + ProviderID: "", + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + }, + }, + }, + }, + expectedDeleted: true, + fakeCloud: &fakecloud.Cloud{ + ErrShutdownByProviderID: nil, + ExtID: map[types.NodeName]string{ + types.NodeName("node0"): "", + }, + }, + }, + { + name: "node with error when getting providerID does not have shutdown taint", + 
existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), + }, + Spec: v1.NodeSpec{ + ProviderID: "", + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + }, + }, + }, + }, + expectedNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), + }, + Spec: v1.NodeSpec{ + ProviderID: "", + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + }, + }, + }, + }, + expectedDeleted: false, + fakeCloud: &fakecloud.Cloud{ + ErrShutdownByProviderID: nil, + ExtIDErr: map[types.NodeName]error{ + types.NodeName("node0"): errors.New("err!"), + }, + }, + }, + { + name: "node with InstanceID returning InstanceNotFound gets deleted", + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local), + }, + Spec: v1.NodeSpec{ + ProviderID: "", + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local), + }, + }, + }, + }, + expectedDeleted: true, + fakeCloud: &fakecloud.Cloud{ + ErrShutdownByProviderID: nil, + ExtIDErr: map[types.NodeName]error{ + types.NodeName("node0"): cloudprovider.InstanceNotFound, + }, + }, + }, { name: "node is not ready, but there is error checking if node is shutdown", existingNode: &v1.Node{ @@ -749,6 +910,157 @@ func Test_NodesShutdown(t *testing.T) { } } +func Test_GetProviderID(t *testing.T) { + testcases := []struct { + name string + fakeCloud *fakecloud.Cloud + existingNode *v1.Node + expectedProviderID string + expectedErr error + }{ + { + name: "node initialized with provider ID", + fakeCloud: &fakecloud.Cloud{ + EnableInstancesV2: false, + Err: nil, + }, + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: "fake://12345", + }, + }, + expectedProviderID: "fake://12345", + expectedErr: nil, + }, + { + name: "node initialized with provider ID with InstancesV2", + fakeCloud: &fakecloud.Cloud{ + EnableInstancesV2: true, + Err: nil, + }, + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + Spec: v1.NodeSpec{ + ProviderID: "fake://12345", + }, + }, + expectedProviderID: "fake://12345", + expectedErr: nil, + }, + { + name: "cloud implemented with Instances", + fakeCloud: &fakecloud.Cloud{ + EnableInstancesV2: false, + ExtID: map[types.NodeName]string{ + types.NodeName("node0"): "12345", + }, + Err: nil, + }, + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + }, + expectedProviderID: "fake://12345", + expectedErr: nil, + }, + { + name: "cloud implemented with InstancesV2", + fakeCloud: &fakecloud.Cloud{ + EnableInstancesV2: true, + ProviderID: map[types.NodeName]string{ + types.NodeName("node0"): "fake://12345", + }, + Err: nil, + }, + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, 
+ }, + expectedProviderID: "fake://12345", + expectedErr: nil, + }, + { + name: "cloud implemented with InstancesV2 (without providerID)", + fakeCloud: &fakecloud.Cloud{ + EnableInstancesV2: true, + Err: nil, + }, + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + }, + expectedProviderID: "", + expectedErr: nil, + }, + { + name: "cloud implemented with Instances with instance missing", + fakeCloud: &fakecloud.Cloud{ + EnableInstancesV2: false, + ExtIDErr: map[types.NodeName]error{ + types.NodeName("node0"): cloudprovider.InstanceNotFound, + }, + Err: nil, + }, + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + }, + expectedProviderID: "", + expectedErr: cloudprovider.InstanceNotFound, + }, + { + name: "cloud implemented with Instances with unknown error", + fakeCloud: &fakecloud.Cloud{ + EnableInstancesV2: false, + ExtIDErr: map[types.NodeName]error{ + types.NodeName("node0"): errors.New("unknown error"), + }, + Err: nil, + }, + existingNode: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + }, + }, + expectedProviderID: "", + expectedErr: errors.New("failed to get instance ID from cloud provider: unknown error"), + }, + } + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + cloudNodeLifecycleController := &CloudNodeLifecycleController{ + cloud: testcase.fakeCloud, + } + + providerID, err := cloudNodeLifecycleController.getProviderID(context.TODO(), testcase.existingNode) + + if err != nil && testcase.expectedErr == nil { + t.Fatalf("unexpected error: %v", err) + } + if err == nil && testcase.expectedErr != nil { + t.Fatalf("did not get expected error %q", testcase.expectedErr) + } + if err != nil && err.Error() != testcase.expectedErr.Error() { + t.Fatalf("expected error %q, got %q", testcase.expectedErr.Error(), err.Error()) + } + + if !cmp.Equal(providerID, testcase.expectedProviderID) { + t.Errorf("unexpected providerID %s", cmp.Diff(providerID, testcase.expectedProviderID)) + } + }) + } +} + func syncNodeStore(nodeinformer coreinformers.NodeInformer, f *fake.Clientset) error { nodes, err := f.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller.go index ef9259a032551..5a6a5079f829a 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/service/controller.go +++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller.go @@ -351,6 +351,7 @@ type loadBalancerOperation int const ( deleteLoadBalancer loadBalancerOperation = iota ensureLoadBalancer + maxNodeNamesToLog = 20 ) // syncLoadBalancerIfNeeded ensures that service's status is synced up with loadbalancer @@ -678,6 +679,15 @@ func nodeNames(nodes []*v1.Node) sets.String { return ret } +func loggableNodeNames(nodes []*v1.Node) []string { + if len(nodes) > maxNodeNamesToLog { + skipped := len(nodes) - maxNodeNamesToLog + names := nodeNames(nodes[:maxNodeNamesToLog]).List() + return append(names, fmt.Sprintf("<%d more>", skipped)) + } + return nodeNames(nodes).List() +} + func shouldSyncUpdatedNode(oldNode, newNode *v1.Node) bool { // Evaluate the individual node exclusion predicate before evaluating the // compounded result of all predicates. 
We don't sync changes on the @@ -735,9 +745,11 @@ func (c *Controller) nodeSyncService(svc *v1.Service, oldNodes, newNodes []*v1.N } newNodes = filterWithPredicates(newNodes, getNodePredicatesForService(svc)...) oldNodes = filterWithPredicates(oldNodes, getNodePredicatesForService(svc)...) - if nodeNames(newNodes).Equal(nodeNames(oldNodes)) { + + if nodesSufficientlyEqual(oldNodes, newNodes) { return retSuccess } + klog.V(4).Infof("nodeSyncService started for service %s/%s", svc.Namespace, svc.Name) if err := c.lockedUpdateLoadBalancerHosts(svc, newNodes); err != nil { runtime.HandleError(fmt.Errorf("failed to update load balancer hosts for service %s/%s: %v", svc.Namespace, svc.Name, err)) @@ -748,6 +760,34 @@ func (c *Controller) nodeSyncService(svc *v1.Service, oldNodes, newNodes []*v1.N return retSuccess } +func nodesSufficientlyEqual(oldNodes, newNodes []*v1.Node) bool { + if len(oldNodes) != len(newNodes) { + return false + } + + // This holds the Node fields which trigger a sync when changed. + type protoNode struct { + providerID string + } + distill := func(n *v1.Node) protoNode { + return protoNode{ + providerID: n.Spec.ProviderID, + } + } + + mOld := map[string]protoNode{} + for _, n := range oldNodes { + mOld[n.Name] = distill(n) + } + + mNew := map[string]protoNode{} + for _, n := range newNodes { + mNew[n.Name] = distill(n) + } + + return reflect.DeepEqual(mOld, mNew) +} + // updateLoadBalancerHosts updates all existing load balancers so that // they will match the latest list of nodes with input number of workers. // Returns the list of services that couldn't be updated. @@ -791,8 +831,8 @@ func (c *Controller) lockedUpdateLoadBalancerHosts(service *v1.Service, hosts [] klog.V(4).Infof("It took %v seconds to update load balancer hosts for service %s/%s", latency, service.Namespace, service.Name) updateLoadBalancerHostLatency.Observe(latency) }() + klog.V(2).Infof("Updating backends for load balancer %s/%s with %d nodes: %v", service.Namespace, service.Name, len(hosts), loggableNodeNames(hosts)) - klog.V(2).Infof("Updating backends for load balancer %s/%s with node set: %v", service.Namespace, service.Name, nodeNames(hosts)) // This operation doesn't normally take very long (and happens pretty often), so we only record the final event err := c.balancer.UpdateLoadBalancer(context.TODO(), c.clusterName, service, hosts) if err == nil { @@ -817,7 +857,7 @@ func (c *Controller) lockedUpdateLoadBalancerHosts(service *v1.Service, hosts [] return nil } - c.eventRecorder.Eventf(service, v1.EventTypeWarning, "UpdateLoadBalancerFailed", "Error updating load balancer with new hosts %v: %v", nodeNames(hosts), err) + c.eventRecorder.Eventf(service, v1.EventTypeWarning, "UpdateLoadBalancerFailed", "Error updating load balancer with new hosts %v [node names limited, total number of nodes: %d], error: %v", loggableNodeNames(hosts), len(hosts), err) return err } @@ -962,6 +1002,7 @@ var ( etpLocalNodePredicates []NodeConditionPredicate = []NodeConditionPredicate{ nodeIncludedPredicate, nodeUnTaintedPredicate, + nodeReadyPredicate, } stableNodeSetPredicates []NodeConditionPredicate = []NodeConditionPredicate{ nodeNotDeletedPredicate, diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go index 62909f8823073..2fa2a7fb35aaf 100644 --- a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go +++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go @@ 
-523,6 +523,8 @@ func TestNodeChangesForExternalTrafficPolicyLocalServices(t *testing.T) { }, }, expectedUpdateCalls: []fakecloud.UpdateBalancerCall{ + {Service: etpLocalservice1, Hosts: []*v1.Node{node1, node3}}, + {Service: etpLocalservice2, Hosts: []*v1.Node{node1, node3}}, {Service: service3, Hosts: []*v1.Node{node1, node3}}, }, }, { @@ -547,6 +549,8 @@ func TestNodeChangesForExternalTrafficPolicyLocalServices(t *testing.T) { }, }, expectedUpdateCalls: []fakecloud.UpdateBalancerCall{ + {Service: etpLocalservice1, Hosts: []*v1.Node{node1, node2, node3}}, + {Service: etpLocalservice2, Hosts: []*v1.Node{node1, node2, node3}}, {Service: service3, Hosts: []*v1.Node{node1, node2, node3}}, }, }, { @@ -897,6 +901,65 @@ func compareUpdateCalls(t *testing.T, left, right []fakecloud.UpdateBalancerCall } } +func TestNodesNotEqual(t *testing.T) { + controller, cloud, _ := newController() + + services := []*v1.Service{ + newService("s0", v1.ServiceTypeLoadBalancer), + newService("s1", v1.ServiceTypeLoadBalancer), + } + + node1 := makeNode(tweakName("node1")) + node2 := makeNode(tweakName("node2")) + node3 := makeNode(tweakName("node3")) + node1WithProviderID := makeNode(tweakName("node1"), tweakProviderID("cumulus/1")) + node2WithProviderID := makeNode(tweakName("node2"), tweakProviderID("cumulus/2")) + + testCases := []struct { + desc string + lastSyncNodes []*v1.Node + newNodes []*v1.Node + expectedUpdateCalls []fakecloud.UpdateBalancerCall + }{ + { + desc: "Nodes with updated providerID", + lastSyncNodes: []*v1.Node{node1, node2}, + newNodes: []*v1.Node{node1WithProviderID, node2WithProviderID}, + expectedUpdateCalls: []fakecloud.UpdateBalancerCall{ + {Service: newService("s0", v1.ServiceTypeLoadBalancer), Hosts: []*v1.Node{node1WithProviderID, node2WithProviderID}}, + {Service: newService("s1", v1.ServiceTypeLoadBalancer), Hosts: []*v1.Node{node1WithProviderID, node2WithProviderID}}, + }, + }, + { + desc: "Nodes unchanged", + lastSyncNodes: []*v1.Node{node1WithProviderID, node2}, + newNodes: []*v1.Node{node1WithProviderID, node2}, + expectedUpdateCalls: []fakecloud.UpdateBalancerCall{}, + }, + { + desc: "Change node with empty providerID", + lastSyncNodes: []*v1.Node{node1WithProviderID, node2}, + newNodes: []*v1.Node{node1WithProviderID, node3}, + expectedUpdateCalls: []fakecloud.UpdateBalancerCall{ + {Service: newService("s0", v1.ServiceTypeLoadBalancer), Hosts: []*v1.Node{node1WithProviderID, node3}}, + {Service: newService("s1", v1.ServiceTypeLoadBalancer), Hosts: []*v1.Node{node1WithProviderID, node3}}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + controller.nodeLister = newFakeNodeLister(nil, tc.newNodes...) 
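The expectations in this test table follow from the new comparison semantics: only the set of node names and each node's providerID can trigger a host re-sync. A standalone sketch of that check, for illustration only (the controller's unexported nodesSufficientlyEqual remains the authoritative implementation):

package example

import (
	v1 "k8s.io/api/core/v1"
)

// syncRelevantChange reports whether a load-balancer host re-sync would be
// triggered: it compares only node names and providerIDs, mirroring the
// behavior exercised by TestNodesNotEqual.
func syncRelevantChange(oldNodes, newNodes []*v1.Node) bool {
	if len(oldNodes) != len(newNodes) {
		return true
	}
	old := make(map[string]string, len(oldNodes))
	for _, n := range oldNodes {
		old[n.Name] = n.Spec.ProviderID
	}
	for _, n := range newNodes {
		id, ok := old[n.Name]
		if !ok || id != n.Spec.ProviderID {
			return true
		}
	}
	return false
}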
+			controller.lastSyncedNodes = tc.lastSyncNodes
+			controller.updateLoadBalancerHosts(ctx, services, 5)
+			compareUpdateCalls(t, tc.expectedUpdateCalls, cloud.UpdateCalls)
+			cloud.UpdateCalls = []fakecloud.UpdateBalancerCall{}
+		})
+	}
+}
+
 func TestProcessServiceCreateOrUpdate(t *testing.T) {
 	controller, _, client := newController()
diff --git a/staging/src/k8s.io/cloud-provider/fake/fake.go b/staging/src/k8s.io/cloud-provider/fake/fake.go
index a0ec63997297e..7fcda129569c8 100644
--- a/staging/src/k8s.io/cloud-provider/fake/fake.go
+++ b/staging/src/k8s.io/cloud-provider/fake/fake.go
@@ -312,6 +312,11 @@ func (f *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID strin
 // InstanceShutdownByProviderID returns true if the instances is in safe state to detach volumes
 func (f *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
 	f.addCall("instance-shutdown-by-provider-id")
+
+	if providerID == "" {
+		return false, fmt.Errorf("cannot shutdown instance with empty providerID")
+	}
+
 	return f.NodeShutdown, f.ErrShutdownByProviderID
 }
diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod
index b3a9288b189f5..460932bcbf3ac 100644
--- a/staging/src/k8s.io/cloud-provider/go.mod
+++ b/staging/src/k8s.io/cloud-provider/go.mod
@@ -2,13 +2,13 @@ module k8s.io/cloud-provider
 
-go 1.20
+go 1.21.3
 
 require (
 	github.com/google/go-cmp v0.5.9
 	github.com/spf13/cobra v1.7.0
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.8.2
+	github.com/stretchr/testify v1.8.4
 	k8s.io/api v0.0.0
 	k8s.io/apimachinery v0.0.0
 	k8s.io/apiserver v0.0.0
@@ -32,10 +32,10 @@ require (
 	github.com/coreos/go-semver v0.3.1 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
-	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.2.3 // indirect
@@ -50,7 +50,7 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
 	github.com/imdario/mergo v0.3.6 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -71,41 +71,40 @@ require (
 	go.etcd.io/etcd/api/v3 v3.5.9 // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
 	go.etcd.io/etcd/client/v3 v3.5.9 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
-	go.opentelemetry.io/otel v1.10.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
-	go.opentelemetry.io/otel/metric v0.31.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.10.0 // indirect
-	go.opentelemetry.io/otel/trace v1.10.0 // indirect
-	go.opentelemetry.io/proto/otlp v0.19.0 //
indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.54.0 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/kms v0.0.0 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum index aadbe940bec8b..aeb34ac84ab0b 100644 --- a/staging/src/k8s.io/cloud-provider/go.sum +++ b/staging/src/k8s.io/cloud-provider/go.sum @@ -1,164 +1,127 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod 
h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= 
-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod 
h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= 
-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps 
v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod 
h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1/go.mod 
h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1/go.mod 
h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -175,25 +138,12 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 
h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -208,27 +158,17 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= 
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -252,78 +192,31 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.17.6 h1:QDvHTIJunIsbgN8yVukx0HGnsqVLSY6xGqo+17IjIyM= github.com/google/cel-go v0.17.6/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof 
v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -333,11 +226,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -349,8 +239,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -378,10 +266,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -391,7 +279,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= @@ -399,7 +286,6 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -407,7 +293,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -424,16 +309,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= @@ -451,32 +334,24 @@ go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -489,299 +364,84 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api 
v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto 
v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -789,25 +449,15 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= 
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/cloud-provider/volume/helpers/zones_test.go b/staging/src/k8s.io/cloud-provider/volume/helpers/zones_test.go index e31a7224b2b95..bc1ff0e5d89e2 100644 --- a/staging/src/k8s.io/cloud-provider/volume/helpers/zones_test.go +++ b/staging/src/k8s.io/cloud-provider/volume/helpers/zones_test.go @@ -784,7 +784,7 @@ func TestSelectZoneForVolume(t *testing.T) { t.Errorf("Unexpected error from SelectZoneForVolume for %s; Error: %v", test.Name, err) } - if test.ExpectSpecificZone == true { + if test.ExpectSpecificZone { if zone != test.ExpectedZone { t.Errorf("Expected zone %v does not match obtained zone %v for %s", test.ExpectedZone, zone, test.Name) } diff --git a/staging/src/k8s.io/cluster-bootstrap/go.mod b/staging/src/k8s.io/cluster-bootstrap/go.mod index 9e36445f3eb6c..19618f80235ae 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.mod +++ b/staging/src/k8s.io/cluster-bootstrap/go.mod @@ -2,10 +2,10 @@ module k8s.io/cluster-bootstrap -go 1.20 +go 1.21.3 require ( - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 gopkg.in/square/go-jose.v2 v2.6.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 @@ -21,9 +21,9 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/text v0.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum index 1f683e8ca2443..64fec7049c11a 100644 --- a/staging/src/k8s.io/cluster-bootstrap/go.sum +++ b/staging/src/k8s.io/cluster-bootstrap/go.sum @@ -38,7 +38,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -47,20 +48,17 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -68,26 +66,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys 
v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -103,12 +101,11 @@ gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/cluster-bootstrap/token/util/helpers.go b/staging/src/k8s.io/cluster-bootstrap/token/util/helpers.go index f9ea35b5ee3c0..31379c5d6868b 100644 --- a/staging/src/k8s.io/cluster-bootstrap/token/util/helpers.go +++ b/staging/src/k8s.io/cluster-bootstrap/token/util/helpers.go @@ -17,9 +17,9 @@ limitations under the License. 
package util import ( - "bufio" "crypto/rand" "fmt" + "math/big" "regexp" "strings" @@ -59,29 +59,21 @@ func GenerateBootstrapToken() (string, error) { // randBytes returns a random string consisting of the characters in // validBootstrapTokenChars, with the length customized by the parameter func randBytes(length int) (string, error) { - // len("0123456789abcdefghijklmnopqrstuvwxyz") = 36 which doesn't evenly divide - // the possible values of a byte: 256 mod 36 = 4. Discard any random bytes we - // read that are >= 252 so the bytes we evenly divide the character set. - const maxByteValue = 252 - var ( - b byte - err error token = make([]byte, length) + max = new(big.Int).SetUint64(uint64(len(validBootstrapTokenChars))) ) - reader := bufio.NewReaderSize(rand.Reader, length*2) for i := range token { - for { - if b, err = reader.ReadByte(); err != nil { - return "", err - } - if b < maxByteValue { - break - } + val, err := rand.Int(rand.Reader, max) + if err != nil { + return "", fmt.Errorf("could not generate random integer: %w", err) } - - token[i] = validBootstrapTokenChars[int(b)%len(validBootstrapTokenChars)] + // Use simple operations in constant-time to obtain a byte in the a-z,0-9 + // character range + x := val.Uint64() + res := x + 48 + (39 & ((9 - x) >> 8)) + token[i] = byte(res) } return string(token), nil @@ -92,10 +84,36 @@ func TokenFromIDAndSecret(id, secret string) string { return fmt.Sprintf("%s.%s", id, secret) } -// IsValidBootstrapToken returns whether the given string is valid as a Bootstrap Token and -// in other words satisfies the BootstrapTokenRegexp +// IsValidBootstrapToken returns whether the given string is valid as a Bootstrap Token. +// Avoid using BootstrapTokenRegexp.MatchString(token) and instead perform constant-time +// comparisons on the secret. func IsValidBootstrapToken(token string) bool { - return BootstrapTokenRegexp.MatchString(token) + // Must be exactly two strings separated by "." + t := strings.Split(token, ".") + if len(t) != 2 { + return false + } + + // Validate the ID: t[0] + // Using a Regexp for it is safe because the ID is public already + if !BootstrapTokenIDRegexp.MatchString(t[0]) { + return false + } + + // Validate the secret with constant-time: t[1] + secret := t[1] + if len(secret) != api.BootstrapTokenSecretBytes { // Must be an exact size + return false + } + for i := range secret { + c := int(secret[i]) + notDigit := (c < 48 || c > 57) // Character is not in the 0-9 range + notLetter := (c < 97 || c > 122) // Character is not in the a-z range + if notDigit && notLetter { + return false + } + } + return true } // IsValidBootstrapTokenID returns whether the given string is valid as a Bootstrap Token ID and diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index dce920ad1999c..28b829cc13902 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -77,7 +77,7 @@ func genStatus(t *types.Type) bool { // hasObjectMeta returns true if the type has a ObjectMeta field. 
func hasObjectMeta(t *types.Type) bool { for _, m := range t.Members { - if m.Embedded == true && m.Name == "ObjectMeta" { + if m.Embedded && m.Name == "ObjectMeta" { return true } } diff --git a/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status.go b/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status.go index aebfeca41e773..823f2a8e37284 100644 --- a/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status.go +++ b/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status.go @@ -204,7 +204,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat if ptag != nil { pkgNeedsGeneration, err = strconv.ParseBool(ptag.value) if err != nil { - klog.Fatalf("Package %v: unsupported %s value: %q :%w", i, tagEnabledName, ptag.value, err) + klog.Fatalf("Package %v: unsupported %s value: %q :%v", i, tagEnabledName, ptag.value, err) } } if !pkgNeedsGeneration { diff --git a/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status_test.go b/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status_test.go index dad1a5fb210bd..628e11297be24 100644 --- a/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status_test.go +++ b/staging/src/k8s.io/code-generator/cmd/prerelease-lifecycle-gen/prerelease-lifecycle-generators/status_test.go @@ -17,10 +17,13 @@ limitations under the License. package prereleaselifecyclegenerators import ( + "fmt" "reflect" + "strconv" "testing" "k8s.io/gengo/types" + "k8s.io/klog/v2" ) var mockType = &types.Type{ @@ -32,6 +35,12 @@ var mockType = &types.Type{ } func Test_extractKubeVersionTag(t *testing.T) { + oldKlogOsExit := klog.OsExit + defer func() { + klog.OsExit = oldKlogOsExit + }() + klog.OsExit = customExit + tests := []struct { name string tagName string @@ -40,6 +49,7 @@ func Test_extractKubeVersionTag(t *testing.T) { wantMajor int wantMinor int wantErr bool + wantFatal bool }{ { name: "not found tag should generate an error", @@ -63,7 +73,7 @@ func Test_extractKubeVersionTag(t *testing.T) { wantMinor: 5, wantErr: false, }, - /*{ + { name: "multiple declarations of same tag should return an error", tagName: "someVersionTag:version", tagComments: []string{ @@ -71,7 +81,7 @@ func Test_extractKubeVersionTag(t *testing.T) { "+someVersionTag:version=v1.7", }, wantValue: nil, - wantErr: true, // TODO: Today it is klog.Fatal, check how to capture it + wantFatal: true, }, { name: "multiple values on same tag should return an error", @@ -80,8 +90,8 @@ func Test_extractKubeVersionTag(t *testing.T) { "+someVersionTag:version=1.5,something", }, wantValue: nil, - wantErr: true, // TODO: Today it is klog.Fatal, check how to capture it - },*/ + wantFatal: true, + }, { name: "wrong tag major value should return an error", tagName: "someVersionTag:version", @@ -126,7 +136,14 @@ func Test_extractKubeVersionTag(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mockType.SecondClosestCommentLines = tt.tagComments - gotTag, gotMajor, gotMinor, err := extractKubeVersionTag(tt.tagName, mockType) + gotTag, gotMajor, gotMinor, err, fatalErr := safeExtractKubeVersionTag(tt.tagName, mockType) + if (fatalErr != nil) != tt.wantFatal { + t.Errorf("extractKubeVersionTag() fatalErr = %v, wantFatal %v", fatalErr, tt.wantFatal) + 
return + } + if tt.wantFatal { + return + } if (err != nil) != tt.wantErr { t.Errorf("extractKubeVersionTag() error = %v, wantErr %v", err, tt.wantErr) return @@ -146,3 +163,391 @@ func Test_extractKubeVersionTag(t *testing.T) { }) } } + +func customExit(exitCode int) { + panic(strconv.Itoa(exitCode)) +} + +func safeExtractKubeVersionTag(tagName string, t *types.Type) (value *tagValue, major int, minor int, err error, localErr error) { + defer func() { + if e := recover(); e != nil { + localErr = fmt.Errorf("extractKubeVersionTag returned error: %v", e) + } + }() + value, major, minor, err = extractKubeVersionTag(tagName, t) + return +} + +func safeExtractTag(t *testing.T, tagName string, comments []string) (value *tagValue, err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("extractTag returned error: %v", e) + } + }() + value = extractTag(tagName, comments) + return +} + +func Test_extractTag(t *testing.T) { + oldKlogOsExit := klog.OsExit + defer func() { + klog.OsExit = oldKlogOsExit + }() + klog.OsExit = customExit + + comments := []string{ + "+variable=7", + "+anotherVariable=8", + "+yetAnotherVariable=9", + "variableWithoutMarker=10", + "+variableWithoutValue", + "+variable=11", + "+multi-valuedVariable=12,13,14", + "+strangeVariable=15,=16", + } + + tests := []struct { + name string + tagComments []string + variableName string + wantError bool + wantValue *tagValue + }{ + { + name: "variable with explicit value", + tagComments: comments, + variableName: "anotherVariable", + wantValue: &tagValue{value: "8"}, + }, + { + name: "variable without explicit value", + tagComments: comments, + variableName: "variableWithoutValue", + wantValue: &tagValue{value: ""}, + }, + { + name: "variable not present in comments", + tagComments: comments, + variableName: "variableOutOfNowhere", + wantValue: nil, + }, + { + name: "variable without marker test", + tagComments: comments, + variableName: "variableWithoutMarker", + wantValue: nil, + }, + { + name: "abort duplicated variable", + tagComments: comments, + variableName: "variable", + wantError: true, + wantValue: nil, + }, + { + name: "abort variable with multiple values", + tagComments: comments, + variableName: "multi-valuedVariable", + wantError: true, + wantValue: nil, + }, + { + name: "this test documents strange behaviour", // TODO: Is this behaviour intended? 
+ tagComments: comments, + variableName: "strangeVariable", + wantValue: &tagValue{value: "15"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotTag, err := safeExtractTag(t, tt.variableName, tt.tagComments) + if (err != nil) != tt.wantError { + t.Errorf("extractTag() err = %v, wantError = %v.", gotTag, tt.wantError) + return + } + if tt.wantError { + return + } + if !reflect.DeepEqual(gotTag, tt.wantValue) { + t.Errorf("extractTag() got = %v, want %v", gotTag, tt.wantValue) + } + }) + } +} + +func Test_extractEnabledTypeTag(t *testing.T) { + someComments := []string{ + "+variable=7", + "+k8s:prerelease-lifecycle-gen=8", + } + moreComments := []string{ + "+yetAnotherVariable=9", + "variableWithoutMarker=10", + "+variableWithoutValue", + "+variable=11", + "+multi-valuedVariable=12,13,14", + } + + tests := []struct { + name string + mockType *types.Type + wantValue *tagValue + }{ + { + name: "desired info in main comments", + mockType: &types.Type{CommentLines: someComments, SecondClosestCommentLines: moreComments}, + wantValue: &tagValue{value: "8"}, + }, + { + name: "secondary comments empty", + mockType: &types.Type{CommentLines: someComments}, + wantValue: &tagValue{value: "8"}, + }, + { + name: "main comments empty", + mockType: &types.Type{SecondClosestCommentLines: someComments}, + wantValue: &tagValue{value: "8"}, + }, + { + name: "lack of desired info, empty secondary comments", + mockType: &types.Type{CommentLines: moreComments}, + wantValue: nil, + }, + { + name: "lack of desired info, empty main comments", + mockType: &types.Type{SecondClosestCommentLines: moreComments}, + wantValue: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotTag := extractEnabledTypeTag(tt.mockType) + if !reflect.DeepEqual(gotTag, tt.wantValue) { + t.Errorf("extractEnabledTypeTag() got = %v, want %v", gotTag, tt.wantValue) + } + }) + } +} + +func Test_extractReplacementTag(t *testing.T) { + replacementTag := "+k8s:prerelease-lifecycle-gen:replacement" + tests := []struct { + name string + mainComments []string + secondaryComments []string + wantGroup string + wantVersion string + wantKind string + wantHasReplacement bool + wantErr bool + }{ + { + name: "no replacement tag", + mainComments: []string{"randomText=7"}, + secondaryComments: []string{"importantFlag=8.8.8.8"}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: false, + wantErr: false, + }, + { + name: "replacement tag correct", + mainComments: []string{fmt.Sprintf("%v=my_group,v1,KindOf", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "my_group", + wantVersion: "v1", + wantKind: "KindOf", + wantHasReplacement: true, + wantErr: false, + }, + { + name: "correct replacement tag in secondary comments", + mainComments: []string{}, + secondaryComments: []string{fmt.Sprintf("%v=my_group,v1,KindOf", replacementTag)}, + wantGroup: "my_group", + wantVersion: "v1", + wantKind: "KindOf", + wantHasReplacement: true, + wantErr: false, + }, + { + name: "4 values instead of 3", + mainComments: []string{fmt.Sprintf("%v=my_group,v1,KindOf,subKind", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: false, + wantErr: true, + }, + { + name: "4 values instead of 3 in secondary comments", + mainComments: []string{}, + secondaryComments: []string{fmt.Sprintf("%v=my_group,v1,KindOf,subKind", replacementTag)}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: 
false, + wantErr: true, + }, + { + name: "2 values instead of 3", + mainComments: []string{fmt.Sprintf("%v=my_group,v1", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: false, + wantErr: true, + }, + { + name: "group name not all upper", + mainComments: []string{fmt.Sprintf("%v=myGroup,v1,KindOf", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: false, + wantErr: true, + }, + { + name: "version name does not start with v", + mainComments: []string{fmt.Sprintf("%v=my_group,bestVersion,KindOf", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: false, + wantErr: true, + }, + { + name: "kind name does not start with capital", + mainComments: []string{fmt.Sprintf("%v=my_group,v1,kindOf", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: false, + wantErr: true, + }, + { + name: "empty group name", // TODO: is it a valid input or a bug? + mainComments: []string{fmt.Sprintf("%v=,v1,KindOf", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "", + wantVersion: "v1", + wantKind: "KindOf", + wantHasReplacement: true, + wantErr: false, + }, + { + name: "empty version", + mainComments: []string{fmt.Sprintf("%v=my_group,,KindOf", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: false, + wantErr: true, + }, + { + name: "empty kind", + mainComments: []string{fmt.Sprintf("%v=my_group,v1,", replacementTag)}, + secondaryComments: []string{}, + wantGroup: "", + wantVersion: "", + wantKind: "", + wantHasReplacement: false, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + replacementGroup, replacementVersion, replacementKind, hasReplacement, err := extractReplacementTag(&types.Type{ + CommentLines: tt.mainComments, + SecondClosestCommentLines: tt.secondaryComments, + }) + if replacementGroup != tt.wantGroup { + t.Errorf("extractReplacementTag() group got = %v, want %v", replacementGroup, tt.wantGroup) + } + if replacementVersion != tt.wantVersion { + t.Errorf("extractReplacementTag() version got = %v, want %v", replacementVersion, tt.wantVersion) + } + if replacementKind != tt.wantKind { + t.Errorf("extractReplacementTag() kind got = %v, want %v", replacementKind, tt.wantKind) + } + if hasReplacement != tt.wantHasReplacement { + t.Errorf("extractReplacementTag() hasReplacement got = %v, want %v", hasReplacement, tt.wantHasReplacement) + } + if (err != nil) != tt.wantErr { + t.Errorf("extractReplacementTag() err got = %v, want %v", err, tt.wantErr) + } + }) + } +} + +func Test_isAPIType(t *testing.T) { + tests := []struct { + name string + t *types.Type + want bool + }{ + { + name: "private name is not apitype", + want: false, + t: &types.Type{ + Name: types.Name{ + Name: "notpublic", + }, + }, + }, + { + name: "non struct is not apitype", + want: false, + t: &types.Type{ + Name: types.Name{ + Name: "Public", + }, + Kind: types.Slice, + }, + }, + { + name: "contains member type", + want: true, + t: &types.Type{ + Name: types.Name{ + Name: "Public", + }, + Kind: types.Struct, + Members: []types.Member{ + { + Embedded: true, + Name: "TypeMeta", + }, + }, + }, + }, + + { + name: "contains no type", + want: false, + t: &types.Type{ + Name: types.Name{ + Name: "Public", + }, + Kind: 
types.Struct, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isAPIType(tt.t); got != tt.want { + t.Errorf("isAPIType() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go index 242eb3aa10af4..fa8e3f1c356c7 100644 --- a/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go +++ b/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -93,7 +93,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat for _, t := range pkg.Types { klog.V(5).Infof("considering type = %s", t.Name.String()) for _, typeMember := range t.Members { - if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { + if typeMember.Name == "TypeMeta" && typeMember.Embedded { typesToRegister = append(typesToRegister, t) } } diff --git a/staging/src/k8s.io/code-generator/examples/apiserver/openapi/zz_generated.openapi.go b/staging/src/k8s.io/code-generator/examples/apiserver/openapi/zz_generated.openapi.go index efd87870eae99..75ebf34941607 100644 --- a/staging/src/k8s.io/code-generator/examples/apiserver/openapi/zz_generated.openapi.go +++ b/staging/src/k8s.io/code-generator/examples/apiserver/openapi/zz_generated.openapi.go @@ -527,7 +527,6 @@ func schema_pkg_apis_meta_v1_Condition(ref common.ReferenceCallback) common.Open "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -1083,8 +1082,7 @@ func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDe Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -1370,7 +1368,6 @@ func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.Ope "creationTimestamp": { SchemaProps: spec.SchemaProps{ Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2152,7 +2149,6 @@ func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenA "object": { SchemaProps: spec.SchemaProps{ Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. 
The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -2351,7 +2347,6 @@ func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.Ope "object": { SchemaProps: spec.SchemaProps{ Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, diff --git a/staging/src/k8s.io/code-generator/examples/go.mod b/staging/src/k8s.io/code-generator/examples/go.mod index 21e3e271c3610..9764dbf789e77 100644 --- a/staging/src/k8s.io/code-generator/examples/go.mod +++ b/staging/src/k8s.io/code-generator/examples/go.mod @@ -2,19 +2,19 @@ module k8s.io/code-generator/examples -go 1.19 +go 1.21.3 require ( k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 sigs.k8s.io/structured-merge-diff/v4 v4.3.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -33,11 +33,11 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/staging/src/k8s.io/code-generator/examples/go.sum b/staging/src/k8s.io/code-generator/examples/go.sum index 866745bca6ff3..a4f9aba8fa5f2 100644 --- a/staging/src/k8s.io/code-generator/examples/go.sum +++ b/staging/src/k8s.io/code-generator/examples/go.sum @@ -2,8 +2,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -16,6 +16,7 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -31,6 +32,7 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -41,6 +43,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -54,14 +57,18 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -69,7 +76,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -82,32 +90,33 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod 
h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -131,8 +140,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/code-generator/generate-internal-groups.sh b/staging/src/k8s.io/code-generator/generate-internal-groups.sh index db1f93f43ff32..9676fac313b63 100755 --- a/staging/src/k8s.io/code-generator/generate-internal-groups.sh +++ b/staging/src/k8s.io/code-generator/generate-internal-groups.sh @@ -120,6 +120,9 @@ for GVs in ${GROUPS_WITH_VERSIONS}; do done done 
+CLIENTSET_PKG="${CLIENTSET_PKG_NAME:-clientset}" +CLIENTSET_NAME="${CLIENTSET_NAME_VERSIONED:-versioned}" + if grep -qw "deepcopy" <<<"${GENS}"; then # Nuke existing files for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${ALL_FQ_APIS[@]}"); do @@ -187,9 +190,6 @@ if grep -qw "applyconfiguration" <<<"${GENS}"; then fi if grep -qw "client" <<<"${GENS}"; then - CLIENTSET_PKG="${CLIENTSET_PKG_NAME:-clientset}" - CLIENTSET_NAME="${CLIENTSET_NAME_VERSIONED:-versioned}" - # Nuke existing files root="$(GO111MODULE=on go list -f '{{.Dir}}' "${OUTPUT_PKG}/${CLIENTSET_PKG}/${CLIENTSET_NAME}" 2>/dev/null || true)" if [ -n "${root}" ]; then diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod index d1947c2fd211e..ed303c4033a3e 100644 --- a/staging/src/k8s.io/code-generator/go.mod +++ b/staging/src/k8s.io/code-generator/go.mod @@ -2,26 +2,25 @@ module k8s.io/code-generator -go 1.20 +go 1.21.3 require ( github.com/gogo/protobuf v1.3.2 github.com/google/gnostic-models v0.6.8 github.com/spf13/pflag v1.0.5 gopkg.in/yaml.v2 v2.4.0 - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 k8s.io/klog/v2 v2.100.1 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 ) require ( - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -30,14 +29,14 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/onsi/ginkgo/v2 v2.9.4 // indirect - github.com/onsi/gomega v1.27.6 // indirect + github.com/onsi/ginkgo/v2 v2.13.0 // indirect + github.com/onsi/gomega v1.28.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect - github.com/stretchr/testify v1.8.2 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/tools v0.8.0 // indirect + github.com/stretchr/testify v1.8.4 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/tools v0.12.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect diff --git a/staging/src/k8s.io/code-generator/go.sum b/staging/src/k8s.io/code-generator/go.sum index 5187c12045ce8..a70e651a66c0d 100644 --- a/staging/src/k8s.io/code-generator/go.sum +++ b/staging/src/k8s.io/code-generator/go.sum @@ -7,8 +7,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 
h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= @@ -62,10 +62,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -81,47 +81,47 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 
h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -141,13 +141,13 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= diff --git a/staging/src/k8s.io/code-generator/kube_codegen.sh b/staging/src/k8s.io/code-generator/kube_codegen.sh index 3342b9dcaebfc..6ded2048368f3 100755 --- a/staging/src/k8s.io/code-generator/kube_codegen.sh +++ b/staging/src/k8s.io/code-generator/kube_codegen.sh @@ -50,11 +50,16 @@ function kube::codegen::internal::git_grep() { # --boilerplate # An optional override for the header file to insert into generated files. # +# --extra-peer-dir +# An optional list (this flag may be specified multiple times) of "extra" +# directories to consider during conversion generation. 
+# function kube::codegen::gen_helpers() { local in_pkg_root="" local out_base="" # gengo needs the output dir must be $out_base/$out_pkg_root local boilerplate="${KUBE_CODEGEN_ROOT}/hack/boilerplate.go.txt" local v="${KUBE_VERBOSE:-0}" + local extra_peers=() while [ "$#" -gt 0 ]; do case "$1" in @@ -70,6 +75,10 @@ function kube::codegen::gen_helpers() { boilerplate="$2" shift 2 ;; + "--extra-peer-dir") + extra_peers+=("$2") + shift 2 + ;; *) echo "unknown argument: $1" >&2 return 1 @@ -128,16 +137,16 @@ function kube::codegen::gen_helpers() { ":(glob)${root}"/'**/zz_generated.deepcopy.go' \ | xargs -0 rm -f - local inputs=() + local input_args=() for arg in "${input_pkgs[@]}"; do - inputs+=("--input-dirs" "$arg") + input_args+=("--input-dirs" "$arg") done "${gobin}/deepcopy-gen" \ -v "${v}" \ -O zz_generated.deepcopy \ --go-header-file "${boilerplate}" \ --output-base "${out_base}" \ - "${inputs[@]}" + "${input_args[@]}" fi # Defaults @@ -162,16 +171,16 @@ function kube::codegen::gen_helpers() { ":(glob)${root}"/'**/zz_generated.defaults.go' \ | xargs -0 rm -f - local inputs=() + local input_args=() for arg in "${input_pkgs[@]}"; do - inputs+=("--input-dirs" "$arg") + input_args+=("--input-dirs" "$arg") done "${gobin}/defaulter-gen" \ -v "${v}" \ -O zz_generated.defaults \ --go-header-file "${boilerplate}" \ --output-base "${out_base}" \ - "${inputs[@]}" + "${input_args[@]}" fi # Conversions @@ -196,16 +205,21 @@ function kube::codegen::gen_helpers() { ":(glob)${root}"/'**/zz_generated.conversion.go' \ | xargs -0 rm -f - local inputs=() + local input_args=() for arg in "${input_pkgs[@]}"; do - inputs+=("--input-dirs" "$arg") + input_args+=("--input-dirs" "$arg") + done + local extra_peer_args=() + for arg in "${extra_peers[@]:+"${extra_peers[@]}"}"; do + extra_peer_args+=("--extra-peer-dirs" "$arg") done "${gobin}/conversion-gen" \ -v "${v}" \ -O zz_generated.conversion \ --go-header-file "${boilerplate}" \ --output-base "${out_base}" \ - "${inputs[@]}" + "${extra_peer_args[@]:+"${extra_peer_args[@]}"}" \ + "${input_args[@]}" fi } diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod index b3300fce839e3..445719fb705b5 100644 --- a/staging/src/k8s.io/component-base/go.mod +++ b/staging/src/k8s.io/component-base/go.mod @@ -2,7 +2,7 @@ module k8s.io/component-base -go 1.20 +go 1.21.3 require ( github.com/blang/semver/v4 v4.0.0 @@ -16,14 +16,15 @@ require ( github.com/prometheus/procfs v0.10.1 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 - go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/trace v1.10.0 + github.com/stretchr/testify v1.8.4 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 + go.opentelemetry.io/otel v1.19.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 + go.opentelemetry.io/otel/sdk v1.19.0 + go.opentelemetry.io/otel/trace v1.19.0 go.uber.org/zap v1.19.0 - golang.org/x/sys v0.10.0 + golang.org/x/sys v0.13.0 + gopkg.in/yaml.v2 v2.4.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 k8s.io/klog/v2 v2.100.1 @@ -37,7 +38,7 @@ require ( github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + 
github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -48,7 +49,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -58,29 +59,25 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/goleak v1.2.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.54.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.0.0 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum index 17b06c7c2c85f..8be3c6735d9e9 100644 --- a/staging/src/k8s.io/component-base/go.sum +++ b/staging/src/k8s.io/component-base/go.sum @@ -1,160 +1,8 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod 
h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.5.0/go.mod 
h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= 
-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= -cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod 
h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler 
v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -168,25 +16,11 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -194,25 +28,14 @@ github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -233,83 +56,32 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -318,8 +90,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -347,10 +117,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -359,7 +129,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= 
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= @@ -367,11 +136,9 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -381,42 +148,31 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod 
h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod 
h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -429,291 +185,77 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
@@ -721,23 +263,13 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
 k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
-k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
 k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
 k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
diff --git a/staging/src/k8s.io/component-base/metrics/metric.go b/staging/src/k8s.io/component-base/metrics/metric.go
index 3b22d21ef2525..d68a98c44a13b 100644
--- a/staging/src/k8s.io/component-base/metrics/metric.go
+++ b/staging/src/k8s.io/component-base/metrics/metric.go
@@ -166,7 +166,7 @@ func (r *lazyMetric) Create(version *semver.Version) bool {
 	if deprecatedV != nil {
 		dv = deprecatedV.String()
 	}
-	registeredMetrics.WithLabelValues(string(sl), dv).Inc()
+	registeredMetricsTotal.WithLabelValues(string(sl), dv).Inc()
 	return r.IsCreated()
 }
diff --git a/staging/src/k8s.io/component-base/metrics/options.go b/staging/src/k8s.io/component-base/metrics/options.go
index 7a59b7ba16966..2c72cb48fd629 100644
--- a/staging/src/k8s.io/component-base/metrics/options.go
+++ b/staging/src/k8s.io/component-base/metrics/options.go
@@ -31,6 +31,7 @@ type Options struct {
 	ShowHiddenMetricsForVersion string
 	DisabledMetrics             []string
 	AllowListMapping            map[string]string
+	AllowListMappingManifest    string
 }
 // NewOptions returns default metrics options
@@ -40,6 +41,10 @@ func NewOptions() *Options {
 // Validate validates metrics flags options.
 func (o *Options) Validate() []error {
+	if o == nil {
+		return nil
+	}
+
 	var errs []error
 	err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion)
 	if err != nil {
@@ -77,6 +82,10 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
 		"The map from metric-label to value allow-list of this label. The key's format is ,. "+
 		"The value's format is ,..."+
 		"e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.")
+	fs.StringVar(&o.AllowListMappingManifest, "allow-metric-labels-manifest", o.AllowListMappingManifest,
+		"The path to the manifest file that contains the allow-list mapping. "+
+		"The format of the file is the same as the flag --allow-metric-labels. "+
+		"Note that the flag --allow-metric-labels will override the manifest file.")
 }
 // Apply applies parameters into global configuration of metrics.
@@ -93,6 +102,8 @@ func (o *Options) Apply() {
 	}
 	if o.AllowListMapping != nil {
 		SetLabelAllowListFromCLI(o.AllowListMapping)
+	} else if len(o.AllowListMappingManifest) > 0 {
+		SetLabelAllowListFromManifest(o.AllowListMappingManifest)
 	}
 }
@@ -118,7 +129,7 @@ func validateAllowMetricLabel(allowListMapping map[string]string) error {
 	for k := range allowListMapping {
 		reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex)
 		if reg.FindString(k) != k {
-			return fmt.Errorf("--allow-metric-labels must has a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`")
+			return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`")
 		}
 	}
 	return nil
diff --git a/staging/src/k8s.io/component-base/metrics/opts.go b/staging/src/k8s.io/component-base/metrics/opts.go
index 49d2d40bbf718..30dfd2e3dcc13 100644
--- a/staging/src/k8s.io/component-base/metrics/opts.go
+++ b/staging/src/k8s.io/component-base/metrics/opts.go
@@ -18,13 +18,18 @@ package metrics
 import (
 	"fmt"
+	"os"
+	"path/filepath"
 	"strings"
 	"sync"
 	"time"
 	"github.com/prometheus/client_golang/prometheus"
+	"gopkg.in/yaml.v2"
+
 	"k8s.io/apimachinery/pkg/util/sets"
 	promext "k8s.io/component-base/metrics/prometheusextension"
+	"k8s.io/klog/v2"
 )
 var (
@@ -319,6 +324,7 @@ func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, lab
 		if allowValues, ok := allowList.labelToAllowList[name]; ok {
 			if !allowValues.Has(value) {
 				labelValueList[index] = "unexpected"
+				cardinalityEnforcementUnexpectedCategorizationsTotal.Inc()
 			}
 		}
 	}
@@ -329,6 +335,7 @@ func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]strin
 		if allowValues, ok := allowList.labelToAllowList[name]; ok {
 			if !allowValues.Has(value) {
 				labels[name] = "unexpected"
+				cardinalityEnforcementUnexpectedCategorizationsTotal.Inc()
 			}
 		}
 	}
@@ -354,3 +361,20 @@ func SetLabelAllowListFromCLI(allowListMapping map[string]string) {
 		}
 	}
 }
+
+func SetLabelAllowListFromManifest(manifest string) {
+	allowListLock.Lock()
+	defer allowListLock.Unlock()
+	allowListMapping := make(map[string]string)
+	data, err := os.ReadFile(filepath.Clean(manifest))
+	if err != nil {
+		klog.Errorf("Failed to read allow list manifest: %v", err)
+		return
+	}
+	err = yaml.Unmarshal(data, &allowListMapping)
+	if err != nil {
+		klog.Errorf("Failed to parse allow list manifest: %v", err)
+		return
+	}
+	SetLabelAllowListFromCLI(allowListMapping)
+}
diff --git a/staging/src/k8s.io/component-base/metrics/prometheus/slis/metrics.go b/staging/src/k8s.io/component-base/metrics/prometheus/slis/metrics.go
index 7907dfad12aa9..3d464d12d75e2 100644
--- a/staging/src/k8s.io/component-base/metrics/prometheus/slis/metrics.go
+++ b/staging/src/k8s.io/component-base/metrics/prometheus/slis/metrics.go
@@ -37,7 +37,7 @@ var (
 			Namespace: "kubernetes",
 			Name: "healthcheck",
 			Help: "This metric records the result of a single healthcheck.",
-			StabilityLevel: k8smetrics.BETA,
+			StabilityLevel: k8smetrics.STABLE,
 		},
 		[]string{"name", "type"},
 	)
@@ -48,7 +48,7 @@ var (
 			Namespace: "kubernetes",
 			Name: "healthchecks_total",
 			Help: "This metric records the results of all healthcheck.",
-			StabilityLevel: k8smetrics.BETA,
+			StabilityLevel: k8smetrics.STABLE,
 		},
 		[]string{"name", "type", "status"},
 	)
diff --git a/staging/src/k8s.io/component-base/metrics/prometheus/slis/metrics_test.go b/staging/src/k8s.io/component-base/metrics/prometheus/slis/metrics_test.go
index b328c868d8b49..621caaab31d17 100644
--- a/staging/src/k8s.io/component-base/metrics/prometheus/slis/metrics_test.go
+++ b/staging/src/k8s.io/component-base/metrics/prometheus/slis/metrics_test.go
@@ -37,10 +37,10 @@ func TestObserveHealthcheck(t *testing.T) {
 	initialState := Error
 	healthcheckName := "healthcheck-a"
 	initialOutput := `
-	# HELP kubernetes_healthcheck [BETA] This metric records the result of a single healthcheck.
+	# HELP kubernetes_healthcheck [STABLE] This metric records the result of a single healthcheck.
 	# TYPE kubernetes_healthcheck gauge
 	kubernetes_healthcheck{name="healthcheck-a",type="healthz"} 0
-	# HELP kubernetes_healthchecks_total [BETA] This metric records the results of all healthcheck.
+	# HELP kubernetes_healthchecks_total [STABLE] This metric records the results of all healthcheck.
 	# TYPE kubernetes_healthchecks_total counter
 	kubernetes_healthchecks_total{name="healthcheck-a",status="error",type="healthz"} 1
 	`
@@ -57,10 +57,10 @@ func TestObserveHealthcheck(t *testing.T) {
 			hcType: "healthz",
 			hcStatus: Success,
 			want: `
-	# HELP kubernetes_healthcheck [BETA] This metric records the result of a single healthcheck.
+	# HELP kubernetes_healthcheck [STABLE] This metric records the result of a single healthcheck.
 	# TYPE kubernetes_healthcheck gauge
 	kubernetes_healthcheck{name="healthcheck-a",type="healthz"} 1
-	# HELP kubernetes_healthchecks_total [BETA] This metric records the results of all healthcheck.
+	# HELP kubernetes_healthchecks_total [STABLE] This metric records the results of all healthcheck.
 	# TYPE kubernetes_healthchecks_total counter
 	kubernetes_healthchecks_total{name="healthcheck-a",status="error",type="healthz"} 1
 	kubernetes_healthchecks_total{name="healthcheck-a",status="success",type="healthz"} 1
diff --git a/staging/src/k8s.io/component-base/metrics/registry.go b/staging/src/k8s.io/component-base/metrics/registry.go
index 1942f9958d237..203813e814318 100644
--- a/staging/src/k8s.io/component-base/metrics/registry.go
+++ b/staging/src/k8s.io/component-base/metrics/registry.go
@@ -37,7 +37,7 @@ var (
 	registriesLock sync.RWMutex
 	disabledMetrics = map[string]struct{}{}
-	registeredMetrics = NewCounterVec(
+	registeredMetricsTotal = NewCounterVec(
 		&CounterOpts{
 			Name: "registered_metrics_total",
 			Help: "The count of registered metrics broken by stability level and deprecation version.",
@@ -61,6 +61,14 @@ var (
 			StabilityLevel: BETA,
 		},
 	)
+
+	cardinalityEnforcementUnexpectedCategorizationsTotal = NewCounter(
+		&CounterOpts{
+			Name: "cardinality_enforcement_unexpected_categorizations_total",
+			Help: "The count of unexpected categorizations during cardinality enforcement.",
+			StabilityLevel: ALPHA,
+		},
+	)
 )
 // shouldHide be used to check if a specific metric with deprecated version should be hidden
@@ -379,7 +387,8 @@ func NewKubeRegistry() KubeRegistry {
 }
 func (r *kubeRegistry) RegisterMetaMetrics() {
-	r.MustRegister(registeredMetrics)
+	r.MustRegister(registeredMetricsTotal)
 	r.MustRegister(disabledMetricsTotal)
 	r.MustRegister(hiddenMetricsTotal)
+	r.MustRegister(cardinalityEnforcementUnexpectedCategorizationsTotal)
 }
diff --git a/staging/src/k8s.io/component-base/tracing/api/v1/config_test.go b/staging/src/k8s.io/component-base/tracing/api/v1/config_test.go
index 1aa4c0664ea5d..1371c0cac21c2 100644
--- a/staging/src/k8s.io/component-base/tracing/api/v1/config_test.go
+++ b/staging/src/k8s.io/component-base/tracing/api/v1/config_test.go
@@ -97,9 +97,9 @@ func TestValidateTracingConfiguration(t *testing.T) {
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
 			errs := ValidateTracingConfiguration(tc.contents, nil, field.NewPath("tracing"))
-			if tc.expectError == false && len(errs) != 0 {
+			if !tc.expectError && len(errs) != 0 {
 				t.Errorf("Calling ValidateTracingConfiguration expected no error, got %v", errs)
-			} else if tc.expectError == true && len(errs) == 0 {
+			} else if tc.expectError && len(errs) == 0 {
 				t.Errorf("Calling ValidateTracingConfiguration expected error, got no error")
 			}
 		})
diff --git a/staging/src/k8s.io/component-base/tracing/tracing.go b/staging/src/k8s.io/component-base/tracing/tracing.go
index 50894eb3b9baa..bdf6f377dde57 100644
--- a/staging/src/k8s.io/component-base/tracing/tracing.go
+++ b/staging/src/k8s.io/component-base/tracing/tracing.go
@@ -68,6 +68,12 @@ func (s *Span) End(logThreshold time.Duration) {
 	}
 }
+// RecordError will record err as an exception span event for this span.
+// If this span is not being recorded or err is nil then this method does nothing.
+func (s *Span) RecordError(err error, attributes ...attribute.KeyValue) { + s.otelSpan.RecordError(err, trace.WithAttributes(attributes...)) +} + func attributesToFields(attributes []attribute.KeyValue) []utiltrace.Field { fields := make([]utiltrace.Field, len(attributes)) for i := range attributes { diff --git a/staging/src/k8s.io/component-base/tracing/tracing_test.go b/staging/src/k8s.io/component-base/tracing/tracing_test.go index 0e3e12c60710b..145eb57b09500 100644 --- a/staging/src/k8s.io/component-base/tracing/tracing_test.go +++ b/staging/src/k8s.io/component-base/tracing/tracing_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "flag" + "fmt" "strings" "testing" "time" @@ -56,6 +57,9 @@ func TestOpenTelemetryTracing(t *testing.T) { tr.AddEvent("reticulated splines", attribute.Bool("should I do it?", false)) // took 5ms time.Sleep(10 * time.Millisecond) + // Add error event to the frobber span + tr.RecordError(fmt.Errorf("something went wrong")) + // Ensure setting context with span makes the next span a child ctx = ContextWithSpan(context.Background(), tr) @@ -87,7 +91,7 @@ func TestOpenTelemetryTracing(t *testing.T) { if len(child.Attributes()) != 1 { t.Errorf("got attributes %v; expected one attribute in child.Attributes()", child.Attributes()) } - if len(child.Events()) != 2 { + if len(child.Events()) != 3 { t.Errorf("got events %v; expected 2 events in child.Events()", child.Events()) } if child.Events()[0].Name != "reticulated splines" { @@ -96,11 +100,17 @@ func TestOpenTelemetryTracing(t *testing.T) { if len(child.Events()[0].Attributes) != 1 { t.Errorf("got event %v; expected 1 attribute in child.Events()[0].Attributes", child.Events()[0]) } - if child.Events()[1].Name != "sequenced particles" { - t.Errorf("got event %v; expected child.Events()[1].Name == sequenced particles", child.Events()[1]) + if child.Events()[1].Name != "exception" { + t.Errorf("got event %v; expected child.Events()[1].Name == exception", child.Events()[1]) + } + if len(child.Events()[1].Attributes) != 2 { + t.Errorf("got event %#v; expected 2 attributes in child.Events()[1].Attributes", child.Events()[1]) + } + if child.Events()[2].Name != "sequenced particles" { + t.Errorf("got event %v; expected child.Events()[2].Name == sequenced particles", child.Events()[2]) } - if len(child.Events()[1].Attributes) != 1 { - t.Errorf("got event %v; expected 1 attribute in child.Events()[1].Attributes", child.Events()[1]) + if len(child.Events()[2].Attributes) != 1 { + t.Errorf("got event %v; expected 1 attribute in child.Events()[2].Attributes", child.Events()[2]) } // Parent span is ended last parent := output[2] diff --git a/staging/src/k8s.io/component-base/tracing/utils.go b/staging/src/k8s.io/component-base/tracing/utils.go index ae894a8091cbd..72c8cf23e8aec 100644 --- a/staging/src/k8s.io/component-base/tracing/utils.go +++ b/staging/src/k8s.io/component-base/tracing/utils.go @@ -25,6 +25,7 @@ import ( "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" oteltrace "go.opentelemetry.io/otel/trace" "k8s.io/client-go/transport" @@ -95,9 +96,17 @@ func WithTracing(handler http.Handler, tp oteltrace.TracerProvider, serviceName otelhttp.WithPropagators(Propagators()), otelhttp.WithTracerProvider(tp), } + wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Add the http.target attribute to the otelhttp span + // Workaround for 
https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743 + if r.URL != nil { + oteltrace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI())) + } + handler.ServeHTTP(w, r) + }) // With Noop TracerProvider, the otelhttp still handles context propagation. // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough - return otelhttp.NewHandler(handler, serviceName, opts...) + return otelhttp.NewHandler(wrappedHandler, serviceName, opts...) } // WrapperFor can be used to add tracing to a *rest.Config. diff --git a/staging/src/k8s.io/component-helpers/go.mod b/staging/src/k8s.io/component-helpers/go.mod index 970094aa86acb..fd9babd46a0bd 100644 --- a/staging/src/k8s.io/component-helpers/go.mod +++ b/staging/src/k8s.io/component-helpers/go.mod @@ -2,7 +2,7 @@ module k8s.io/component-helpers -go 1.20 +go 1.21.3 require ( github.com/google/go-cmp v0.5.9 @@ -15,7 +15,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -33,18 +33,18 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/staging/src/k8s.io/component-helpers/go.sum b/staging/src/k8s.io/component-helpers/go.sum index f78ec9a1bc79f..0674b929993d7 100644 --- a/staging/src/k8s.io/component-helpers/go.sum +++ b/staging/src/k8s.io/component-helpers/go.sum @@ -1,4 +1,5 @@ -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -6,8 +7,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -68,10 +69,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -88,14 +89,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod 
h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -104,33 +105,33 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -153,11 +154,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/controller-manager/go.mod b/staging/src/k8s.io/controller-manager/go.mod index f018a31ebb992..d4f358943806e 100644 --- a/staging/src/k8s.io/controller-manager/go.mod +++ b/staging/src/k8s.io/controller-manager/go.mod @@ -2,12 +2,12 @@ module k8s.io/controller-manager -go 1.20 +go 1.21.3 require ( github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 - golang.org/x/oauth2 v0.8.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/oauth2 v0.10.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/apiserver v0.0.0 @@ -28,10 +28,10 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -46,7 +46,7 @@ require ( github.com/google/gofuzz v1.2.0 
// indirect github.com/google/uuid v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -67,38 +67,37 @@ require ( go.etcd.io/etcd/api/v3 v3.5.9 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect go.etcd.io/etcd/client/v3 v3.5.9 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect - go.opentelemetry.io/otel v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.10.0 // indirect - go.opentelemetry.io/otel/trace v1.10.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.54.0 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/staging/src/k8s.io/controller-manager/go.sum b/staging/src/k8s.io/controller-manager/go.sum index 29e10b370dcd9..28d5470f4d8b3 100644 --- a/staging/src/k8s.io/controller-manager/go.sum +++ b/staging/src/k8s.io/controller-manager/go.sum @@ -1,163 +1,126 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= 
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0/go.mod 
h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= 
+cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= 
+cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod 
h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= 
+cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= 
+cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -174,25 +137,12 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -205,27 +155,17 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -249,78 +189,31 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.17.6 h1:QDvHTIJunIsbgN8yVukx0HGnsqVLSY6xGqo+17IjIyM= github.com/google/cel-go v0.17.6/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= 
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -330,11 +223,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -346,8 +236,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go 
v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -374,10 +262,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -387,7 +275,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= @@ -395,7 +282,6 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -403,7 +289,6 @@ github.com/sirupsen/logrus v1.9.0 
h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -420,16 +305,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= @@ -447,32 +330,24 @@ go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= 
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -485,298 +360,83 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc 
v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -784,25 +444,15 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/cri-api/OWNERS b/staging/src/k8s.io/cri-api/OWNERS index 06f727c77f959..cab937e2dd247 100644 --- a/staging/src/k8s.io/cri-api/OWNERS +++ b/staging/src/k8s.io/cri-api/OWNERS @@ -1,15 +1,19 @@ # See the OWNERS docs at https://go.k8s.io/owners +# Disable inheritance as this is owned by sig-node (should mirror same contents as pkg/kubelet/OWNERS) +options: + no_parent_owners: true approvers: - dims - feiskyer - sig-node-approvers - - sig-node-api-approvers - api-approvers + - sig-node-cri-approvers reviewers: - sig-node-reviewers - dims labels: - sig/node + - area/kubelet emeritus_approvers: - resouer diff --git a/staging/src/k8s.io/cri-api/README.md b/staging/src/k8s.io/cri-api/README.md index 2b4bcbde1e49c..cd2c75017d2dd 100644 --- a/staging/src/k8s.io/cri-api/README.md +++ b/staging/src/k8s.io/cri-api/README.md @@ -221,6 +221,13 @@ No changes - [Expose commit memory used in WindowsMemoryUsage struct](https://github.com/kubernetes/kubernetes/pull/119238) - Added the `commit_memory_bytes` field to type `WindowsMemoryUsage` +### v1.29 + +`git diff v1.28.0 v1.29.0 -- staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto` + +- [Add runtime handler field to ImageSpec struct](https://github.com/kubernetes/kubernetes/pull/121121) + - Added `runtime_handler` field to type `ImageSpec` + ## Community, discussion, contribution, and support Learn how to engage with the Kubernetes community on the [community @@ -238,7 +245,7 @@ You can reach the maintainers of this repository at: Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). -### Contibution Guidelines +### Contribution Guidelines See [CONTRIBUTING.md](CONTRIBUTING.md) for more information. Please note that [kubernetes/cri-api](https://github.com/kubernetes/cri-api) is a readonly mirror repository, all development is done at [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes). 
diff --git a/staging/src/k8s.io/cri-api/go.mod b/staging/src/k8s.io/cri-api/go.mod index 93daf35a14f64..e32ae4f51dc85 100644 --- a/staging/src/k8s.io/cri-api/go.mod +++ b/staging/src/k8s.io/cri-api/go.mod @@ -2,12 +2,12 @@ module k8s.io/cri-api -go 1.20 +go 1.21.3 require ( github.com/gogo/protobuf v1.3.2 - github.com/stretchr/testify v1.8.2 - google.golang.org/grpc v1.54.0 + github.com/stretchr/testify v1.8.4 + google.golang.org/grpc v1.58.2 ) require ( @@ -16,10 +16,10 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/cri-api/go.sum b/staging/src/k8s.io/cri-api/go.sum index b3e3741ab7bde..6e9012f356840 100644 --- a/staging/src/k8s.io/cri-api/go.sum +++ b/staging/src/k8s.io/cri-api/go.sum @@ -1,18 +1,17 @@ -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf 
v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -35,19 +34,15 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -55,22 +50,23 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -81,11 +77,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -93,6 +90,5 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/staging/src/k8s.io/cri-api/pkg/OWNERS b/staging/src/k8s.io/cri-api/pkg/OWNERS deleted file mode 100644 index 9cb9564e23e3b..0000000000000 --- a/staging/src/k8s.io/cri-api/pkg/OWNERS +++ /dev/null @@ -1,12 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -# Disable inheritance as this is owned by sig-node (should mirror same contents as pkg/kubelet/OWNERS) -options: - no_parent_owners: true -approvers: - - sig-node-approvers # see https://github.com/kubernetes/kubernetes/blob/master/OWNERS_ALIASES#LC220:~:text=sig%2Dnode%2Dapprovers -reviewers: - - sig-node-reviewers # see https://github.com/kubernetes/kubernetes/blob/master/OWNERS_ALIASES#LC220:~:text=sig%2Dnode%2Dreviewers -labels: - - area/kubelet - - sig/node diff --git a/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go b/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go index 4db4fe0767fc5..36e4eebacdf29 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go @@ -3489,7 +3489,11 @@ type ImageSpec struct { Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // The container image reference specified by the user (e.g. image[:tag] or digest). // Only set if available within the RPC context. - UserSpecifiedImage string `protobuf:"bytes,18,opt,name=user_specified_image,json=userSpecifiedImage,proto3" json:"user_specified_image,omitempty"` + UserSpecifiedImage string `protobuf:"bytes,18,opt,name=user_specified_image,json=userSpecifiedImage,proto3" json:"user_specified_image,omitempty"` + // Runtime handler to use for pulling the image. + // If the runtime handler is unknown, the request should be rejected. + // An empty string would select the default runtime handler. + RuntimeHandler string `protobuf:"bytes,19,opt,name=runtime_handler,json=runtimeHandler,proto3" json:"runtime_handler,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_sizecache int32 `json:"-"` } @@ -3547,6 +3551,13 @@ func (m *ImageSpec) GetUserSpecifiedImage() string { return "" } +func (m *ImageSpec) GetRuntimeHandler() string { + if m != nil { + return m.RuntimeHandler + } + return "" +} + type KeyValue struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -8023,7 +8034,12 @@ func (m *WindowsFilesystemUsage) GetUsedBytes() *UInt64Value { type ImageFsInfoResponse struct { // Information of image filesystem(s). 
- ImageFilesystems []*FilesystemUsage `protobuf:"bytes,1,rep,name=image_filesystems,json=imageFilesystems,proto3" json:"image_filesystems,omitempty"` + ImageFilesystems []*FilesystemUsage `protobuf:"bytes,1,rep,name=image_filesystems,json=imageFilesystems,proto3" json:"image_filesystems,omitempty"` + // Information of container filesystem(s). + // This is an optional field, may be used for example if container and image + // storage are separated. + // Default will be to return this as empty. + ContainerFilesystems []*FilesystemUsage `protobuf:"bytes,2,rep,name=container_filesystems,json=containerFilesystems,proto3" json:"container_filesystems,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_sizecache int32 `json:"-"` } @@ -8067,6 +8083,13 @@ func (m *ImageFsInfoResponse) GetImageFilesystems() []*FilesystemUsage { return nil } +func (m *ImageFsInfoResponse) GetContainerFilesystems() []*FilesystemUsage { + if m != nil { + return m.ContainerFilesystems + } + return nil +} + type ContainerStatsRequest struct { // ID of the container for which to retrieve stats. ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` @@ -9994,432 +10017,434 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 6791 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3d, 0x4b, 0x6c, 0x1c, 0xc9, - 0x75, 0xec, 0x99, 0x21, 0x39, 0xf3, 0x86, 0x43, 0x0e, 0x4b, 0x14, 0x49, 0x8d, 0xfe, 0xbd, 0x3f, - 0x49, 0xbb, 0xfa, 0xac, 0xf6, 0x27, 0xc9, 0xfb, 0xd1, 0x88, 0xe4, 0x6a, 0x67, 0x2d, 0x91, 0xe3, - 0x1e, 0x72, 0xed, 0x5d, 0x07, 0xee, 0xb4, 0xa6, 0x8b, 0x64, 0xaf, 0x66, 0xba, 0xdb, 0xdd, 0x3d, - 0x92, 0xe8, 0x53, 0x8e, 0x89, 0x4f, 0x06, 0x12, 0xc7, 0x88, 0x11, 0x24, 0xc8, 0x21, 0x48, 0x6e, - 0x09, 0x02, 0x24, 0x71, 0x90, 0x1f, 0x60, 0x24, 0x86, 0x13, 0x20, 0x40, 0x0e, 0x09, 0xe0, 0x43, - 0x82, 0xd8, 0x9b, 0x00, 0x01, 0x72, 0xf6, 0x21, 0xa7, 0x38, 0xa8, 0x5f, 0x77, 0x57, 0xff, 0x66, - 0xc8, 0x5d, 0xef, 0xae, 0x4f, 0x9c, 0x7e, 0xf5, 0xde, 0xab, 0x57, 0xaf, 0x5e, 0xbd, 0x7a, 0x55, - 0xf5, 0xaa, 0x08, 0x35, 0xc3, 0xb5, 0xae, 0xb8, 0x9e, 0x13, 0x38, 0x08, 0xbc, 0x91, 0x1d, 0x58, - 0x43, 0x7c, 0xe5, 0xd1, 0x8b, 0xad, 0xcb, 0x7b, 0x56, 0xb0, 0x3f, 0x7a, 0x70, 0xa5, 0xef, 0x0c, - 0xaf, 0xee, 0x39, 0x7b, 0xce, 0x55, 0x8a, 0xf2, 0x60, 0xb4, 0x4b, 0xbf, 0xe8, 0x07, 0xfd, 0xc5, - 0x48, 0xd5, 0x4b, 0x30, 0xff, 0x1e, 0xf6, 0x7c, 0xcb, 0xb1, 0x35, 0xfc, 0xf5, 0x11, 0xf6, 0x03, - 0xb4, 0x0a, 0xb3, 0x8f, 0x18, 0x64, 0x55, 0x39, 0xa7, 0x5c, 0xa8, 0x69, 0xe2, 0x53, 0xfd, 0x03, - 0x05, 0x16, 0x42, 0x64, 0xdf, 0x75, 0x6c, 0x1f, 0xe7, 0x63, 0xa3, 0xf3, 0x30, 0xc7, 0xc5, 0xd2, - 0x6d, 0x63, 0x88, 0x57, 0x4b, 0xb4, 0xb8, 0xce, 0x61, 0x9b, 0xc6, 0x10, 0xa3, 0xe7, 0x60, 0x41, - 0xa0, 0x08, 0x26, 0x65, 0x8a, 0x35, 0xcf, 0xc1, 0xbc, 0x36, 0x74, 0x05, 0x8e, 0x09, 0x44, 0xc3, - 0xb5, 0x42, 0xe4, 0x0a, 0x45, 0x5e, 0xe4, 0x45, 0x6d, 0xd7, 0xe2, 0xf8, 0xea, 0x57, 0xa1, 0xb6, - 0xbe, 0xd9, 0x5b, 0x73, 0xec, 0x5d, 0x6b, 0x8f, 0x88, 0xe8, 0x63, 0x8f, 0xd0, 0xac, 0x2a, 0xe7, - 0xca, 0x44, 0x44, 0xfe, 0x89, 0x5a, 0x50, 0xf5, 0xb1, 0xe1, 0xf5, 0xf7, 0xb1, 0xbf, 0x5a, 0xa2, - 0x45, 0xe1, 0x37, 0xa1, 0x72, 0xdc, 0xc0, 0x72, 0x6c, 0x7f, 0xb5, 0xcc, 0xa8, 0xf8, 0xa7, 0xfa, - 0xdb, 0x0a, 0xd4, 0xbb, 0x8e, 0x17, 0xdc, 0x37, 0x5c, 0xd7, 0xb2, 0xf7, 0xd0, 0x35, 0xa8, 0x52, - 0x5d, 0xf6, 0x9d, 0x01, 0xd5, 0xc1, 0xfc, 0xf5, 0xa5, 0x2b, 0x51, 0x87, 0x5c, 
0xe9, 0xf2, 0x32, - 0x2d, 0xc4, 0x42, 0xcf, 0xc0, 0x7c, 0xdf, 0xb1, 0x03, 0xc3, 0xb2, 0xb1, 0xa7, 0xbb, 0x8e, 0x17, - 0x50, 0xe5, 0x4c, 0x6b, 0x8d, 0x10, 0x4a, 0xf8, 0xa3, 0x93, 0x50, 0xdb, 0x77, 0xfc, 0x80, 0x61, - 0x94, 0x29, 0x46, 0x95, 0x00, 0x68, 0xe1, 0x0a, 0xcc, 0xd2, 0x42, 0xcb, 0xe5, 0x6a, 0x98, 0x21, - 0x9f, 0x1d, 0x57, 0xfd, 0x7e, 0x09, 0xa6, 0xef, 0x3b, 0x23, 0x3b, 0x48, 0x54, 0x63, 0x04, 0xfb, - 0xbc, 0x8b, 0x62, 0xd5, 0x18, 0xc1, 0x7e, 0x54, 0x0d, 0xc1, 0x60, 0xbd, 0xc4, 0xaa, 0x21, 0x85, - 0x2d, 0xa8, 0x7a, 0xd8, 0x30, 0x1d, 0x7b, 0x70, 0x40, 0x45, 0xa8, 0x6a, 0xe1, 0x37, 0xe9, 0x3e, - 0x1f, 0x0f, 0x2c, 0x7b, 0xf4, 0x44, 0xf7, 0xf0, 0xc0, 0x78, 0x80, 0x07, 0x54, 0x94, 0xaa, 0x36, - 0xcf, 0xc1, 0x1a, 0x83, 0xa2, 0x37, 0xa1, 0xee, 0x7a, 0x8e, 0x6b, 0xec, 0x19, 0x44, 0x83, 0xab, - 0xd3, 0x54, 0x49, 0xa7, 0xe2, 0x4a, 0xa2, 0x02, 0x77, 0x23, 0x1c, 0x2d, 0x4e, 0x80, 0x5e, 0x83, - 0xfa, 0xc8, 0x32, 0xb9, 0xbe, 0xfd, 0xd5, 0x99, 0x73, 0xe5, 0x0b, 0xf5, 0xeb, 0xc7, 0xe3, 0xf4, - 0x9d, 0x75, 0x5e, 0xaa, 0xc5, 0x31, 0x09, 0xe1, 0x5e, 0x8c, 0x70, 0xb6, 0x90, 0x30, 0x86, 0xa9, - 0xea, 0x50, 0x0b, 0x4b, 0x22, 0x55, 0x9b, 0x54, 0x81, 0x0d, 0xae, 0x6a, 0x93, 0x98, 0x78, 0xa4, - 0x60, 0xcb, 0xa4, 0xca, 0x6b, 0x68, 0xf5, 0x10, 0xd6, 0x31, 0xd1, 0x32, 0xcc, 0x0c, 0xb0, 0xbd, - 0x17, 0xec, 0x53, 0xed, 0x35, 0x34, 0xfe, 0xa5, 0xfe, 0x86, 0x02, 0x8d, 0x1d, 0x1f, 0x7b, 0x64, - 0x1c, 0xf8, 0xae, 0xd1, 0xc7, 0xe8, 0x32, 0x54, 0x86, 0x8e, 0x89, 0xb9, 0x09, 0x9d, 0x88, 0x0b, - 0x19, 0x22, 0xdd, 0x77, 0x4c, 0xac, 0x51, 0x34, 0x74, 0x11, 0x2a, 0x23, 0xcb, 0x64, 0x76, 0x9b, - 0xdb, 0x26, 0x8a, 0x42, 0x50, 0xf7, 0x08, 0x6a, 0xb9, 0x10, 0x95, 0xa0, 0xa8, 0x3f, 0x53, 0x60, - 0x21, 0xac, 0x6d, 0x8b, 0x1a, 0x3c, 0x7a, 0x09, 0x66, 0x6d, 0x1c, 0x3c, 0x76, 0xbc, 0x87, 0xe3, - 0x65, 0x13, 0x98, 0xe8, 0x79, 0x28, 0xbb, 0x5c, 0x23, 0x85, 0x04, 0x04, 0x8b, 0x20, 0x5b, 0x6e, - 0x9f, 0x6a, 0xa8, 0x18, 0xd9, 0x72, 0xfb, 0xc4, 0x5c, 0x03, 0xc3, 0xdb, 0xc3, 0xb4, 0x3f, 0x98, - 0xe9, 0x57, 0x19, 0xa0, 0x63, 0xa2, 0xdb, 0x30, 0x3f, 0xf2, 0xb1, 0x67, 0xfb, 0xba, 0x18, 0xbc, - 0xc4, 0xd8, 0xea, 0x32, 0x53, 0x49, 0xef, 0x5a, 0x83, 0x11, 0x6c, 0xf1, 0xd1, 0xad, 0x02, 0x74, - 0xec, 0xe0, 0xd5, 0x97, 0xdf, 0x33, 0x06, 0x23, 0x8c, 0x96, 0x60, 0xfa, 0x11, 0xf9, 0x41, 0x5b, - 0x5e, 0xd6, 0xd8, 0x87, 0xfa, 0xd7, 0x15, 0x38, 0x79, 0x8f, 0x18, 0x78, 0xcf, 0xb0, 0xcd, 0x07, - 0xce, 0x93, 0x1e, 0xee, 0x8f, 0x3c, 0x2b, 0x38, 0x58, 0x73, 0xec, 0x00, 0x3f, 0x09, 0xd0, 0x3b, - 0xb0, 0x68, 0x0b, 0xfe, 0xa1, 0x20, 0x0a, 0x15, 0xe4, 0x64, 0x66, 0xeb, 0x58, 0xe5, 0x5a, 0xd3, - 0x96, 0x01, 0x3e, 0xba, 0x13, 0x0d, 0x31, 0xc1, 0xa7, 0x94, 0x6e, 0x50, 0x6f, 0x83, 0x4a, 0xc3, - 0xb9, 0x88, 0xd1, 0x27, 0x78, 0xbc, 0x0a, 0xc4, 0xe9, 0xea, 0x86, 0xaf, 0x93, 0x96, 0x52, 0x2d, - 0xd7, 0xaf, 0x2f, 0x4b, 0x56, 0x10, 0x36, 0x58, 0xab, 0x79, 0x23, 0xbb, 0xed, 0x13, 0x0d, 0xa1, - 0x1b, 0xd4, 0x81, 0x13, 0xba, 0x3d, 0xcf, 0x19, 0xb9, 0xab, 0xd5, 0x42, 0x42, 0xa0, 0x84, 0x77, - 0x09, 0x26, 0xf5, 0xeb, 0xdc, 0x49, 0xe8, 0x9e, 0xe3, 0x04, 0xbb, 0xbe, 0x70, 0x0c, 0x02, 0xac, - 0x51, 0x28, 0xba, 0x0a, 0xc7, 0xfc, 0x91, 0xeb, 0x0e, 0xf0, 0x10, 0xdb, 0x81, 0x31, 0x60, 0x15, - 0x91, 0x3e, 0x2b, 0x5f, 0x28, 0x6b, 0x28, 0x5e, 0x44, 0x19, 0xfb, 0xe8, 0x0c, 0x80, 0xeb, 0x59, - 0x8f, 0xac, 0x01, 0xde, 0xc3, 0xe6, 0xea, 0x0c, 0x65, 0x1a, 0x83, 0xa0, 0x57, 0x88, 0xaf, 0xef, - 0xf7, 0x9d, 0xa1, 0xbb, 0x5a, 0x4b, 0xeb, 0x5b, 0xf4, 0x53, 0xd7, 0x73, 0x76, 0xad, 0x01, 0xd6, - 0x04, 0x2e, 0x7a, 0x0d, 0xaa, 0x86, 0xeb, 0x1a, 0xde, 0xd0, 0xf1, 0x56, 0x61, 0x3c, 0x5d, 0x88, - 0x8c, 
0x5e, 0x86, 0x25, 0xce, 0x43, 0x77, 0x59, 0x21, 0x73, 0xa3, 0xb3, 0xc4, 0x2e, 0xef, 0x94, - 0x56, 0x15, 0x0d, 0xf1, 0x72, 0x4e, 0x4b, 0x9c, 0xaa, 0xfa, 0x77, 0x0a, 0x2c, 0x24, 0x78, 0xa2, - 0x77, 0x61, 0x4e, 0x70, 0x08, 0x0e, 0x5c, 0xe1, 0x06, 0x9e, 0x2b, 0x10, 0xe3, 0x0a, 0xff, 0xbb, - 0x7d, 0xe0, 0x62, 0xea, 0x2f, 0xc5, 0x07, 0x7a, 0x0a, 0x1a, 0x03, 0xa7, 0x6f, 0x0c, 0xa8, 0xd7, - 0xf2, 0xf0, 0x2e, 0xf7, 0xea, 0x73, 0x21, 0x50, 0xc3, 0xbb, 0xea, 0x6d, 0xa8, 0xc7, 0x18, 0x20, - 0x04, 0xf3, 0x1a, 0xab, 0x6a, 0x1d, 0xef, 0x1a, 0xa3, 0x41, 0xd0, 0x9c, 0x42, 0xf3, 0x00, 0x3b, - 0x76, 0x9f, 0xcc, 0xa2, 0x36, 0x36, 0x9b, 0x0a, 0x6a, 0x40, 0xed, 0x9e, 0x60, 0xd1, 0x2c, 0xa9, - 0xdf, 0x2d, 0xc3, 0x71, 0x6a, 0x78, 0x5d, 0xc7, 0xe4, 0x23, 0x81, 0x4f, 0xb9, 0x4f, 0x41, 0xa3, - 0x4f, 0xfb, 0x52, 0x77, 0x0d, 0x0f, 0xdb, 0x01, 0x9f, 0x78, 0xe6, 0x18, 0xb0, 0x4b, 0x61, 0x48, - 0x83, 0xa6, 0xcf, 0x5b, 0xa4, 0xf7, 0xd9, 0xc8, 0xe1, 0xc6, 0x2d, 0xb5, 0xba, 0x60, 0xa0, 0x69, - 0x0b, 0x7e, 0x6a, 0xe4, 0xcd, 0xfa, 0x07, 0x7e, 0x3f, 0x18, 0x08, 0x6f, 0x77, 0x25, 0xc5, 0x2a, - 0x29, 0xec, 0x95, 0x1e, 0x23, 0xd8, 0xb0, 0x03, 0xef, 0x40, 0x13, 0xe4, 0xe8, 0x2d, 0xa8, 0x3a, - 0x8f, 0xb0, 0xb7, 0x8f, 0x0d, 0xe6, 0x65, 0xea, 0xd7, 0x9f, 0x4a, 0xb1, 0x5a, 0x13, 0x8e, 0x5e, - 0xc3, 0xbe, 0x33, 0xf2, 0xfa, 0xd8, 0xd7, 0x42, 0x22, 0xd4, 0x86, 0x9a, 0x27, 0xc0, 0xdc, 0x0b, - 0x4d, 0xc4, 0x21, 0xa2, 0x6a, 0xdd, 0x82, 0xb9, 0xb8, 0x70, 0xa8, 0x09, 0xe5, 0x87, 0xf8, 0x80, - 0x2b, 0x93, 0xfc, 0x8c, 0xfc, 0x13, 0xeb, 0x61, 0xf6, 0x71, 0xab, 0x74, 0x43, 0x51, 0x3d, 0x40, - 0x51, 0x4b, 0xef, 0xe3, 0xc0, 0x30, 0x8d, 0xc0, 0x40, 0x08, 0x2a, 0x34, 0x18, 0x63, 0x2c, 0xe8, - 0x6f, 0xc2, 0x75, 0xc4, 0x5d, 0x75, 0x4d, 0x23, 0x3f, 0xd1, 0x29, 0xa8, 0x85, 0x9e, 0x88, 0x47, - 0x64, 0x11, 0x80, 0x44, 0x46, 0x46, 0x10, 0xe0, 0xa1, 0x1b, 0x50, 0xc5, 0x34, 0x34, 0xf1, 0xa9, - 0xfe, 0xda, 0x34, 0x34, 0x53, 0xb6, 0x70, 0x0b, 0xaa, 0x43, 0x5e, 0x3d, 0xf7, 0x81, 0x67, 0xa4, - 0xf0, 0x28, 0x25, 0xa4, 0x16, 0xe2, 0x93, 0xe8, 0x83, 0xd8, 0x5a, 0x2c, 0x7e, 0x0c, 0xbf, 0x99, - 0x91, 0xef, 0xe9, 0xa6, 0xe5, 0xe1, 0x7e, 0xe0, 0x78, 0x07, 0x5c, 0xd0, 0xb9, 0x81, 0xb3, 0xb7, - 0x2e, 0x60, 0xe8, 0x65, 0x00, 0xd3, 0xf6, 0x75, 0x6a, 0xc3, 0x7b, 0xbc, 0x1f, 0xa5, 0x09, 0x30, - 0x0c, 0x13, 0xb5, 0x9a, 0x69, 0xfb, 0x5c, 0xe4, 0xd7, 0xa1, 0x41, 0x62, 0x2e, 0x7d, 0x28, 0x02, - 0x87, 0x69, 0x6a, 0x4b, 0x2b, 0xb2, 0xdc, 0x61, 0x04, 0xa8, 0xcd, 0xb9, 0xd1, 0x87, 0x8f, 0x6e, - 0xc3, 0x0c, 0x0d, 0x7b, 0x44, 0xa0, 0x72, 0x21, 0xbb, 0xb9, 0xdc, 0xfa, 0xee, 0x51, 0x54, 0x66, - 0x7c, 0x9c, 0x0e, 0x6d, 0x41, 0xdd, 0xb0, 0x6d, 0x27, 0x30, 0x98, 0xc7, 0x67, 0x61, 0xcb, 0xe5, - 0x42, 0x36, 0xed, 0x08, 0x9f, 0xf1, 0x8a, 0x73, 0x40, 0xaf, 0xc1, 0x34, 0x9d, 0x12, 0xb8, 0x0f, - 0x3f, 0x3f, 0x76, 0x50, 0x68, 0x0c, 0x1f, 0xbd, 0x01, 0xb3, 0x8f, 0x2d, 0xdb, 0x74, 0x1e, 0xfb, - 0xdc, 0x9f, 0x4a, 0x26, 0xfc, 0x65, 0x56, 0x94, 0x22, 0x16, 0x34, 0xad, 0x9b, 0x50, 0x8f, 0xb5, - 0xef, 0x30, 0xf6, 0xdb, 0x7a, 0x13, 0x9a, 0xc9, 0x36, 0x1d, 0xca, 0xfe, 0x47, 0xb0, 0xa4, 0x8d, - 0xec, 0x48, 0x34, 0xb1, 0xbc, 0x79, 0x19, 0x66, 0xb8, 0x35, 0x30, 0x63, 0x3c, 0x55, 0xa4, 0x56, - 0x8d, 0xe3, 0xc6, 0x57, 0x2a, 0xfb, 0x86, 0x6d, 0x0e, 0xb0, 0xc7, 0x6b, 0x14, 0x2b, 0x95, 0x77, - 0x18, 0x54, 0x7d, 0x03, 0x8e, 0x27, 0xaa, 0xe5, 0x0b, 0xa5, 0xa7, 0x61, 0xde, 0x75, 0x4c, 0xdd, - 0x67, 0x60, 0x11, 0x4b, 0xd6, 0x88, 0xed, 0x08, 0xdc, 0x8e, 0x49, 0xc8, 0x7b, 0x81, 0xe3, 0xa6, - 0xc5, 0x9e, 0x8c, 0x7c, 0x15, 0x96, 0x93, 0xe4, 0xac, 0x7a, 0xf5, 0x2d, 0x58, 0xd1, 0xf0, 0xd0, - 0x79, 0x84, 0x8f, 0xca, 0xba, 
0x05, 0xab, 0x69, 0x06, 0x9c, 0xf9, 0xfb, 0xb0, 0x12, 0x41, 0x7b, - 0x81, 0x11, 0x8c, 0xfc, 0x43, 0x31, 0xe7, 0xab, 0xc8, 0x07, 0x8e, 0xcf, 0x3a, 0xb2, 0xaa, 0x89, - 0x4f, 0x75, 0x05, 0xa6, 0xbb, 0x8e, 0xd9, 0xe9, 0xa2, 0x79, 0x28, 0x59, 0x2e, 0x27, 0x2e, 0x59, - 0xae, 0xda, 0x8f, 0xd7, 0xb9, 0xc9, 0xa2, 0x4e, 0x56, 0x75, 0x12, 0x15, 0xdd, 0x80, 0x79, 0xc3, - 0x34, 0x2d, 0x62, 0x48, 0xc6, 0x40, 0xb7, 0x5c, 0x11, 0x34, 0x2f, 0x26, 0xba, 0xbe, 0xd3, 0xd5, - 0x1a, 0x11, 0x62, 0xc7, 0xf5, 0xd5, 0x3b, 0x50, 0x8b, 0x02, 0xf4, 0x57, 0xa2, 0x15, 0x61, 0x69, - 0x7c, 0x2c, 0x17, 0x2e, 0x17, 0x37, 0x53, 0x93, 0x24, 0x17, 0xf3, 0x15, 0x80, 0xd0, 0xa9, 0x8a, - 0xf0, 0xf0, 0x78, 0x26, 0x4b, 0x2d, 0x86, 0xa8, 0xfe, 0x47, 0x25, 0xee, 0x64, 0x63, 0x4d, 0x36, - 0xc3, 0x26, 0x9b, 0x92, 0xd3, 0x2d, 0x1d, 0xd2, 0xe9, 0xbe, 0x08, 0xd3, 0x7e, 0x60, 0x04, 0x98, - 0xc7, 0xe3, 0x27, 0xb3, 0x09, 0x49, 0xc5, 0x58, 0x63, 0x98, 0xe8, 0x34, 0x40, 0xdf, 0xc3, 0x46, - 0x80, 0x4d, 0xdd, 0x60, 0xb3, 0x42, 0x59, 0xab, 0x71, 0x48, 0x3b, 0x20, 0x5e, 0x44, 0xac, 0x20, - 0x32, 0x26, 0xc2, 0x9c, 0x6e, 0x8c, 0xd6, 0x12, 0xa1, 0xf7, 0x9a, 0x19, 0xeb, 0xbd, 0x38, 0x29, - 0xf7, 0x5e, 0x91, 0x27, 0x9e, 0x2d, 0xf2, 0xc4, 0x8c, 0x68, 0x12, 0x4f, 0x5c, 0x2d, 0xf2, 0xc4, - 0x9c, 0x4d, 0xb1, 0x27, 0xce, 0x70, 0x24, 0xb5, 0x2c, 0x47, 0xf2, 0x59, 0xba, 0xce, 0xbf, 0x28, - 0xc1, 0x6a, 0x7a, 0x3c, 0x73, 0x3f, 0xf6, 0x32, 0xcc, 0xf8, 0x14, 0x52, 0xec, 0x3f, 0x39, 0x15, - 0xc7, 0x45, 0x77, 0xa0, 0x62, 0xd9, 0xbb, 0x0e, 0x1f, 0x78, 0x57, 0x0a, 0x69, 0x78, 0x4d, 0x57, - 0x3a, 0xf6, 0xae, 0xc3, 0x34, 0x48, 0x69, 0xd1, 0x3d, 0x38, 0x16, 0xae, 0xac, 0x7d, 0x9d, 0x31, - 0xc6, 0x22, 0xce, 0x93, 0xac, 0x34, 0x8c, 0xaa, 0x38, 0x47, 0x14, 0xd1, 0xf5, 0x38, 0x19, 0x89, - 0x71, 0x08, 0xba, 0x1f, 0x18, 0x43, 0x57, 0x58, 0x6c, 0x08, 0x68, 0xbd, 0x06, 0xb5, 0xb0, 0xfa, - 0x43, 0xe9, 0xae, 0x03, 0x4b, 0x89, 0x31, 0xc2, 0x16, 0x92, 0xe1, 0xa0, 0x52, 0x26, 0x1d, 0x54, - 0xea, 0x4f, 0x95, 0xf8, 0x40, 0x7f, 0xdb, 0x1a, 0x04, 0xd8, 0x4b, 0x0d, 0xf4, 0x57, 0x05, 0x5f, - 0x36, 0xca, 0xcf, 0x15, 0xf0, 0x65, 0xeb, 0x34, 0x3e, 0x62, 0xdf, 0x83, 0x79, 0x6a, 0xe2, 0xba, - 0x8f, 0x07, 0x34, 0x56, 0xe2, 0x7a, 0xbc, 0x9a, 0xcd, 0x80, 0xd5, 0xce, 0x86, 0x48, 0x8f, 0x53, - 0xb0, 0xbe, 0x69, 0x0c, 0xe2, 0xb0, 0xd6, 0x6d, 0x40, 0x69, 0xa4, 0x43, 0x69, 0xf0, 0x3e, 0xf1, - 0x97, 0x7e, 0x90, 0x39, 0x73, 0xef, 0x52, 0x31, 0x8a, 0x2d, 0x8f, 0x89, 0xaa, 0x71, 0x5c, 0xf5, - 0x5f, 0xcb, 0x00, 0x51, 0xe1, 0xe7, 0xdc, 0x51, 0xde, 0x0a, 0x1d, 0x16, 0x8b, 0x38, 0xd5, 0x6c, - 0x96, 0x99, 0xae, 0xaa, 0x23, 0xbb, 0x2a, 0x16, 0x7b, 0x3e, 0x97, 0xc3, 0xe0, 0xd0, 0x4e, 0x6a, - 0xf6, 0xf3, 0xe6, 0xa4, 0xde, 0x86, 0xe5, 0xa4, 0x99, 0x70, 0x0f, 0xf5, 0x02, 0x4c, 0x5b, 0x01, - 0x1e, 0xb2, 0xdd, 0xde, 0xc4, 0x86, 0x45, 0x0c, 0x9d, 0x21, 0xa9, 0x6f, 0xc2, 0xb2, 0xdc, 0x57, - 0x87, 0x0b, 0x5d, 0xd4, 0x7b, 0xc9, 0xd8, 0x27, 0x72, 0x95, 0xdc, 0x3e, 0x32, 0xb7, 0x7e, 0x92, - 0x34, 0x0c, 0x53, 0xfd, 0x81, 0x02, 0xc7, 0x13, 0x45, 0x39, 0x03, 0xff, 0xab, 0xa9, 0x01, 0xcc, - 0x7c, 0xeb, 0xcb, 0x05, 0xb5, 0x7c, 0x8a, 0xa3, 0xf8, 0xcb, 0xd0, 0x92, 0xbb, 0x47, 0x52, 0xed, - 0xcd, 0xc4, 0x50, 0x3e, 0x3f, 0x56, 0xe8, 0x70, 0x3c, 0x77, 0xe1, 0x64, 0x26, 0xe3, 0xb4, 0xce, - 0xcb, 0x13, 0xea, 0xfc, 0x7f, 0x4b, 0x71, 0x9f, 0xdd, 0x0e, 0x02, 0xcf, 0x7a, 0x30, 0x0a, 0xf0, - 0x27, 0x1b, 0x54, 0xad, 0x87, 0x23, 0x9b, 0xf9, 0xd9, 0x17, 0xb2, 0x29, 0xa3, 0xda, 0x33, 0xc7, - 0x78, 0x4f, 0x1e, 0xe3, 0x15, 0xca, 0xea, 0xc5, 0xb1, 0xac, 0x0a, 0x47, 0xfb, 0x67, 0x39, 0x88, - 0xff, 0x41, 0x81, 0x85, 0x44, 0xaf, 0xa0, 0xdb, 0x00, 
0x46, 0x28, 0x3a, 0xb7, 0x8f, 0x73, 0xe3, - 0x9a, 0xa8, 0xc5, 0x68, 0xc8, 0x9c, 0xc8, 0xe2, 0xc5, 0x8c, 0x39, 0x31, 0x23, 0x5e, 0x0c, 0xc3, - 0xc5, 0xd7, 0xa3, 0xc5, 0x2e, 0xdb, 0x24, 0x55, 0x0b, 0x17, 0xbb, 0x8c, 0x56, 0x90, 0xa8, 0xbf, - 0x5e, 0x82, 0xa5, 0x2c, 0xee, 0xe8, 0x59, 0x28, 0xf7, 0xdd, 0x11, 0x6f, 0x89, 0x74, 0x34, 0xb4, - 0xe6, 0x8e, 0x76, 0x7c, 0x63, 0x0f, 0x6b, 0x04, 0x01, 0x5d, 0x85, 0x99, 0x21, 0x1e, 0x3a, 0xde, - 0x01, 0x97, 0x5b, 0xda, 0x6e, 0xb8, 0x4f, 0x4b, 0x18, 0x36, 0x47, 0x43, 0xd7, 0xa3, 0xb0, 0x9a, - 0xc9, 0xbb, 0x2a, 0xad, 0x1e, 0x58, 0x11, 0x23, 0x09, 0x63, 0xe9, 0xeb, 0x30, 0xeb, 0x7a, 0x4e, - 0x1f, 0xfb, 0x3e, 0xdf, 0x0d, 0x59, 0x4d, 0x9c, 0x55, 0x91, 0x22, 0x4e, 0xc3, 0x11, 0xd1, 0x2d, - 0x80, 0x28, 0x80, 0xe2, 0x33, 0x53, 0x2b, 0x37, 0xde, 0xf2, 0xb5, 0x18, 0xb6, 0xfa, 0xbd, 0x12, - 0x2c, 0x67, 0x6b, 0x0e, 0x5d, 0x8e, 0xeb, 0xe5, 0x64, 0x86, 0xaa, 0x65, 0xf5, 0xbc, 0x9a, 0x50, - 0xcf, 0x99, 0x0c, 0x8a, 0x2c, 0x2d, 0xdd, 0x4c, 0x6a, 0xe9, 0x6c, 0x06, 0x61, 0xb6, 0xb2, 0x6e, - 0x26, 0x95, 0x95, 0x45, 0x9a, 0xad, 0xb3, 0x76, 0x86, 0xce, 0xce, 0x67, 0xb5, 0x31, 0x5f, 0x75, - 0x7f, 0xab, 0xc0, 0x5c, 0x5c, 0x2e, 0x39, 0x64, 0x55, 0x12, 0x21, 0x2b, 0xda, 0x84, 0x45, 0x93, - 0xed, 0xdc, 0xea, 0x96, 0x1d, 0x60, 0x6f, 0xd7, 0xe8, 0x8b, 0xa8, 0xf0, 0x7c, 0x86, 0x5d, 0x74, - 0x04, 0x0e, 0x13, 0xbc, 0xc9, 0x69, 0x43, 0x30, 0x69, 0x41, 0xc8, 0x47, 0x78, 0xad, 0x09, 0x18, - 0xc5, 0x88, 0xd4, 0x7f, 0x51, 0xe0, 0x58, 0x86, 0x82, 0xc7, 0x34, 0x64, 0x27, 0xbf, 0x21, 0x17, - 0xf2, 0xbb, 0x6e, 0x6c, 0x7b, 0xde, 0xc9, 0x68, 0xcf, 0xe4, 0xfc, 0xe2, 0xcd, 0xfa, 0x99, 0x02, - 0xc7, 0x33, 0xb1, 0x32, 0xb7, 0x57, 0xaf, 0x43, 0xd5, 0x7b, 0xa2, 0x3f, 0x38, 0x08, 0xb0, 0x9f, - 0x35, 0xb0, 0x77, 0x62, 0x67, 0x28, 0xb3, 0xde, 0x93, 0x3b, 0x04, 0x0f, 0xbd, 0x0c, 0x35, 0xef, - 0x89, 0x8e, 0x3d, 0xcf, 0xf1, 0x84, 0x2f, 0xca, 0x25, 0xaa, 0x7a, 0x4f, 0x36, 0x28, 0x22, 0xa9, - 0x29, 0x10, 0x35, 0x55, 0xc6, 0xd4, 0x14, 0x44, 0x35, 0x05, 0x61, 0x4d, 0xd3, 0x63, 0x6a, 0x0a, - 0x78, 0x4d, 0xea, 0x1f, 0x96, 0xe0, 0x54, 0x91, 0xba, 0x3e, 0x31, 0x45, 0x6c, 0x00, 0xf2, 0x9e, - 0xe8, 0xae, 0xd1, 0x7f, 0x88, 0x03, 0x5f, 0x37, 0x3d, 0xc7, 0x75, 0xb1, 0x39, 0x4e, 0x23, 0x4d, - 0xef, 0x49, 0x97, 0x51, 0xac, 0x33, 0x82, 0x23, 0x69, 0x66, 0x03, 0x50, 0x90, 0xae, 0x7a, 0x8c, - 0x8a, 0x9a, 0x41, 0xa2, 0x6a, 0xf5, 0x43, 0x98, 0x8b, 0x7b, 0x88, 0x31, 0xb6, 0xff, 0x3a, 0x34, - 0xb8, 0x07, 0xd1, 0xfb, 0xce, 0xc8, 0x0e, 0xc6, 0x29, 0x6a, 0x8e, 0x63, 0xaf, 0x11, 0x64, 0xf5, - 0xeb, 0xe1, 0x70, 0xfb, 0xd4, 0xaa, 0xfc, 0x77, 0x05, 0x6a, 0x9d, 0xa1, 0xb1, 0x87, 0x7b, 0x2e, - 0xee, 0x93, 0x99, 0xde, 0x22, 0x1f, 0xbc, 0xdf, 0xd9, 0x07, 0x7a, 0x47, 0x8e, 0x5a, 0x58, 0x9c, - 0xfa, 0xac, 0x74, 0x8e, 0x28, 0x38, 0x8c, 0x59, 0x98, 0x5c, 0x83, 0xa5, 0x91, 0x8f, 0x3d, 0xdd, - 0x77, 0x71, 0xdf, 0xda, 0xb5, 0xb0, 0xa9, 0xb3, 0xea, 0x10, 0xad, 0x0e, 0x91, 0xb2, 0x9e, 0x28, - 0xa2, 0x3c, 0x3f, 0x76, 0x84, 0x72, 0x1d, 0xaa, 0x5f, 0xc4, 0x07, 0x6c, 0x0d, 0x3f, 0x21, 0x9d, - 0xfa, 0xed, 0x0a, 0xac, 0xe4, 0x9c, 0xee, 0xd0, 0x05, 0xa0, 0x3b, 0xd2, 0x5d, 0xec, 0x59, 0x8e, - 0x29, 0x3a, 0xa3, 0xef, 0x8e, 0xba, 0x14, 0x80, 0x4e, 0x02, 0xf9, 0xd0, 0xbf, 0x3e, 0x72, 0x78, - 0x8c, 0x59, 0xd6, 0xaa, 0x7d, 0x77, 0xf4, 0x25, 0xf2, 0x2d, 0x68, 0xfd, 0x7d, 0xc3, 0xc3, 0xcc, - 0x2d, 0x30, 0xda, 0x1e, 0x05, 0xa0, 0x17, 0xe1, 0x38, 0x9b, 0xf2, 0xf4, 0x81, 0x35, 0xb4, 0x88, - 0xf3, 0x8c, 0x59, 0x7c, 0x59, 0x43, 0xac, 0xf0, 0x1e, 0x29, 0xeb, 0xd8, 0xcc, 0xc6, 0x55, 0x68, - 0x38, 0xce, 0x50, 0xf7, 0xfb, 0x8e, 0x87, 0x75, 0xc3, 0xfc, 0x90, 0x9a, 0x77, 
0x59, 0xab, 0x3b, - 0xce, 0xb0, 0x47, 0x60, 0x6d, 0xf3, 0x43, 0x74, 0x16, 0xea, 0x7d, 0x77, 0xe4, 0xe3, 0x40, 0x27, - 0x7f, 0xe8, 0x1e, 0x5c, 0x4d, 0x03, 0x06, 0x5a, 0x73, 0x47, 0x7e, 0x0c, 0x61, 0x48, 0x56, 0x5d, - 0xb3, 0x71, 0x84, 0xfb, 0x78, 0x48, 0x0f, 0xb1, 0xf7, 0x47, 0x7b, 0xd8, 0x35, 0xf6, 0x30, 0x13, - 0x4d, 0x6c, 0xa4, 0x49, 0x87, 0xd8, 0xef, 0x70, 0x14, 0x2a, 0xa0, 0x36, 0xbf, 0x1f, 0xff, 0xf4, - 0xd1, 0xbb, 0x30, 0x3b, 0xb2, 0x69, 0xbf, 0xae, 0xd6, 0x28, 0xed, 0xb5, 0x09, 0xce, 0xd2, 0xae, - 0xec, 0x30, 0x12, 0x7e, 0xb4, 0xc7, 0x19, 0xa0, 0x5b, 0xd0, 0xe2, 0x8a, 0xf2, 0x1f, 0x1b, 0x6e, - 0x52, 0x5b, 0x40, 0x55, 0xb0, 0xcc, 0x30, 0x7a, 0x8f, 0x0d, 0x37, 0xae, 0xb1, 0xd6, 0x2d, 0x98, - 0x8b, 0x33, 0x3d, 0x94, 0x2d, 0xdd, 0x81, 0x86, 0xd4, 0x48, 0xd2, 0xdb, 0x54, 0x29, 0xbe, 0xf5, - 0x0d, 0x31, 0x64, 0xaa, 0x04, 0xd0, 0xb3, 0xbe, 0x41, 0x53, 0x0f, 0xa8, 0x64, 0x94, 0x4f, 0x45, - 0x63, 0x1f, 0xaa, 0x01, 0x0d, 0xe9, 0xb4, 0x9f, 0x78, 0x5a, 0x7a, 0xac, 0xcf, 0x3d, 0x2d, 0xf9, - 0x4d, 0x60, 0x9e, 0x33, 0x10, 0x12, 0xd0, 0xdf, 0x04, 0x46, 0xcf, 0x95, 0xd9, 0x29, 0x19, 0xfd, - 0x4d, 0xab, 0xc0, 0x8f, 0x78, 0xda, 0x4e, 0x4d, 0x63, 0x1f, 0xea, 0xef, 0x28, 0x00, 0x6b, 0x86, - 0x6b, 0x3c, 0xb0, 0x06, 0x56, 0x70, 0x80, 0x2e, 0x42, 0xd3, 0x30, 0x4d, 0xbd, 0x2f, 0x20, 0x16, - 0x16, 0x79, 0x54, 0x0b, 0x86, 0x69, 0xae, 0xc5, 0xc0, 0xe8, 0x79, 0x58, 0x24, 0x7e, 0x52, 0xc6, - 0x65, 0x89, 0x55, 0x4d, 0x52, 0x20, 0x21, 0xdf, 0x80, 0x55, 0xc2, 0xd7, 0x18, 0x3e, 0xb0, 0xb0, - 0x1d, 0xc8, 0x34, 0x2c, 0xe3, 0x6a, 0xd9, 0x30, 0xcd, 0x36, 0x2b, 0x8e, 0x53, 0xaa, 0x7f, 0x33, - 0x03, 0xa7, 0xe5, 0x1e, 0x4f, 0x26, 0x60, 0xdc, 0x82, 0xb9, 0x84, 0xbc, 0xa9, 0xd4, 0x85, 0xa8, - 0x85, 0x9a, 0x84, 0x9b, 0x48, 0x31, 0x28, 0xa5, 0x52, 0x0c, 0x32, 0x93, 0x3b, 0xca, 0x9f, 0x50, - 0x72, 0x47, 0xe5, 0x63, 0x26, 0x77, 0x4c, 0x1f, 0x35, 0xb9, 0x63, 0x6e, 0xe2, 0xe4, 0x8e, 0x67, - 0xe9, 0xe6, 0x90, 0xa8, 0x91, 0xce, 0xf2, 0xcc, 0x27, 0x34, 0x42, 0xee, 0xb6, 0x48, 0xee, 0x4b, - 0x24, 0x81, 0xcc, 0x1e, 0x26, 0x09, 0xa4, 0x9a, 0x9b, 0x04, 0x72, 0x0e, 0xe6, 0x6c, 0x47, 0xb7, - 0xf1, 0x63, 0x9d, 0x74, 0x8b, 0xbf, 0x5a, 0x67, 0x7d, 0x64, 0x3b, 0x9b, 0xf8, 0x71, 0x97, 0x40, - 0xd0, 0x79, 0x98, 0x1b, 0x1a, 0xfe, 0x43, 0x6c, 0xd2, 0x6c, 0x0c, 0x7f, 0xb5, 0x41, 0xed, 0xa9, - 0xce, 0x60, 0x5d, 0x02, 0x42, 0xcf, 0x40, 0x28, 0x07, 0x47, 0x9a, 0xa7, 0x48, 0x0d, 0x01, 0x65, - 0x68, 0xb1, 0x84, 0x92, 0x85, 0x23, 0x26, 0x94, 0x34, 0x0f, 0x93, 0x50, 0x72, 0x19, 0x9a, 0xe2, - 0xb7, 0xc8, 0x28, 0x61, 0x07, 0x04, 0x34, 0x99, 0x64, 0x41, 0x94, 0x89, 0xac, 0x91, 0xbc, 0xfc, - 0x13, 0x28, 0xcc, 0x3f, 0xf9, 0x23, 0x85, 0x2f, 0x55, 0xc3, 0x01, 0xc4, 0x0f, 0xbe, 0xa5, 0x9c, - 0x05, 0xe5, 0x28, 0x39, 0x0b, 0x68, 0x3b, 0x37, 0xab, 0xe3, 0x62, 0x3e, 0xa7, 0x71, 0x79, 0x1d, - 0xea, 0xfd, 0x70, 0x15, 0xf9, 0x49, 0x64, 0xa7, 0xa9, 0xff, 0xa5, 0xc0, 0x69, 0xce, 0x2f, 0x27, - 0x85, 0x2b, 0xc3, 0xca, 0x95, 0x1c, 0x2b, 0xef, 0x7b, 0xd8, 0xc4, 0x76, 0x60, 0x19, 0x03, 0x1a, - 0x97, 0x88, 0x83, 0xe1, 0x08, 0x4c, 0x43, 0xa3, 0xf3, 0x30, 0xc7, 0xb2, 0x2c, 0xf9, 0x82, 0x92, - 0x25, 0x53, 0xd6, 0x69, 0xa2, 0x25, 0x5f, 0x33, 0x6e, 0x65, 0x79, 0x96, 0x4a, 0xee, 0x4e, 0xc4, - 0x58, 0x07, 0xa3, 0x3a, 0xb0, 0x92, 0x73, 0x44, 0x9f, 0xd9, 0x4d, 0x4a, 0xba, 0x9b, 0x0a, 0x95, - 0x94, 0xee, 0xa6, 0x6f, 0x2b, 0x70, 0x36, 0xb5, 0xb0, 0xfd, 0xec, 0x35, 0xab, 0xfe, 0xa9, 0x12, - 0xda, 0x4f, 0xd2, 0xe4, 0xd7, 0xd2, 0x26, 0xff, 0x4c, 0xd1, 0x3a, 0x3d, 0xd3, 0xe8, 0xdf, 0xcb, - 0x35, 0xfa, 0xe7, 0x0b, 0xd7, 0xfc, 0xe3, 0xf4, 0xf9, 0x6f, 0x0a, 0x9c, 0xc8, 0x15, 0x20, 0x11, - 0x0f, 
0x2a, 0xc9, 0x78, 0x90, 0xc7, 0x92, 0x51, 0x50, 0xcf, 0x62, 0x49, 0x1a, 0xb7, 0xf3, 0xa0, - 0x4d, 0x1f, 0x1a, 0x4f, 0xac, 0xe1, 0x68, 0xc8, 0x83, 0x49, 0xc2, 0xee, 0x3e, 0x83, 0x1c, 0x25, - 0x9a, 0xbc, 0x0a, 0x4b, 0xcc, 0xd1, 0xd3, 0x80, 0x26, 0xa2, 0x60, 0x41, 0xe5, 0x22, 0x2b, 0x23, - 0xb1, 0x0d, 0x27, 0x50, 0xdb, 0xb0, 0x18, 0x36, 0xab, 0x30, 0x45, 0x29, 0x96, 0x72, 0x54, 0x92, - 0x53, 0x8e, 0x6c, 0x98, 0x59, 0xc7, 0x8f, 0xac, 0x3e, 0xfe, 0x44, 0xb2, 0x9d, 0xcf, 0x41, 0xdd, - 0xc5, 0xde, 0xd0, 0xf2, 0xfd, 0x70, 0x56, 0xaf, 0x69, 0x71, 0x90, 0x7a, 0x16, 0x6a, 0x6b, 0xeb, - 0x1d, 0x5e, 0x65, 0x86, 0xa8, 0xea, 0x7f, 0xcf, 0xc0, 0x42, 0xd2, 0xc6, 0x6e, 0xa6, 0x52, 0xa0, - 0x4e, 0x67, 0x6e, 0x9f, 0x65, 0xec, 0x1b, 0x3f, 0x2f, 0x56, 0x54, 0xa5, 0x74, 0x7e, 0x40, 0xb8, - 0x6a, 0x12, 0x0b, 0xad, 0x55, 0x98, 0xed, 0x3b, 0xc3, 0xa1, 0x61, 0x9b, 0x22, 0x67, 0x9d, 0x7f, - 0x12, 0x49, 0x0d, 0x6f, 0x8f, 0xed, 0x18, 0xd7, 0x34, 0xfa, 0x9b, 0x98, 0x00, 0x71, 0x86, 0x96, - 0x4d, 0x93, 0xa8, 0x68, 0x2f, 0xd5, 0x34, 0xe0, 0xa0, 0x75, 0xcb, 0x43, 0x17, 0xa0, 0x82, 0xed, - 0x47, 0xe2, 0x28, 0x49, 0xda, 0xb9, 0x14, 0x6b, 0x22, 0x8d, 0x62, 0xa0, 0x8b, 0x30, 0x33, 0x24, - 0x66, 0x25, 0x0e, 0xda, 0x17, 0x53, 0xb9, 0xdd, 0x1a, 0x47, 0x40, 0x2f, 0xc0, 0xac, 0x49, 0xb5, - 0x27, 0x16, 0x01, 0x48, 0x4a, 0xc7, 0xa2, 0x45, 0x9a, 0x40, 0x41, 0x6f, 0x85, 0xdb, 0xe6, 0xb5, - 0xf4, 0x79, 0x56, 0x42, 0xcd, 0x99, 0x3b, 0xe6, 0x9b, 0xf2, 0xda, 0x13, 0xd2, 0x9b, 0xef, 0x49, - 0x2e, 0xc5, 0x2b, 0xd0, 0x13, 0x50, 0x1d, 0x38, 0x7b, 0xcc, 0x7a, 0xea, 0xec, 0xc2, 0xc3, 0xc0, - 0xd9, 0xa3, 0xc6, 0xb3, 0x04, 0xd3, 0x7e, 0x60, 0x5a, 0x36, 0x8d, 0xa5, 0xaa, 0x1a, 0xfb, 0x20, - 0x83, 0x94, 0xfe, 0xd0, 0x1d, 0xbb, 0x8f, 0x57, 0x1b, 0xb4, 0xa8, 0x46, 0x21, 0x5b, 0x76, 0x9f, - 0xae, 0x29, 0x83, 0xe0, 0x60, 0x75, 0x9e, 0xc2, 0xc9, 0xcf, 0x68, 0xf7, 0x7a, 0x21, 0x67, 0xf7, - 0x3a, 0x21, 0x70, 0xc6, 0xee, 0x75, 0x33, 0x77, 0xce, 0x48, 0xd2, 0x0a, 0x12, 0x12, 0x47, 0xae, - 0xad, 0x77, 0x74, 0xd1, 0x35, 0x8b, 0xe9, 0x54, 0xf1, 0xd0, 0xec, 0x35, 0x08, 0x7f, 0x7e, 0xa6, - 0x87, 0x07, 0xdf, 0x53, 0x60, 0x79, 0x8d, 0x1e, 0x9d, 0xc6, 0x7c, 0xe3, 0x61, 0xb2, 0x8e, 0x5e, - 0x0a, 0x53, 0xc1, 0x32, 0xf2, 0x79, 0x92, 0x9a, 0x12, 0x99, 0x60, 0x6b, 0x30, 0x2f, 0xd8, 0x72, - 0xe2, 0xf2, 0x04, 0x79, 0x64, 0x0d, 0x3f, 0xfe, 0xa9, 0xbe, 0x0e, 0x2b, 0x29, 0xc9, 0xf9, 0x01, - 0x56, 0xf2, 0x4e, 0x01, 0x13, 0x3c, 0x7e, 0xa7, 0x40, 0xbd, 0x05, 0xc7, 0x7b, 0x81, 0xe1, 0x05, - 0xa9, 0x66, 0x4f, 0x40, 0x4b, 0x33, 0xc4, 0x64, 0x5a, 0x9e, 0xc4, 0xd5, 0x83, 0xa5, 0x5e, 0xe0, - 0xb8, 0x47, 0x60, 0x4a, 0xfc, 0x0e, 0x69, 0xb9, 0x33, 0x12, 0xf3, 0x8c, 0xf8, 0x54, 0x57, 0x58, - 0x3e, 0x5b, 0xba, 0xb6, 0x2f, 0xc0, 0x32, 0x4b, 0x27, 0x3b, 0x4a, 0x23, 0x4e, 0x88, 0x64, 0xb6, - 0x34, 0xdf, 0xbb, 0x70, 0x4c, 0xda, 0x52, 0xe7, 0xe9, 0x17, 0xd7, 0xe4, 0xf4, 0x8b, 0xfc, 0xd3, - 0x8b, 0x30, 0xfb, 0xe2, 0x3b, 0xa5, 0x98, 0x1f, 0xcf, 0x39, 0x83, 0x7d, 0x45, 0x4e, 0xbe, 0x38, - 0x9b, 0xcf, 0x55, 0xca, 0xbd, 0x48, 0x5b, 0x67, 0x39, 0xc3, 0x3a, 0x77, 0x52, 0x07, 0xbc, 0x95, - 0x74, 0xf2, 0x4c, 0x42, 0xc2, 0x4f, 0xe5, 0x68, 0xf7, 0x1e, 0x4b, 0xd0, 0x08, 0xab, 0x0e, 0x4f, - 0x75, 0x5f, 0x4a, 0x9c, 0xea, 0x9e, 0x2c, 0x90, 0x34, 0x3c, 0xcf, 0xfd, 0x4e, 0x05, 0x6a, 0x61, - 0x59, 0x4a, 0xc3, 0x69, 0x55, 0x95, 0x32, 0x54, 0x15, 0x9f, 0x5f, 0xcb, 0x47, 0x9c, 0x5f, 0x2b, - 0x13, 0xcc, 0xaf, 0x27, 0xa1, 0x46, 0x7f, 0xd0, 0x9c, 0x7a, 0x36, 0x5f, 0x56, 0x29, 0x40, 0xc3, - 0xbb, 0x91, 0x89, 0xcd, 0x4c, 0x68, 0x62, 0x89, 0x64, 0x90, 0xd9, 0x64, 0x32, 0xc8, 0xcd, 0x70, - 0xee, 0xab, 0xa6, 0x0f, 0x5f, 
0x42, 0x8e, 0x99, 0xb3, 0x5e, 0x62, 0xc7, 0xb5, 0x96, 0xde, 0x71, - 0x8d, 0xe8, 0x3f, 0xb7, 0x87, 0xc3, 0x5b, 0x2c, 0xc3, 0x23, 0x6e, 0x67, 0xdc, 0x47, 0xbe, 0x22, - 0x1d, 0xae, 0x29, 0x19, 0x73, 0x55, 0xe8, 0x17, 0xe2, 0x07, 0x6a, 0x3b, 0xb0, 0x9c, 0xcc, 0x0c, - 0x3b, 0x94, 0x8f, 0xcb, 0x49, 0x51, 0xfd, 0xcd, 0x78, 0xc4, 0x97, 0x93, 0x8f, 0x79, 0x33, 0x95, - 0x3a, 0x30, 0xb1, 0x85, 0x5e, 0x93, 0xb3, 0x8c, 0x0e, 0x6d, 0x57, 0xa9, 0x24, 0x23, 0x1a, 0x91, - 0x18, 0x1e, 0x2f, 0x66, 0xc1, 0x79, 0x8d, 0x43, 0xda, 0x74, 0x65, 0xb0, 0x6b, 0xd9, 0x96, 0xbf, - 0xcf, 0xca, 0x67, 0xd8, 0xca, 0x40, 0x80, 0xda, 0x74, 0xd7, 0x12, 0x3f, 0xb1, 0x02, 0xbd, 0xef, - 0x98, 0x98, 0x5a, 0xed, 0xb4, 0x56, 0x25, 0x80, 0x35, 0xc7, 0xc4, 0xd1, 0x78, 0xaa, 0x1e, 0x76, - 0x3c, 0xd5, 0x12, 0xe3, 0x69, 0x19, 0x66, 0x3c, 0x6c, 0xf8, 0x8e, 0xcd, 0x36, 0x33, 0x34, 0xfe, - 0x45, 0x3a, 0x62, 0x88, 0x7d, 0x9f, 0xd4, 0xc1, 0x03, 0x30, 0xfe, 0x19, 0x0b, 0x16, 0xe7, 0x0a, - 0x82, 0xc5, 0x82, 0x6c, 0xcf, 0x44, 0xb0, 0xd8, 0x28, 0x08, 0x16, 0x27, 0x4a, 0xf6, 0x8c, 0xc2, - 0xe2, 0xf9, 0x71, 0x61, 0x71, 0x3c, 0xae, 0x5c, 0x90, 0xe3, 0xca, 0xd7, 0xe3, 0x2b, 0xd4, 0x66, - 0xfa, 0xec, 0xbb, 0xf8, 0x0e, 0xc9, 0x67, 0x38, 0x80, 0xff, 0x51, 0x81, 0x95, 0xd4, 0x80, 0xe3, - 0x43, 0xf8, 0xa5, 0x44, 0x1a, 0x69, 0x61, 0xfe, 0xa6, 0xc8, 0x22, 0x6d, 0x4b, 0x59, 0xa4, 0x97, - 0x8b, 0x48, 0x72, 0x92, 0x48, 0x8f, 0x9e, 0xd8, 0xf9, 0x2d, 0x05, 0x50, 0xc6, 0x1a, 0xfc, 0xa6, - 0x88, 0xd6, 0x0f, 0xb1, 0x5b, 0xc6, 0x03, 0xf6, 0xb7, 0xa2, 0x80, 0xbd, 0x74, 0x98, 0x7d, 0x87, - 0x30, 0xe3, 0xe4, 0xc7, 0x25, 0x38, 0xbb, 0xe3, 0x9a, 0x89, 0x30, 0x92, 0x63, 0x4d, 0xee, 0xd9, - 0x6e, 0xca, 0xe9, 0x32, 0x47, 0x6c, 0x42, 0xf9, 0x28, 0x4d, 0x40, 0x5f, 0xcb, 0x4a, 0x68, 0x7a, - 0x5d, 0x3a, 0x7a, 0x2c, 0x6e, 0xe0, 0x98, 0xe9, 0xeb, 0xe3, 0x9a, 0xb0, 0x0a, 0xe7, 0xf2, 0x05, - 0xe0, 0x21, 0xe7, 0x2f, 0xc3, 0xc2, 0xc6, 0x13, 0xdc, 0xef, 0x1d, 0xd8, 0xfd, 0x43, 0x68, 0xbd, - 0x09, 0xe5, 0xfe, 0xd0, 0xe4, 0xa7, 0x23, 0xe4, 0x67, 0x3c, 0x8a, 0x2e, 0xcb, 0x51, 0xb4, 0x0e, - 0xcd, 0xa8, 0x06, 0x3e, 0x80, 0x96, 0xc9, 0x00, 0x32, 0x09, 0x32, 0x61, 0x3e, 0xa7, 0xf1, 0x2f, - 0x0e, 0xc7, 0x1e, 0xbb, 0xa0, 0xc2, 0xe0, 0xd8, 0xf3, 0x64, 0xaf, 0x5d, 0x96, 0xbd, 0xb6, 0xfa, - 0x5d, 0x05, 0xea, 0xa4, 0x86, 0x8f, 0x25, 0x3f, 0x5f, 0xca, 0x96, 0xa3, 0xa5, 0x6c, 0xb8, 0x22, - 0xae, 0xc4, 0x57, 0xc4, 0x91, 0xe4, 0xd3, 0x14, 0x9c, 0x96, 0x7c, 0x26, 0x84, 0x63, 0xcf, 0x53, - 0xcf, 0xc1, 0x1c, 0x93, 0x8d, 0xb7, 0xbc, 0x09, 0xe5, 0x91, 0x37, 0x10, 0xfd, 0x37, 0xf2, 0x06, - 0xea, 0x37, 0x15, 0x68, 0xb4, 0x83, 0xc0, 0xe8, 0xef, 0x1f, 0xa2, 0x01, 0xa1, 0x70, 0xa5, 0xb8, - 0x70, 0xe9, 0x46, 0x44, 0xe2, 0x56, 0x72, 0xc4, 0x9d, 0x96, 0xc4, 0x55, 0x61, 0x5e, 0xc8, 0x92, - 0x2b, 0xf0, 0x26, 0xa0, 0xae, 0xe3, 0x05, 0x6f, 0x3b, 0xde, 0x63, 0xc3, 0x33, 0x0f, 0xb7, 0x6a, - 0x45, 0x50, 0xe1, 0x4f, 0x06, 0x94, 0x2f, 0x4c, 0x6b, 0xf4, 0xb7, 0xfa, 0x1c, 0x1c, 0x93, 0xf8, - 0xe5, 0x56, 0x7c, 0x0b, 0xea, 0x74, 0x16, 0xe6, 0x0b, 0x9a, 0xe7, 0xe3, 0xe7, 0xf5, 0x63, 0x66, - 0x6b, 0x75, 0x1d, 0x16, 0x49, 0x3c, 0x46, 0xe1, 0xa1, 0x7f, 0xb9, 0x9a, 0x88, 0xf9, 0x57, 0x52, - 0x2c, 0x12, 0xf1, 0xfe, 0x4f, 0x15, 0x98, 0xa6, 0xf0, 0x54, 0x8c, 0x74, 0x92, 0xcc, 0x73, 0xae, - 0xa3, 0x07, 0xc6, 0x5e, 0xf8, 0x1c, 0x03, 0x01, 0x6c, 0x1b, 0x7b, 0xf4, 0x44, 0x87, 0x16, 0x9a, - 0xd6, 0x1e, 0xf6, 0x03, 0x71, 0x42, 0x58, 0x27, 0xb0, 0x75, 0x06, 0x22, 0x8a, 0xa1, 0x07, 0xa9, - 0x15, 0x7a, 0x5e, 0x4a, 0x7f, 0xa3, 0x0b, 0xec, 0x6e, 0x63, 0xf1, 0xb1, 0x18, 0xbd, 0xf3, 0xd8, - 0x82, 0x6a, 0xe2, 0x3c, 0x2b, 0xfc, 0x46, 0x17, 0xa1, 
0x42, 0xf7, 0x9f, 0x67, 0x8b, 0xb4, 0x44, - 0x51, 0x88, 0x55, 0xb8, 0x96, 0x6d, 0x63, 0x93, 0x06, 0x40, 0x55, 0x8d, 0x7f, 0xa9, 0x6f, 0x01, - 0x8a, 0x2b, 0x8f, 0x77, 0xd0, 0x45, 0x98, 0xa1, 0xba, 0x15, 0x41, 0xec, 0x62, 0x8a, 0xb5, 0xc6, - 0x11, 0xd4, 0xaf, 0x02, 0x62, 0x75, 0x49, 0x81, 0xeb, 0x61, 0x3a, 0xb0, 0x20, 0x84, 0xfd, 0x33, - 0x05, 0x8e, 0x49, 0xdc, 0xb9, 0x7c, 0xcf, 0xc9, 0xec, 0x33, 0xc4, 0xe3, 0xac, 0xdf, 0x90, 0x66, - 0xe6, 0x8b, 0x69, 0x31, 0x7e, 0x4e, 0xb3, 0xf2, 0x3f, 0x29, 0x00, 0xed, 0x51, 0xb0, 0xcf, 0x37, - 0x5a, 0xe3, 0x9d, 0xa8, 0x24, 0x3a, 0xb1, 0x05, 0x55, 0xd7, 0xf0, 0xfd, 0xc7, 0x8e, 0x27, 0x16, - 0x91, 0xe1, 0x37, 0xdd, 0x1e, 0x1d, 0xf1, 0x37, 0x1a, 0x6a, 0x1a, 0xfd, 0x8d, 0x9e, 0x81, 0x79, - 0xf6, 0x4e, 0x88, 0x6e, 0x98, 0xa6, 0x27, 0x72, 0x00, 0x6b, 0x5a, 0x83, 0x41, 0xdb, 0x0c, 0x48, - 0xd0, 0x2c, 0x7a, 0x1a, 0x11, 0x1c, 0xe8, 0x81, 0xf3, 0x10, 0xdb, 0x7c, 0x61, 0xd8, 0x10, 0xd0, - 0x6d, 0x02, 0x64, 0xc7, 0x8d, 0x7b, 0x96, 0x1f, 0x78, 0x02, 0x4d, 0x1c, 0x9a, 0x72, 0x28, 0x45, - 0x53, 0xff, 0x58, 0x81, 0x66, 0x77, 0x34, 0x18, 0x30, 0xe5, 0x1e, 0xa5, 0x93, 0x2f, 0xf1, 0xa6, - 0x94, 0xd2, 0x26, 0x1f, 0x29, 0x8a, 0x37, 0xf1, 0x13, 0xd9, 0xcb, 0xba, 0x06, 0x8b, 0x31, 0x89, - 0xb9, 0xe1, 0x48, 0x91, 0xbd, 0x22, 0x47, 0xf6, 0x6a, 0x1b, 0x10, 0xdb, 0xbe, 0x39, 0x72, 0x2b, - 0xd5, 0xe3, 0x70, 0x4c, 0x62, 0xc1, 0xa7, 0xe2, 0x4b, 0xd0, 0xe0, 0xf9, 0x68, 0xdc, 0x20, 0x4e, - 0x40, 0x95, 0xb8, 0xd4, 0xbe, 0x65, 0x8a, 0x0c, 0x89, 0x59, 0xd7, 0x31, 0xd7, 0x2c, 0xd3, 0x53, - 0xbf, 0x04, 0x0d, 0x7e, 0xe1, 0x9d, 0xe3, 0xde, 0x86, 0x79, 0x7e, 0x3e, 0xa8, 0x4b, 0x37, 0x44, - 0x4f, 0x64, 0x24, 0x3d, 0x0a, 0x55, 0xd8, 0xf1, 0x4f, 0xf5, 0x6b, 0xd0, 0x62, 0xd1, 0x82, 0xc4, - 0x58, 0x34, 0xf0, 0x36, 0x88, 0xeb, 0x13, 0x05, 0xfc, 0x65, 0xca, 0x86, 0x17, 0xff, 0x54, 0x4f, - 0xc3, 0xc9, 0x4c, 0xfe, 0xbc, 0xf5, 0x2e, 0x34, 0xa3, 0x02, 0x76, 0x8d, 0x31, 0x4c, 0xfb, 0x50, - 0x62, 0x69, 0x1f, 0xcb, 0x61, 0xec, 0x5d, 0x12, 0x33, 0x17, 0x0d, 0xaf, 0xa3, 0x15, 0x57, 0x39, - 0x6f, 0xc5, 0x55, 0x91, 0x56, 0x5c, 0xea, 0xfd, 0x50, 0x87, 0x7c, 0xdd, 0xfb, 0x3a, 0x5d, 0x99, - 0xb3, 0xba, 0x85, 0x53, 0x3b, 0x95, 0xdd, 0x3e, 0x86, 0xa4, 0xc5, 0xf0, 0xd5, 0x8b, 0xd0, 0x90, - 0xdd, 0x5b, 0xcc, 0x63, 0x29, 0x29, 0x8f, 0x35, 0x9f, 0x70, 0x56, 0x2f, 0x26, 0x96, 0x14, 0x59, - 0x7a, 0x4d, 0x2c, 0x28, 0x6e, 0x48, 0x6e, 0xeb, 0x69, 0xe9, 0x88, 0xfe, 0xe7, 0xe4, 0xb1, 0x96, - 0xb8, 0x1f, 0x7f, 0xdb, 0x27, 0xf4, 0xbc, 0xa1, 0xea, 0x53, 0x50, 0xdf, 0xc9, 0x7b, 0x76, 0xa4, - 0x22, 0xf2, 0xca, 0x5e, 0x85, 0xa5, 0xb7, 0xad, 0x01, 0xf6, 0x0f, 0xfc, 0x00, 0x0f, 0x3b, 0xd4, - 0xbd, 0xec, 0x5a, 0xd8, 0x43, 0x67, 0x00, 0xe8, 0x2a, 0xd2, 0x75, 0xac, 0xf0, 0xa9, 0x85, 0x18, - 0x44, 0xfd, 0x91, 0x02, 0x0b, 0x11, 0xe1, 0x24, 0x39, 0x81, 0xaf, 0xc0, 0xf4, 0xae, 0x2f, 0x76, - 0xdb, 0x12, 0x67, 0x10, 0x59, 0x22, 0x68, 0x95, 0x5d, 0xbf, 0x63, 0xa2, 0x57, 0x01, 0x46, 0x3e, - 0x36, 0xf9, 0xb1, 0xdf, 0x98, 0x2c, 0xcd, 0x1a, 0x41, 0x65, 0x07, 0x87, 0x37, 0xa0, 0x6e, 0xd9, - 0x8e, 0x89, 0xe9, 0x91, 0xb0, 0x39, 0x2e, 0x43, 0x13, 0x18, 0xee, 0x8e, 0x8f, 0x4d, 0xf5, 0xf7, - 0xa3, 0x83, 0xdd, 0xcf, 0x73, 0x0b, 0x55, 0x9d, 0xcf, 0xaf, 0xa2, 0xd7, 0xb9, 0xc9, 0xbe, 0x03, - 0x8b, 0xcc, 0x4d, 0xee, 0x86, 0x55, 0x66, 0xde, 0x5c, 0x49, 0xb4, 0x4d, 0x6b, 0x5a, 0x3c, 0xb2, - 0x12, 0x44, 0xea, 0x2d, 0x38, 0x9e, 0x48, 0x25, 0x9f, 0x7c, 0x3b, 0xfd, 0xdd, 0xc4, 0xbe, 0x58, - 0x34, 0xa4, 0xae, 0xc9, 0x37, 0x98, 0x8a, 0x92, 0xfe, 0xf9, 0x65, 0x9a, 0x1d, 0x38, 0x21, 0x6d, - 0xda, 0x49, 0xb2, 0xdc, 0x48, 0x04, 0x8b, 0xe7, 0xf2, 0xf9, 0x25, 0xa2, 0xc6, 
0xff, 0x51, 0x60, - 0x29, 0x0b, 0xe1, 0x88, 0x1b, 0xc6, 0x1f, 0xe4, 0xdc, 0x7e, 0x7c, 0x69, 0x9c, 0x40, 0x9f, 0xca, - 0x06, 0xfb, 0x26, 0xbb, 0x3b, 0x35, 0xbe, 0x4f, 0xca, 0x93, 0xf5, 0xc9, 0x4f, 0x4b, 0xb1, 0x43, - 0x91, 0x82, 0xfb, 0x4d, 0x1f, 0x63, 0x93, 0x72, 0x2d, 0x71, 0xbd, 0xe9, 0xf9, 0x4c, 0xc2, 0x31, - 0xb7, 0x9b, 0xb4, 0xac, 0xcd, 0x80, 0x6b, 0xe3, 0x38, 0x7d, 0x6e, 0xf7, 0xaf, 0x7f, 0xab, 0x04, - 0xf3, 0x72, 0x87, 0xa0, 0xb7, 0x32, 0xee, 0x36, 0x9d, 0x1d, 0xd3, 0x40, 0xe9, 0x6a, 0x13, 0xbf, - 0x4b, 0x54, 0x9a, 0xfc, 0x2e, 0x51, 0x79, 0xb2, 0xbb, 0x44, 0x77, 0x60, 0xfe, 0xb1, 0x67, 0x05, - 0xc6, 0x83, 0x01, 0xd6, 0x07, 0xc6, 0x01, 0xf6, 0xb8, 0x17, 0x2e, 0x74, 0x43, 0x0d, 0x41, 0x72, - 0x8f, 0x50, 0xd0, 0x65, 0xd2, 0x63, 0xc3, 0xe5, 0xab, 0x2d, 0x29, 0x80, 0xeb, 0x3d, 0x36, 0x5c, - 0x46, 0x43, 0x51, 0xd4, 0x6f, 0x96, 0xe0, 0x78, 0xe6, 0x0d, 0x98, 0x8f, 0xaf, 0xa2, 0xcb, 0x71, - 0x15, 0x1d, 0xe6, 0x5a, 0x51, 0xf9, 0x50, 0xd7, 0x8a, 0x3a, 0x39, 0x0a, 0xcb, 0x3a, 0x75, 0x2f, - 0xd6, 0x9b, 0xfa, 0x97, 0x0a, 0x54, 0x85, 0x50, 0x63, 0x2f, 0xf9, 0xac, 0x8c, 0x08, 0x9a, 0x4e, - 0x33, 0xb6, 0x6d, 0xc3, 0x76, 0x74, 0x1f, 0x93, 0x08, 0x6a, 0xec, 0x95, 0x8a, 0x25, 0x4a, 0xb7, - 0xe6, 0x78, 0x78, 0xd3, 0xb0, 0x9d, 0x1e, 0x23, 0x42, 0x6d, 0x68, 0x32, 0x7e, 0x94, 0x15, 0x61, - 0x3a, 0x76, 0x56, 0x9b, 0xa7, 0x04, 0x84, 0x09, 0x61, 0xe6, 0xab, 0xdf, 0x57, 0x60, 0x21, 0xa1, - 0xd9, 0x5f, 0xbc, 0x46, 0xfc, 0x5e, 0x19, 0xea, 0xb1, 0x5e, 0x1e, 0xd3, 0x80, 0x35, 0x58, 0x14, - 0x99, 0x33, 0x3e, 0x0e, 0x26, 0xbb, 0xd2, 0xb2, 0xc0, 0x29, 0x7a, 0x38, 0x60, 0x41, 0xcf, 0x6d, - 0x58, 0x30, 0x1e, 0x19, 0xd6, 0x80, 0x5a, 0xd0, 0x44, 0xf1, 0xc4, 0x7c, 0x88, 0x1f, 0x86, 0x4d, - 0xac, 0xdd, 0x13, 0x5d, 0x6c, 0x01, 0x8a, 0x1b, 0xdd, 0x2f, 0xf2, 0xfd, 0x58, 0x7a, 0x56, 0xe1, - 0xfd, 0x22, 0xdf, 0x0f, 0xeb, 0xa3, 0xe9, 0xea, 0xf4, 0x62, 0x95, 0xcf, 0x5f, 0xe3, 0xc8, 0xaf, - 0x8f, 0xe0, 0xbe, 0x4d, 0x51, 0x89, 0xc2, 0x86, 0xc6, 0x87, 0x8e, 0xa7, 0xc7, 0xe9, 0x67, 0xc7, - 0x28, 0x8c, 0x52, 0x74, 0x43, 0x26, 0xea, 0x9f, 0x2b, 0x50, 0x0b, 0xfd, 0xc8, 0x98, 0x1e, 0xea, - 0xc0, 0x12, 0xcd, 0xed, 0x4f, 0x6a, 0x78, 0x4c, 0x27, 0x21, 0x42, 0xd4, 0x96, 0xb5, 0xdc, 0x86, - 0x26, 0x65, 0x15, 0x57, 0xf5, 0xb8, 0x8e, 0xf2, 0x85, 0x98, 0x2c, 0xfa, 0xfb, 0xab, 0x12, 0xa0, - 0xb4, 0x2b, 0xf9, 0x85, 0x31, 0xb2, 0x78, 0xa7, 0x55, 0x26, 0xef, 0xf4, 0xbb, 0x70, 0xac, 0xef, - 0x0c, 0x87, 0x16, 0xbd, 0x17, 0xe2, 0x78, 0x07, 0x93, 0x99, 0xdb, 0x22, 0xa3, 0x61, 0x7a, 0x62, - 0xea, 0x7b, 0x13, 0x4e, 0x68, 0xd8, 0x71, 0xb1, 0x1d, 0xba, 0xfe, 0x7b, 0xce, 0xde, 0x21, 0xe2, - 0xdb, 0x53, 0xd0, 0xca, 0xa2, 0xe7, 0xab, 0xe6, 0x11, 0xb4, 0xd6, 0xf6, 0x71, 0xff, 0x21, 0x5d, - 0x2b, 0x1d, 0x25, 0xfb, 0xa5, 0x05, 0xd5, 0x81, 0xd3, 0x67, 0x4f, 0x9b, 0xf2, 0x8d, 0x25, 0xf1, - 0x5d, 0xb0, 0xa7, 0x7f, 0x1a, 0x4e, 0x66, 0x56, 0xcb, 0xa5, 0x42, 0xd0, 0xbc, 0x8b, 0x83, 0x8d, - 0x47, 0xd8, 0x0e, 0xc3, 0x67, 0xf5, 0x07, 0xa5, 0x58, 0xa0, 0x4e, 0x8b, 0x0e, 0x91, 0x35, 0x84, - 0xba, 0xb0, 0x14, 0xa1, 0x60, 0x42, 0xcd, 0x1e, 0x1a, 0x64, 0x4f, 0x74, 0x66, 0x9f, 0x28, 0xd2, - 0x4a, 0xe8, 0xfb, 0x82, 0xd1, 0x13, 0x2a, 0x21, 0x2c, 0x71, 0xce, 0x5c, 0x4e, 0x9e, 0x33, 0xbf, - 0x0b, 0x28, 0x1e, 0x8a, 0xf3, 0xb5, 0x79, 0x65, 0x82, 0x57, 0x63, 0x9a, 0x6e, 0xf2, 0x7d, 0xa3, - 0x9c, 0xb7, 0x5f, 0xa6, 0x8f, 0xf4, 0xf6, 0x8b, 0x7a, 0x06, 0x4e, 0x91, 0x00, 0xfb, 0x3e, 0x0e, - 0x3c, 0xab, 0xbf, 0x8e, 0xfd, 0xbe, 0x67, 0xb9, 0x81, 0x13, 0x26, 0xb2, 0xa8, 0x3a, 0x9c, 0xce, - 0x29, 0xe7, 0xea, 0x7e, 0x13, 0xea, 0x66, 0x04, 0xce, 0xda, 0xe7, 0x48, 0xd2, 0x6a, 0x71, 0x02, - 0xf5, 
0x7d, 0x68, 0x26, 0x11, 0x32, 0xf3, 0x5e, 0x11, 0x54, 0xf6, 0xf1, 0xc0, 0x15, 0x17, 0x79, - 0xc8, 0x6f, 0xa2, 0x75, 0xb6, 0x76, 0x79, 0x88, 0x0f, 0xc4, 0x3e, 0x78, 0x8d, 0x42, 0xbe, 0x88, - 0x0f, 0xc2, 0xb6, 0x49, 0x8f, 0x11, 0x78, 0x56, 0x3f, 0xd9, 0xb6, 0x8c, 0xf2, 0xa8, 0x6d, 0xa4, - 0xdb, 0x86, 0x0c, 0xcc, 0xdb, 0x76, 0x3a, 0xf7, 0xa1, 0x03, 0x4a, 0x0b, 0xae, 0x63, 0xf2, 0xdf, - 0xea, 0x9f, 0x28, 0xb0, 0x98, 0xc2, 0x98, 0xf0, 0x6c, 0xe3, 0x05, 0x98, 0x15, 0xf5, 0x96, 0xd2, - 0xc9, 0xa1, 0x8c, 0x97, 0x26, 0x50, 0x50, 0x07, 0x16, 0x23, 0x8b, 0x16, 0x74, 0xe5, 0x74, 0x5f, - 0xc4, 0x17, 0x2e, 0x54, 0xdc, 0x66, 0x3f, 0x01, 0x51, 0xfb, 0xd0, 0x4c, 0x62, 0x4d, 0x32, 0xa6, - 0x0e, 0x25, 0xaf, 0xfa, 0xf7, 0x0a, 0xcc, 0x30, 0x58, 0x66, 0x67, 0x4b, 0xd3, 0x41, 0x29, 0x39, - 0x1d, 0xbc, 0x06, 0x75, 0xc6, 0x47, 0x0f, 0xaf, 0x71, 0xcd, 0xcb, 0xdb, 0xbb, 0x8c, 0x35, 0x1d, - 0xad, 0x30, 0x0c, 0x7f, 0x93, 0x66, 0x30, 0x7b, 0xa1, 0x2b, 0x13, 0x91, 0x02, 0x5c, 0xa7, 0x30, - 0xea, 0x72, 0x49, 0xc8, 0xcc, 0xd7, 0x30, 0x63, 0x7c, 0x33, 0xdf, 0x87, 0x5a, 0xa6, 0x4f, 0xeb, - 0xa5, 0x36, 0x38, 0xd5, 0x6d, 0xfa, 0xf6, 0x5d, 0x7a, 0x63, 0x12, 0x7d, 0x41, 0x3e, 0x24, 0x7f, - 0x26, 0x75, 0xc2, 0x2c, 0x91, 0x8d, 0x3c, 0xf6, 0x04, 0x34, 0xa3, 0x51, 0x3f, 0x80, 0x13, 0xb9, - 0x38, 0xe8, 0x8d, 0xf0, 0xa1, 0x51, 0xd3, 0xb3, 0x1e, 0xf1, 0x8d, 0x85, 0x79, 0xf9, 0x51, 0x83, - 0x35, 0x8a, 0xb0, 0x4e, 0xcb, 0xc5, 0x13, 0xa4, 0xec, 0xeb, 0xd2, 0xb3, 0x50, 0x15, 0xcf, 0x73, - 0xa3, 0x59, 0x28, 0x6f, 0xaf, 0x75, 0x9b, 0x53, 0xe4, 0xc7, 0xce, 0x7a, 0xb7, 0xa9, 0xa0, 0x2a, - 0x54, 0x7a, 0x6b, 0xdb, 0xdd, 0x66, 0xe9, 0xd2, 0x10, 0x9a, 0xc9, 0x17, 0xaa, 0xd1, 0x0a, 0x1c, - 0xeb, 0x6a, 0x5b, 0xdd, 0xf6, 0xdd, 0xf6, 0x76, 0x67, 0x6b, 0x53, 0xef, 0x6a, 0x9d, 0xf7, 0xda, - 0xdb, 0x1b, 0xcd, 0x29, 0x74, 0x1e, 0x4e, 0xc7, 0x0b, 0xde, 0xd9, 0xea, 0x6d, 0xeb, 0xdb, 0x5b, - 0xfa, 0xda, 0xd6, 0xe6, 0x76, 0xbb, 0xb3, 0xb9, 0xa1, 0x35, 0x15, 0x74, 0x1a, 0x4e, 0xc4, 0x51, - 0xee, 0x74, 0xd6, 0x3b, 0xda, 0xc6, 0x1a, 0xf9, 0xdd, 0xbe, 0xd7, 0x2c, 0x5d, 0x7a, 0x03, 0x1a, - 0xd2, 0xc5, 0x15, 0x22, 0x52, 0x77, 0x6b, 0xbd, 0x39, 0x85, 0x1a, 0x50, 0x8b, 0xf3, 0xa9, 0x42, - 0x65, 0x73, 0x6b, 0x7d, 0xa3, 0x59, 0x42, 0x00, 0x33, 0xdb, 0x6d, 0xed, 0xee, 0xc6, 0x76, 0xb3, - 0x7c, 0xe9, 0x56, 0xf2, 0x51, 0x0d, 0x8c, 0x16, 0xa1, 0xd1, 0x6b, 0x6f, 0xae, 0xdf, 0xd9, 0xfa, - 0x8a, 0xae, 0x6d, 0xb4, 0xd7, 0xdf, 0x6f, 0x4e, 0xa1, 0x25, 0x68, 0x0a, 0xd0, 0xe6, 0xd6, 0x36, - 0x83, 0x2a, 0x97, 0x1e, 0x26, 0xd6, 0xac, 0x18, 0x1d, 0x87, 0xc5, 0xb0, 0x4a, 0x7d, 0x4d, 0xdb, - 0x68, 0x6f, 0x6f, 0x10, 0x49, 0x24, 0xb0, 0xb6, 0xb3, 0xb9, 0xd9, 0xd9, 0xbc, 0xdb, 0x54, 0x08, - 0xd7, 0x08, 0xbc, 0xf1, 0x95, 0x0e, 0x41, 0x2e, 0xc9, 0xc8, 0x3b, 0x9b, 0x5f, 0xdc, 0xdc, 0xfa, - 0xf2, 0x66, 0xb3, 0x7c, 0xe9, 0x57, 0xe3, 0x39, 0x15, 0xd1, 0xbc, 0x72, 0x12, 0x56, 0x52, 0x35, - 0xea, 0x1b, 0xef, 0x6d, 0x6c, 0x6e, 0x37, 0xa7, 0xe4, 0xc2, 0xde, 0x76, 0x5b, 0x8b, 0x0a, 0x95, - 0x64, 0xe1, 0x56, 0xb7, 0x1b, 0x16, 0x96, 0xe4, 0xc2, 0xf5, 0x8d, 0x7b, 0x1b, 0x11, 0x65, 0xf9, - 0xd2, 0xd3, 0x00, 0xd1, 0xf8, 0x41, 0x75, 0x98, 0x5d, 0xdb, 0xda, 0xd9, 0xdc, 0xde, 0xd0, 0x9a, - 0x53, 0xa8, 0x06, 0xd3, 0x77, 0xdb, 0x3b, 0x77, 0x37, 0x9a, 0xca, 0xa5, 0x8b, 0x30, 0x17, 0xb7, - 0x26, 0x82, 0xd7, 0x7b, 0xbf, 0xb7, 0xbd, 0x71, 0x9f, 0x68, 0x64, 0x0e, 0xaa, 0x6b, 0x77, 0xb5, - 0xad, 0x9d, 0xee, 0xdb, 0xbd, 0xa6, 0x72, 0xfd, 0xff, 0x96, 0xc2, 0x07, 0x75, 0x7b, 0xd8, 0xa3, - 0xd7, 0x05, 0xd6, 0x61, 0x56, 0x3c, 0x68, 0x2f, 0xed, 0xda, 0xc8, 0x0f, 0xf0, 0xb7, 0x4e, 0x66, - 0x96, 0xf1, 0xb8, 0x60, 0x0a, 
0xbd, 0x47, 0xf7, 0xdc, 0x63, 0x4f, 0x5a, 0x9d, 0x4b, 0xec, 0x73, - 0xa7, 0x5e, 0xce, 0x6a, 0x9d, 0x2f, 0xc0, 0x08, 0xf9, 0xbe, 0x0f, 0xf3, 0xf2, 0xdb, 0x91, 0xe8, - 0xbc, 0xbc, 0x1f, 0x9e, 0xf1, 0x2c, 0x65, 0x4b, 0x2d, 0x42, 0x09, 0x59, 0xeb, 0xd0, 0x4c, 0xbe, - 0x1d, 0x89, 0xa4, 0x34, 0x93, 0x9c, 0xa7, 0x29, 0x5b, 0x4f, 0x17, 0x23, 0xc5, 0x2b, 0x48, 0x3d, - 0x89, 0xf8, 0x54, 0xf1, 0x23, 0x73, 0x19, 0x15, 0xe4, 0xbd, 0x44, 0xc7, 0x94, 0x23, 0xcf, 0x9a, - 0x28, 0xf1, 0x0a, 0x61, 0xc6, 0x83, 0x65, 0xb2, 0x72, 0xb2, 0x1f, 0xab, 0x52, 0xa7, 0xd0, 0x2f, - 0xc1, 0x42, 0x22, 0x17, 0x1c, 0x49, 0x84, 0xd9, 0x29, 0xee, 0xad, 0xa7, 0x0a, 0x71, 0xe4, 0x5e, - 0x8d, 0xe7, 0x7b, 0x27, 0x7b, 0x35, 0x23, 0x8f, 0x3c, 0xd9, 0xab, 0x99, 0xe9, 0xe2, 0xd4, 0x10, - 0xa5, 0xdc, 0x6e, 0xd9, 0x10, 0xb3, 0x72, 0xc9, 0x5b, 0xe7, 0x0b, 0x30, 0xe2, 0x0a, 0x49, 0x64, - 0x77, 0xcb, 0x0a, 0xc9, 0xce, 0x1b, 0x6f, 0x3d, 0x55, 0x88, 0x93, 0xec, 0xc9, 0x28, 0xab, 0x34, - 0xdd, 0x93, 0xa9, 0xcc, 0xe6, 0x74, 0x4f, 0xa6, 0x93, 0x52, 0x79, 0x4f, 0x26, 0xf2, 0x40, 0xd5, - 0xc2, 0x1c, 0xb5, 0xac, 0x9e, 0xcc, 0xce, 0x63, 0x53, 0xa7, 0xd0, 0x63, 0x58, 0xcd, 0x4b, 0x45, - 0x42, 0xcf, 0x1f, 0x22, 0x63, 0xaa, 0xf5, 0xc2, 0x64, 0xc8, 0x61, 0xc5, 0x18, 0x50, 0x7a, 0xf9, - 0x84, 0x9e, 0x91, 0xd5, 0x9d, 0xb3, 0x3c, 0x6b, 0x3d, 0x3b, 0x0e, 0x2d, 0xac, 0xe6, 0x2e, 0x54, - 0x45, 0x92, 0x13, 0x92, 0x5c, 0x60, 0x22, 0xb9, 0xaa, 0x75, 0x2a, 0xbb, 0x30, 0x64, 0xf4, 0x05, - 0xa8, 0x10, 0x28, 0x5a, 0x49, 0xe2, 0x09, 0x06, 0xab, 0xe9, 0x82, 0x90, 0xb8, 0x0d, 0x33, 0x2c, - 0x7b, 0x07, 0x49, 0xc7, 0x87, 0x52, 0x76, 0x51, 0xab, 0x95, 0x55, 0x14, 0xb2, 0xe8, 0xb2, 0x7f, - 0x0f, 0xc2, 0x93, 0x71, 0xd0, 0x99, 0xe4, 0xab, 0xd1, 0x72, 0xd6, 0x4f, 0xeb, 0x6c, 0x6e, 0x79, - 0xdc, 0x66, 0x13, 0xbb, 0xa4, 0xe7, 0x0b, 0x76, 0xfd, 0xb3, 0x6c, 0x36, 0xfb, 0x2c, 0x81, 0x75, - 0x6e, 0xfa, 0xac, 0x01, 0x3d, 0x93, 0x6b, 0xef, 0x52, 0x15, 0xcf, 0x8e, 0x43, 0x8b, 0x0f, 0x8d, - 0xe4, 0xf3, 0x4f, 0x6a, 0xd1, 0xd3, 0x6c, 0x59, 0x43, 0x23, 0xe7, 0xc9, 0x37, 0x75, 0x0a, 0xed, - 0xc3, 0xb1, 0x8c, 0x37, 0xe1, 0xd0, 0xb3, 0xf9, 0xfe, 0x57, 0xaa, 0xe5, 0xb9, 0xb1, 0x78, 0xf1, - 0x9a, 0x32, 0x4e, 0xe0, 0xe5, 0x9a, 0xf2, 0x53, 0x00, 0xe4, 0x9a, 0x8a, 0x8e, 0xf2, 0xa9, 0x21, - 0x72, 0x1f, 0x72, 0x22, 0xeb, 0x58, 0x3a, 0xc3, 0x10, 0x53, 0x1e, 0x63, 0x1f, 0x8e, 0x65, 0x6c, - 0x31, 0xc8, 0xc2, 0xe6, 0x6f, 0x7d, 0xc8, 0xc2, 0x16, 0xed, 0x55, 0x4c, 0xa1, 0x0f, 0x00, 0xdd, - 0xc5, 0x81, 0x1c, 0xca, 0xf9, 0x48, 0x1a, 0xa8, 0xc9, 0xdd, 0x8c, 0x1c, 0xfb, 0x94, 0xb6, 0x35, - 0xd4, 0xa9, 0x6b, 0x0a, 0xb2, 0xd9, 0x75, 0x93, 0xd4, 0x62, 0x1c, 0x5d, 0x48, 0x76, 0x5b, 0xde, - 0x7a, 0xbe, 0x75, 0x71, 0x02, 0xcc, 0xb0, 0x2d, 0x76, 0xf2, 0xfd, 0x51, 0xb1, 0x1e, 0xbc, 0x90, - 0x6f, 0x26, 0xf2, 0x1a, 0x3b, 0x5d, 0x5f, 0xee, 0x6a, 0x3b, 0x8c, 0xe7, 0x62, 0xc6, 0x74, 0x2e, - 0x3f, 0x1f, 0x24, 0x27, 0x9e, 0xcb, 0x32, 0xa0, 0xeb, 0xbf, 0x5b, 0x86, 0x39, 0x96, 0x37, 0xc3, - 0xc3, 0xcf, 0xfb, 0x00, 0x51, 0x0a, 0x1a, 0x3a, 0x9d, 0x94, 0x51, 0xca, 0xeb, 0x6b, 0x9d, 0xc9, - 0x2b, 0x8e, 0xbb, 0xb9, 0x58, 0x6a, 0x97, 0xec, 0xe6, 0xd2, 0x99, 0x6a, 0xb2, 0x9b, 0xcb, 0xc8, - 0x09, 0x53, 0xa7, 0xd0, 0xbb, 0x50, 0x0b, 0x33, 0x89, 0x64, 0xe3, 0x49, 0xa6, 0x44, 0xb5, 0x4e, - 0xe7, 0x94, 0xc6, 0xa5, 0x8b, 0x25, 0x08, 0xc9, 0xd2, 0xa5, 0x93, 0x8f, 0x64, 0xe9, 0xb2, 0x32, - 0x8b, 0xa2, 0xf6, 0xb2, 0x23, 0xfc, 0x8c, 0xf6, 0x4a, 0x19, 0x1d, 0x19, 0xed, 0x95, 0xcf, 0xfe, - 0xd5, 0xa9, 0x3b, 0xb7, 0x7f, 0xf8, 0x93, 0x33, 0xca, 0x8f, 0x7e, 0x72, 0x66, 0xea, 0x57, 0x3e, - 0x3a, 0xa3, 0xfc, 0xf0, 0xa3, 0x33, 0xca, 0x3f, 0x7f, 
0x74, 0x46, 0xf9, 0xf1, 0x47, 0x67, 0x94, - 0x6f, 0xfd, 0xe7, 0x99, 0xa9, 0x0f, 0xd4, 0x87, 0x37, 0xfc, 0x2b, 0x96, 0x73, 0xb5, 0xef, 0x59, - 0x97, 0x0d, 0xd7, 0xba, 0xea, 0x3e, 0xdc, 0xbb, 0x6a, 0xb8, 0x96, 0x7f, 0x95, 0xf3, 0xbd, 0xfa, - 0xe8, 0xc5, 0x07, 0x33, 0xf4, 0x5f, 0x4a, 0xbd, 0xf4, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x15, - 0xf3, 0x86, 0xa5, 0x0c, 0x6c, 0x00, 0x00, + // 6821 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x7d, 0x4d, 0x6c, 0x1c, 0xc9, + 0x75, 0x30, 0x7b, 0x66, 0x48, 0xce, 0xbc, 0xe1, 0x90, 0xc3, 0x12, 0x45, 0x52, 0xa3, 0xff, 0xde, + 0x3f, 0x49, 0xbb, 0xfa, 0x59, 0xed, 0x9f, 0x24, 0xef, 0x8f, 0x46, 0x24, 0x57, 0x3b, 0x6b, 0x89, + 0x1c, 0xf7, 0x90, 0x6b, 0xef, 0xfa, 0x83, 0xfb, 0x6b, 0x4d, 0x17, 0xc9, 0x5e, 0xcd, 0x74, 0xb7, + 0xbb, 0x7b, 0x24, 0xd1, 0xa7, 0x9c, 0x82, 0xc4, 0x27, 0x03, 0x89, 0x63, 0xc4, 0x08, 0x12, 0xe4, + 0x10, 0x24, 0xb7, 0xfc, 0x00, 0x49, 0x1c, 0xe4, 0x0f, 0x30, 0x12, 0xc3, 0x09, 0x10, 0x20, 0x87, + 0x04, 0xf0, 0x21, 0x40, 0xec, 0x4d, 0x80, 0x00, 0x39, 0xfb, 0x90, 0x53, 0x1c, 0xd4, 0x5f, 0x77, + 0x57, 0xff, 0xcd, 0x90, 0xbb, 0xde, 0x5d, 0x9f, 0x38, 0xfd, 0xea, 0xbd, 0x57, 0xaf, 0x5e, 0xbd, + 0x7a, 0xf5, 0xaa, 0xea, 0x55, 0x11, 0x6a, 0x86, 0x6b, 0x5d, 0x71, 0x3d, 0x27, 0x70, 0x10, 0x78, + 0x23, 0x3b, 0xb0, 0x86, 0xf8, 0xca, 0xa3, 0x17, 0x5b, 0x97, 0xf7, 0xac, 0x60, 0x7f, 0xf4, 0xe0, + 0x4a, 0xdf, 0x19, 0x5e, 0xdd, 0x73, 0xf6, 0x9c, 0xab, 0x14, 0xe5, 0xc1, 0x68, 0x97, 0x7e, 0xd1, + 0x0f, 0xfa, 0x8b, 0x91, 0xaa, 0x97, 0x60, 0xfe, 0x3d, 0xec, 0xf9, 0x96, 0x63, 0x6b, 0xf8, 0xeb, + 0x23, 0xec, 0x07, 0x68, 0x15, 0x66, 0x1f, 0x31, 0xc8, 0xaa, 0x72, 0x4e, 0xb9, 0x50, 0xd3, 0xc4, + 0xa7, 0xfa, 0xfb, 0x0a, 0x2c, 0x84, 0xc8, 0xbe, 0xeb, 0xd8, 0x3e, 0xce, 0xc7, 0x46, 0xe7, 0x61, + 0x8e, 0x8b, 0xa5, 0xdb, 0xc6, 0x10, 0xaf, 0x96, 0x68, 0x71, 0x9d, 0xc3, 0x36, 0x8d, 0x21, 0x46, + 0xcf, 0xc1, 0x82, 0x40, 0x11, 0x4c, 0xca, 0x14, 0x6b, 0x9e, 0x83, 0x79, 0x6d, 0xe8, 0x0a, 0x1c, + 0x13, 0x88, 0x86, 0x6b, 0x85, 0xc8, 0x15, 0x8a, 0xbc, 0xc8, 0x8b, 0xda, 0xae, 0xc5, 0xf1, 0xd5, + 0xaf, 0x42, 0x6d, 0x7d, 0xb3, 0xb7, 0xe6, 0xd8, 0xbb, 0xd6, 0x1e, 0x11, 0xd1, 0xc7, 0x1e, 0xa1, + 0x59, 0x55, 0xce, 0x95, 0x89, 0x88, 0xfc, 0x13, 0xb5, 0xa0, 0xea, 0x63, 0xc3, 0xeb, 0xef, 0x63, + 0x7f, 0xb5, 0x44, 0x8b, 0xc2, 0x6f, 0x42, 0xe5, 0xb8, 0x81, 0xe5, 0xd8, 0xfe, 0x6a, 0x99, 0x51, + 0xf1, 0x4f, 0xf5, 0xb7, 0x14, 0xa8, 0x77, 0x1d, 0x2f, 0xb8, 0x6f, 0xb8, 0xae, 0x65, 0xef, 0xa1, + 0x6b, 0x50, 0xa5, 0xba, 0xec, 0x3b, 0x03, 0xaa, 0x83, 0xf9, 0xeb, 0x4b, 0x57, 0xa2, 0x0e, 0xb9, + 0xd2, 0xe5, 0x65, 0x5a, 0x88, 0x85, 0x9e, 0x81, 0xf9, 0xbe, 0x63, 0x07, 0x86, 0x65, 0x63, 0x4f, + 0x77, 0x1d, 0x2f, 0xa0, 0xca, 0x99, 0xd6, 0x1a, 0x21, 0x94, 0xf0, 0x47, 0x27, 0xa1, 0xb6, 0xef, + 0xf8, 0x01, 0xc3, 0x28, 0x53, 0x8c, 0x2a, 0x01, 0xd0, 0xc2, 0x15, 0x98, 0xa5, 0x85, 0x96, 0xcb, + 0xd5, 0x30, 0x43, 0x3e, 0x3b, 0xae, 0xfa, 0xfd, 0x12, 0x4c, 0xdf, 0x77, 0x46, 0x76, 0x90, 0xa8, + 0xc6, 0x08, 0xf6, 0x79, 0x17, 0xc5, 0xaa, 0x31, 0x82, 0xfd, 0xa8, 0x1a, 0x82, 0xc1, 0x7a, 0x89, + 0x55, 0x43, 0x0a, 0x5b, 0x50, 0xf5, 0xb0, 0x61, 0x3a, 0xf6, 0xe0, 0x80, 0x8a, 0x50, 0xd5, 0xc2, + 0x6f, 0xd2, 0x7d, 0x3e, 0x1e, 0x58, 0xf6, 0xe8, 0x89, 0xee, 0xe1, 0x81, 0xf1, 0x00, 0x0f, 0xa8, + 0x28, 0x55, 0x6d, 0x9e, 0x83, 0x35, 0x06, 0x45, 0x6f, 0x42, 0xdd, 0xf5, 0x1c, 0xd7, 0xd8, 0x33, + 0x88, 0x06, 0x57, 0xa7, 0xa9, 0x92, 0x4e, 0xc5, 0x95, 0x44, 0x05, 0xee, 0x46, 0x38, 0x5a, 0x9c, + 0x00, 0xbd, 0x06, 0xf5, 0x91, 0x65, 0x72, 0x7d, 0xfb, 0xab, 0x33, 0xe7, 0xca, 0x17, 
0xea, 0xd7, + 0x8f, 0xc7, 0xe9, 0x3b, 0xeb, 0xbc, 0x54, 0x8b, 0x63, 0x12, 0xc2, 0xbd, 0x18, 0xe1, 0x6c, 0x21, + 0x61, 0x0c, 0x53, 0xd5, 0xa1, 0x16, 0x96, 0x44, 0xaa, 0x36, 0xa9, 0x02, 0x1b, 0x5c, 0xd5, 0x26, + 0x31, 0xf1, 0x48, 0xc1, 0x96, 0x49, 0x95, 0xd7, 0xd0, 0xea, 0x21, 0xac, 0x63, 0xa2, 0x65, 0x98, + 0x19, 0x60, 0x7b, 0x2f, 0xd8, 0xa7, 0xda, 0x6b, 0x68, 0xfc, 0x4b, 0xfd, 0x75, 0x05, 0x1a, 0x3b, + 0x3e, 0xf6, 0xc8, 0x38, 0xf0, 0x5d, 0xa3, 0x8f, 0xd1, 0x65, 0xa8, 0x0c, 0x1d, 0x13, 0x73, 0x13, + 0x3a, 0x11, 0x17, 0x32, 0x44, 0xba, 0xef, 0x98, 0x58, 0xa3, 0x68, 0xe8, 0x22, 0x54, 0x46, 0x96, + 0xc9, 0xec, 0x36, 0xb7, 0x4d, 0x14, 0x85, 0xa0, 0xee, 0x11, 0xd4, 0x72, 0x21, 0x2a, 0x41, 0x51, + 0x7f, 0xa6, 0xc0, 0x42, 0x58, 0xdb, 0x16, 0x35, 0x78, 0xf4, 0x12, 0xcc, 0xda, 0x38, 0x78, 0xec, + 0x78, 0x0f, 0xc7, 0xcb, 0x26, 0x30, 0xd1, 0xf3, 0x50, 0x76, 0xb9, 0x46, 0x0a, 0x09, 0x08, 0x16, + 0x41, 0xb6, 0xdc, 0x3e, 0xd5, 0x50, 0x31, 0xb2, 0xe5, 0xf6, 0x89, 0xb9, 0x06, 0x86, 0xb7, 0x87, + 0x69, 0x7f, 0x30, 0xd3, 0xaf, 0x32, 0x40, 0xc7, 0x44, 0xb7, 0x61, 0x7e, 0xe4, 0x63, 0xcf, 0xf6, + 0x75, 0x31, 0x78, 0x89, 0xb1, 0xd5, 0x65, 0xa6, 0x92, 0xde, 0xb5, 0x06, 0x23, 0xd8, 0xe2, 0xa3, + 0x5b, 0x05, 0xe8, 0xd8, 0xc1, 0xab, 0x2f, 0xbf, 0x67, 0x0c, 0x46, 0x18, 0x2d, 0xc1, 0xf4, 0x23, + 0xf2, 0x83, 0xb6, 0xbc, 0xac, 0xb1, 0x0f, 0xf5, 0xaf, 0x2b, 0x70, 0xf2, 0x1e, 0x31, 0xf0, 0x9e, + 0x61, 0x9b, 0x0f, 0x9c, 0x27, 0x3d, 0xdc, 0x1f, 0x79, 0x56, 0x70, 0xb0, 0xe6, 0xd8, 0x01, 0x7e, + 0x12, 0xa0, 0x77, 0x60, 0xd1, 0x16, 0xfc, 0x43, 0x41, 0x14, 0x2a, 0xc8, 0xc9, 0xcc, 0xd6, 0xb1, + 0xca, 0xb5, 0xa6, 0x2d, 0x03, 0x7c, 0x74, 0x27, 0x1a, 0x62, 0x82, 0x4f, 0x29, 0xdd, 0xa0, 0xde, + 0x06, 0x95, 0x86, 0x73, 0x11, 0xa3, 0x4f, 0xf0, 0x78, 0x15, 0x88, 0xd3, 0xd5, 0x0d, 0x5f, 0x27, + 0x2d, 0xa5, 0x5a, 0xae, 0x5f, 0x5f, 0x96, 0xac, 0x20, 0x6c, 0xb0, 0x56, 0xf3, 0x46, 0x76, 0xdb, + 0x27, 0x1a, 0x42, 0x37, 0xa8, 0x03, 0x27, 0x74, 0x7b, 0x9e, 0x33, 0x72, 0x57, 0xab, 0x85, 0x84, + 0x40, 0x09, 0xef, 0x12, 0x4c, 0xea, 0xd7, 0xb9, 0x93, 0xd0, 0x3d, 0xc7, 0x09, 0x76, 0x7d, 0xe1, + 0x18, 0x04, 0x58, 0xa3, 0x50, 0x74, 0x15, 0x8e, 0xf9, 0x23, 0xd7, 0x1d, 0xe0, 0x21, 0xb6, 0x03, + 0x63, 0xc0, 0x2a, 0x22, 0x7d, 0x56, 0xbe, 0x50, 0xd6, 0x50, 0xbc, 0x88, 0x32, 0xf6, 0xd1, 0x19, + 0x00, 0xd7, 0xb3, 0x1e, 0x59, 0x03, 0xbc, 0x87, 0xcd, 0xd5, 0x19, 0xca, 0x34, 0x06, 0x41, 0xaf, + 0x10, 0x5f, 0xdf, 0xef, 0x3b, 0x43, 0x77, 0xb5, 0x96, 0xd6, 0xb7, 0xe8, 0xa7, 0xae, 0xe7, 0xec, + 0x5a, 0x03, 0xac, 0x09, 0x5c, 0xf4, 0x1a, 0x54, 0x0d, 0xd7, 0x35, 0xbc, 0xa1, 0xe3, 0xad, 0xc2, + 0x78, 0xba, 0x10, 0x19, 0xbd, 0x0c, 0x4b, 0x9c, 0x87, 0xee, 0xb2, 0x42, 0xe6, 0x46, 0x67, 0x89, + 0x5d, 0xde, 0x29, 0xad, 0x2a, 0x1a, 0xe2, 0xe5, 0x9c, 0x96, 0x38, 0x55, 0xf5, 0xef, 0x14, 0x58, + 0x48, 0xf0, 0x44, 0xef, 0xc2, 0x9c, 0xe0, 0x10, 0x1c, 0xb8, 0xc2, 0x0d, 0x3c, 0x57, 0x20, 0xc6, + 0x15, 0xfe, 0x77, 0xfb, 0xc0, 0xc5, 0xd4, 0x5f, 0x8a, 0x0f, 0xf4, 0x14, 0x34, 0x06, 0x4e, 0xdf, + 0x18, 0x50, 0xaf, 0xe5, 0xe1, 0x5d, 0xee, 0xd5, 0xe7, 0x42, 0xa0, 0x86, 0x77, 0xd5, 0xdb, 0x50, + 0x8f, 0x31, 0x40, 0x08, 0xe6, 0x35, 0x56, 0xd5, 0x3a, 0xde, 0x35, 0x46, 0x83, 0xa0, 0x39, 0x85, + 0xe6, 0x01, 0x76, 0xec, 0x3e, 0x99, 0x45, 0x6d, 0x6c, 0x36, 0x15, 0xd4, 0x80, 0xda, 0x3d, 0xc1, + 0xa2, 0x59, 0x52, 0xbf, 0x5b, 0x86, 0xe3, 0xd4, 0xf0, 0xba, 0x8e, 0xc9, 0x47, 0x02, 0x9f, 0x72, + 0x9f, 0x82, 0x46, 0x9f, 0xf6, 0xa5, 0xee, 0x1a, 0x1e, 0xb6, 0x03, 0x3e, 0xf1, 0xcc, 0x31, 0x60, + 0x97, 0xc2, 0x90, 0x06, 0x4d, 0x9f, 0xb7, 0x48, 0xef, 0xb3, 0x91, 0xc3, 0x8d, 0x5b, 0x6a, 0x75, + 0xc1, 0x40, 
0xd3, 0x16, 0xfc, 0xd4, 0xc8, 0x9b, 0xf5, 0x0f, 0xfc, 0x7e, 0x30, 0x10, 0xde, 0xee, + 0x4a, 0x8a, 0x55, 0x52, 0xd8, 0x2b, 0x3d, 0x46, 0xb0, 0x61, 0x07, 0xde, 0x81, 0x26, 0xc8, 0xd1, + 0x5b, 0x50, 0x75, 0x1e, 0x61, 0x6f, 0x1f, 0x1b, 0xcc, 0xcb, 0xd4, 0xaf, 0x3f, 0x95, 0x62, 0xb5, + 0x26, 0x1c, 0xbd, 0x86, 0x7d, 0x67, 0xe4, 0xf5, 0xb1, 0xaf, 0x85, 0x44, 0xa8, 0x0d, 0x35, 0x4f, + 0x80, 0xb9, 0x17, 0x9a, 0x88, 0x43, 0x44, 0xd5, 0xba, 0x05, 0x73, 0x71, 0xe1, 0x50, 0x13, 0xca, + 0x0f, 0xf1, 0x01, 0x57, 0x26, 0xf9, 0x19, 0xf9, 0x27, 0xd6, 0xc3, 0xec, 0xe3, 0x56, 0xe9, 0x86, + 0xa2, 0x7a, 0x80, 0xa2, 0x96, 0xde, 0xc7, 0x81, 0x61, 0x1a, 0x81, 0x81, 0x10, 0x54, 0x68, 0x30, + 0xc6, 0x58, 0xd0, 0xdf, 0x84, 0xeb, 0x88, 0xbb, 0xea, 0x9a, 0x46, 0x7e, 0xa2, 0x53, 0x50, 0x0b, + 0x3d, 0x11, 0x8f, 0xc8, 0x22, 0x00, 0x89, 0x8c, 0x8c, 0x20, 0xc0, 0x43, 0x37, 0xa0, 0x8a, 0x69, + 0x68, 0xe2, 0x53, 0xfd, 0xd5, 0x69, 0x68, 0xa6, 0x6c, 0xe1, 0x16, 0x54, 0x87, 0xbc, 0x7a, 0xee, + 0x03, 0xcf, 0x48, 0xe1, 0x51, 0x4a, 0x48, 0x2d, 0xc4, 0x27, 0xd1, 0x07, 0xb1, 0xb5, 0x58, 0xfc, + 0x18, 0x7e, 0x33, 0x23, 0xdf, 0xd3, 0x4d, 0xcb, 0xc3, 0xfd, 0xc0, 0xf1, 0x0e, 0xb8, 0xa0, 0x73, + 0x03, 0x67, 0x6f, 0x5d, 0xc0, 0xd0, 0xcb, 0x00, 0xa6, 0xed, 0xeb, 0xd4, 0x86, 0xf7, 0x78, 0x3f, + 0x4a, 0x13, 0x60, 0x18, 0x26, 0x6a, 0x35, 0xd3, 0xf6, 0xb9, 0xc8, 0xaf, 0x43, 0x83, 0xc4, 0x5c, + 0xfa, 0x50, 0x04, 0x0e, 0xd3, 0xd4, 0x96, 0x56, 0x64, 0xb9, 0xc3, 0x08, 0x50, 0x9b, 0x73, 0xa3, + 0x0f, 0x1f, 0xdd, 0x86, 0x19, 0x1a, 0xf6, 0x88, 0x40, 0xe5, 0x42, 0x76, 0x73, 0xb9, 0xf5, 0xdd, + 0xa3, 0xa8, 0xcc, 0xf8, 0x38, 0x1d, 0xda, 0x82, 0xba, 0x61, 0xdb, 0x4e, 0x60, 0x30, 0x8f, 0xcf, + 0xc2, 0x96, 0xcb, 0x85, 0x6c, 0xda, 0x11, 0x3e, 0xe3, 0x15, 0xe7, 0x80, 0x5e, 0x83, 0x69, 0x3a, + 0x25, 0x70, 0x1f, 0x7e, 0x7e, 0xec, 0xa0, 0xd0, 0x18, 0x3e, 0x7a, 0x03, 0x66, 0x1f, 0x5b, 0xb6, + 0xe9, 0x3c, 0xf6, 0xb9, 0x3f, 0x95, 0x4c, 0xf8, 0xcb, 0xac, 0x28, 0x45, 0x2c, 0x68, 0x5a, 0x37, + 0xa1, 0x1e, 0x6b, 0xdf, 0x61, 0xec, 0xb7, 0xf5, 0x26, 0x34, 0x93, 0x6d, 0x3a, 0x94, 0xfd, 0x8f, + 0x60, 0x49, 0x1b, 0xd9, 0x91, 0x68, 0x62, 0x79, 0xf3, 0x32, 0xcc, 0x70, 0x6b, 0x60, 0xc6, 0x78, + 0xaa, 0x48, 0xad, 0x1a, 0xc7, 0x8d, 0xaf, 0x54, 0xf6, 0x0d, 0xdb, 0x1c, 0x60, 0x8f, 0xd7, 0x28, + 0x56, 0x2a, 0xef, 0x30, 0xa8, 0xfa, 0x06, 0x1c, 0x4f, 0x54, 0xcb, 0x17, 0x4a, 0x4f, 0xc3, 0xbc, + 0xeb, 0x98, 0xba, 0xcf, 0xc0, 0x22, 0x96, 0xac, 0x11, 0xdb, 0x11, 0xb8, 0x1d, 0x93, 0x90, 0xf7, + 0x02, 0xc7, 0x4d, 0x8b, 0x3d, 0x19, 0xf9, 0x2a, 0x2c, 0x27, 0xc9, 0x59, 0xf5, 0xea, 0x5b, 0xb0, + 0xa2, 0xe1, 0xa1, 0xf3, 0x08, 0x1f, 0x95, 0x75, 0x0b, 0x56, 0xd3, 0x0c, 0x38, 0xf3, 0xf7, 0x61, + 0x25, 0x82, 0xf6, 0x02, 0x23, 0x18, 0xf9, 0x87, 0x62, 0xce, 0x57, 0x91, 0x0f, 0x1c, 0x9f, 0x75, + 0x64, 0x55, 0x13, 0x9f, 0xea, 0x0a, 0x4c, 0x77, 0x1d, 0xb3, 0xd3, 0x45, 0xf3, 0x50, 0xb2, 0x5c, + 0x4e, 0x5c, 0xb2, 0x5c, 0xb5, 0x1f, 0xaf, 0x73, 0x93, 0x45, 0x9d, 0xac, 0xea, 0x24, 0x2a, 0xba, + 0x01, 0xf3, 0x86, 0x69, 0x5a, 0xc4, 0x90, 0x8c, 0x81, 0x6e, 0xb9, 0x22, 0x68, 0x5e, 0x4c, 0x74, + 0x7d, 0xa7, 0xab, 0x35, 0x22, 0xc4, 0x8e, 0xeb, 0xab, 0x77, 0xa0, 0x16, 0x05, 0xe8, 0xaf, 0x44, + 0x2b, 0xc2, 0xd2, 0xf8, 0x58, 0x2e, 0x5c, 0x2e, 0x6e, 0xa6, 0x26, 0x49, 0x2e, 0xe6, 0x2b, 0x00, + 0xa1, 0x53, 0x15, 0xe1, 0xe1, 0xf1, 0x4c, 0x96, 0x5a, 0x0c, 0x51, 0xfd, 0xf7, 0x4a, 0xdc, 0xc9, + 0xc6, 0x9a, 0x6c, 0x86, 0x4d, 0x36, 0x25, 0xa7, 0x5b, 0x3a, 0xa4, 0xd3, 0x7d, 0x11, 0xa6, 0xfd, + 0xc0, 0x08, 0x30, 0x8f, 0xc7, 0x4f, 0x66, 0x13, 0x92, 0x8a, 0xb1, 0xc6, 0x30, 0xd1, 0x69, 0x80, + 0xbe, 0x87, 0x8d, 0x00, 0x9b, 0xba, 
0xc1, 0x66, 0x85, 0xb2, 0x56, 0xe3, 0x90, 0x76, 0x40, 0xbc, + 0x88, 0x58, 0x41, 0x64, 0x4c, 0x84, 0x39, 0xdd, 0x18, 0xad, 0x25, 0x42, 0xef, 0x35, 0x33, 0xd6, + 0x7b, 0x71, 0x52, 0xee, 0xbd, 0x22, 0x4f, 0x3c, 0x5b, 0xe4, 0x89, 0x19, 0xd1, 0x24, 0x9e, 0xb8, + 0x5a, 0xe4, 0x89, 0x39, 0x9b, 0x62, 0x4f, 0x9c, 0xe1, 0x48, 0x6a, 0x59, 0x8e, 0xe4, 0xb3, 0x74, + 0x9d, 0x7f, 0x51, 0x82, 0xd5, 0xf4, 0x78, 0xe6, 0x7e, 0xec, 0x65, 0x98, 0xf1, 0x29, 0xa4, 0xd8, + 0x7f, 0x72, 0x2a, 0x8e, 0x8b, 0xee, 0x40, 0xc5, 0xb2, 0x77, 0x1d, 0x3e, 0xf0, 0xae, 0x14, 0xd2, + 0xf0, 0x9a, 0xae, 0x74, 0xec, 0x5d, 0x87, 0x69, 0x90, 0xd2, 0xa2, 0x7b, 0x70, 0x2c, 0x5c, 0x59, + 0xfb, 0x3a, 0x63, 0x8c, 0x45, 0x9c, 0x27, 0x59, 0x69, 0x18, 0x55, 0x71, 0x8e, 0x28, 0xa2, 0xeb, + 0x71, 0x32, 0x12, 0xe3, 0x10, 0x74, 0x3f, 0x30, 0x86, 0xae, 0xb0, 0xd8, 0x10, 0xd0, 0x7a, 0x0d, + 0x6a, 0x61, 0xf5, 0x87, 0xd2, 0x5d, 0x07, 0x96, 0x12, 0x63, 0x84, 0x2d, 0x24, 0xc3, 0x41, 0xa5, + 0x4c, 0x3a, 0xa8, 0xd4, 0x9f, 0x2a, 0xf1, 0x81, 0xfe, 0xb6, 0x35, 0x08, 0xb0, 0x97, 0x1a, 0xe8, + 0xaf, 0x0a, 0xbe, 0x6c, 0x94, 0x9f, 0x2b, 0xe0, 0xcb, 0xd6, 0x69, 0x7c, 0xc4, 0xbe, 0x07, 0xf3, + 0xd4, 0xc4, 0x75, 0x1f, 0x0f, 0x68, 0xac, 0xc4, 0xf5, 0x78, 0x35, 0x9b, 0x01, 0xab, 0x9d, 0x0d, + 0x91, 0x1e, 0xa7, 0x60, 0x7d, 0xd3, 0x18, 0xc4, 0x61, 0xad, 0xdb, 0x80, 0xd2, 0x48, 0x87, 0xd2, + 0xe0, 0x7d, 0xe2, 0x2f, 0xfd, 0x20, 0x73, 0xe6, 0xde, 0xa5, 0x62, 0x14, 0x5b, 0x1e, 0x13, 0x55, + 0xe3, 0xb8, 0xea, 0xbf, 0x96, 0x01, 0xa2, 0xc2, 0xcf, 0xb9, 0xa3, 0xbc, 0x15, 0x3a, 0x2c, 0x16, + 0x71, 0xaa, 0xd9, 0x2c, 0x33, 0x5d, 0x55, 0x47, 0x76, 0x55, 0x2c, 0xf6, 0x7c, 0x2e, 0x87, 0xc1, + 0xa1, 0x9d, 0xd4, 0xec, 0xe7, 0xcd, 0x49, 0xbd, 0x0d, 0xcb, 0x49, 0x33, 0xe1, 0x1e, 0xea, 0x05, + 0x98, 0xb6, 0x02, 0x3c, 0x64, 0xbb, 0xbd, 0x89, 0x0d, 0x8b, 0x18, 0x3a, 0x43, 0x52, 0xdf, 0x84, + 0x65, 0xb9, 0xaf, 0x0e, 0x17, 0xba, 0xa8, 0xf7, 0x92, 0xb1, 0x4f, 0xe4, 0x2a, 0xb9, 0x7d, 0x64, + 0x6e, 0xfd, 0x24, 0x69, 0x18, 0xa6, 0xfa, 0x03, 0x05, 0x8e, 0x27, 0x8a, 0x72, 0x06, 0xfe, 0x57, + 0x53, 0x03, 0x98, 0xf9, 0xd6, 0x97, 0x0b, 0x6a, 0xf9, 0x14, 0x47, 0xf1, 0x97, 0xa1, 0x25, 0x77, + 0x8f, 0xa4, 0xda, 0x9b, 0x89, 0xa1, 0x7c, 0x7e, 0xac, 0xd0, 0xe1, 0x78, 0xee, 0xc2, 0xc9, 0x4c, + 0xc6, 0x69, 0x9d, 0x97, 0x27, 0xd4, 0xf9, 0xff, 0x94, 0xe2, 0x3e, 0xbb, 0x1d, 0x04, 0x9e, 0xf5, + 0x60, 0x14, 0xe0, 0x4f, 0x36, 0xa8, 0x5a, 0x0f, 0x47, 0x36, 0xf3, 0xb3, 0x2f, 0x64, 0x53, 0x46, + 0xb5, 0x67, 0x8e, 0xf1, 0x9e, 0x3c, 0xc6, 0x2b, 0x94, 0xd5, 0x8b, 0x63, 0x59, 0x15, 0x8e, 0xf6, + 0xcf, 0x72, 0x10, 0xff, 0x83, 0x02, 0x0b, 0x89, 0x5e, 0x41, 0xb7, 0x01, 0x8c, 0x50, 0x74, 0x6e, + 0x1f, 0xe7, 0xc6, 0x35, 0x51, 0x8b, 0xd1, 0x90, 0x39, 0x91, 0xc5, 0x8b, 0x19, 0x73, 0x62, 0x46, + 0xbc, 0x18, 0x86, 0x8b, 0xaf, 0x47, 0x8b, 0x5d, 0xb6, 0x49, 0xaa, 0x16, 0x2e, 0x76, 0x19, 0xad, + 0x20, 0x51, 0x7f, 0xad, 0x04, 0x4b, 0x59, 0xdc, 0xd1, 0xb3, 0x50, 0xee, 0xbb, 0x23, 0xde, 0x12, + 0xe9, 0x68, 0x68, 0xcd, 0x1d, 0xed, 0xf8, 0xc6, 0x1e, 0xd6, 0x08, 0x02, 0xba, 0x0a, 0x33, 0x43, + 0x3c, 0x74, 0xbc, 0x03, 0x2e, 0xb7, 0xb4, 0xdd, 0x70, 0x9f, 0x96, 0x30, 0x6c, 0x8e, 0x86, 0xae, + 0x47, 0x61, 0x35, 0x93, 0x77, 0x55, 0x5a, 0x3d, 0xb0, 0x22, 0x46, 0x12, 0xc6, 0xd2, 0xd7, 0x61, + 0xd6, 0xf5, 0x9c, 0x3e, 0xf6, 0x7d, 0xbe, 0x1b, 0xb2, 0x9a, 0x38, 0xab, 0x22, 0x45, 0x9c, 0x86, + 0x23, 0xa2, 0x5b, 0x00, 0x51, 0x00, 0xc5, 0x67, 0xa6, 0x56, 0x6e, 0xbc, 0xe5, 0x6b, 0x31, 0x6c, + 0xf5, 0x7b, 0x25, 0x58, 0xce, 0xd6, 0x1c, 0xba, 0x1c, 0xd7, 0xcb, 0xc9, 0x0c, 0x55, 0xcb, 0xea, + 0x79, 0x35, 0xa1, 0x9e, 0x33, 0x19, 0x14, 0x59, 0x5a, 0xba, 
0x99, 0xd4, 0xd2, 0xd9, 0x0c, 0xc2, + 0x6c, 0x65, 0xdd, 0x4c, 0x2a, 0x2b, 0x8b, 0x34, 0x5b, 0x67, 0xed, 0x0c, 0x9d, 0x9d, 0xcf, 0x6a, + 0x63, 0xbe, 0xea, 0xfe, 0x56, 0x81, 0xb9, 0xb8, 0x5c, 0x72, 0xc8, 0xaa, 0x24, 0x42, 0x56, 0xb4, + 0x09, 0x8b, 0x26, 0xdb, 0xb9, 0xd5, 0x2d, 0x3b, 0xc0, 0xde, 0xae, 0xd1, 0x17, 0x51, 0xe1, 0xf9, + 0x0c, 0xbb, 0xe8, 0x08, 0x1c, 0x26, 0x78, 0x93, 0xd3, 0x86, 0x60, 0xd2, 0x82, 0x90, 0x8f, 0xf0, + 0x5a, 0x13, 0x30, 0x8a, 0x11, 0xa9, 0xff, 0xa2, 0xc0, 0xb1, 0x0c, 0x05, 0x8f, 0x69, 0xc8, 0x4e, + 0x7e, 0x43, 0x2e, 0xe4, 0x77, 0xdd, 0xd8, 0xf6, 0xbc, 0x93, 0xd1, 0x9e, 0xc9, 0xf9, 0xc5, 0x9b, + 0xf5, 0x33, 0x05, 0x8e, 0x67, 0x62, 0x65, 0x6e, 0xaf, 0x5e, 0x87, 0xaa, 0xf7, 0x44, 0x7f, 0x70, + 0x10, 0x60, 0x3f, 0x6b, 0x60, 0xef, 0xc4, 0xce, 0x50, 0x66, 0xbd, 0x27, 0x77, 0x08, 0x1e, 0x7a, + 0x19, 0x6a, 0xde, 0x13, 0x1d, 0x7b, 0x9e, 0xe3, 0x09, 0x5f, 0x94, 0x4b, 0x54, 0xf5, 0x9e, 0x6c, + 0x50, 0x44, 0x52, 0x53, 0x20, 0x6a, 0xaa, 0x8c, 0xa9, 0x29, 0x88, 0x6a, 0x0a, 0xc2, 0x9a, 0xa6, + 0xc7, 0xd4, 0x14, 0xf0, 0x9a, 0xd4, 0x3f, 0x28, 0xc1, 0xa9, 0x22, 0x75, 0x7d, 0x62, 0x8a, 0xd8, + 0x00, 0xe4, 0x3d, 0xd1, 0x5d, 0xa3, 0xff, 0x10, 0x07, 0xbe, 0x6e, 0x7a, 0x8e, 0xeb, 0x62, 0x73, + 0x9c, 0x46, 0x9a, 0xde, 0x93, 0x2e, 0xa3, 0x58, 0x67, 0x04, 0x47, 0xd2, 0xcc, 0x06, 0xa0, 0x20, + 0x5d, 0xf5, 0x18, 0x15, 0x35, 0x83, 0x44, 0xd5, 0xea, 0x87, 0x30, 0x17, 0xf7, 0x10, 0x63, 0x6c, + 0xff, 0x75, 0x68, 0x70, 0x0f, 0xa2, 0xf7, 0x9d, 0x91, 0x1d, 0x8c, 0x53, 0xd4, 0x1c, 0xc7, 0x5e, + 0x23, 0xc8, 0xea, 0xd7, 0xc3, 0xe1, 0xf6, 0xa9, 0x55, 0xf9, 0xcb, 0x25, 0xa8, 0x75, 0x86, 0xc6, + 0x1e, 0xee, 0xb9, 0xb8, 0x4f, 0x66, 0x7a, 0x8b, 0x7c, 0xf0, 0x7e, 0x67, 0x1f, 0xe8, 0x1d, 0x39, + 0x6a, 0x61, 0x71, 0xea, 0xb3, 0xd2, 0x39, 0xa2, 0xe0, 0x30, 0x66, 0x61, 0x72, 0x0d, 0x96, 0x46, + 0x3e, 0xf6, 0x74, 0xdf, 0xc5, 0x7d, 0x6b, 0xd7, 0xc2, 0xa6, 0xce, 0xaa, 0x43, 0xb4, 0x3a, 0x44, + 0xca, 0x7a, 0xa2, 0x88, 0xf2, 0xcc, 0x5a, 0xca, 0x1c, 0xcb, 0x5c, 0xca, 0x7c, 0xdc, 0x50, 0xe6, + 0x3a, 0x54, 0xbf, 0x88, 0x0f, 0xd8, 0x62, 0x7f, 0x42, 0x3a, 0xf5, 0xdb, 0x15, 0x58, 0xc9, 0x39, + 0x06, 0xa2, 0x2b, 0x45, 0x77, 0xa4, 0xbb, 0xd8, 0xb3, 0x1c, 0x53, 0xf4, 0x5a, 0xdf, 0x1d, 0x75, + 0x29, 0x00, 0x9d, 0x04, 0xf2, 0xa1, 0x7f, 0x7d, 0xe4, 0xf0, 0x60, 0xb4, 0xac, 0x55, 0xfb, 0xee, + 0xe8, 0x4b, 0xe4, 0x5b, 0xd0, 0xfa, 0xfb, 0x86, 0x87, 0x99, 0xff, 0x60, 0xb4, 0x3d, 0x0a, 0x40, + 0x2f, 0xc2, 0x71, 0x36, 0x37, 0xea, 0x03, 0x6b, 0x68, 0x11, 0x2f, 0x1b, 0x1b, 0x1a, 0x65, 0x0d, + 0xb1, 0xc2, 0x7b, 0xa4, 0xac, 0x63, 0xb3, 0xc1, 0xa0, 0x42, 0xc3, 0x71, 0x86, 0xba, 0xdf, 0x77, + 0x3c, 0xac, 0x1b, 0xe6, 0x87, 0x74, 0x1c, 0x94, 0xb5, 0xba, 0xe3, 0x0c, 0x7b, 0x04, 0xd6, 0x36, + 0x3f, 0x44, 0x67, 0xa1, 0xde, 0x77, 0x47, 0x3e, 0x0e, 0x74, 0xf2, 0x87, 0x6e, 0xd6, 0xd5, 0x34, + 0x60, 0xa0, 0x35, 0x77, 0xe4, 0xc7, 0x10, 0x86, 0x64, 0x79, 0x36, 0x1b, 0x47, 0xb8, 0x8f, 0x87, + 0xf4, 0xb4, 0x7b, 0x7f, 0xb4, 0x87, 0x5d, 0x63, 0x0f, 0x33, 0xd1, 0xc4, 0x8e, 0x9b, 0x74, 0xda, + 0xfd, 0x0e, 0x47, 0xa1, 0x02, 0x6a, 0xf3, 0xfb, 0xf1, 0x4f, 0x1f, 0xbd, 0x0b, 0xb3, 0x23, 0x9b, + 0x1a, 0xc0, 0x6a, 0x8d, 0xd2, 0x5e, 0x9b, 0xe0, 0xd0, 0xed, 0xca, 0x0e, 0x23, 0xe1, 0x67, 0x80, + 0x9c, 0x01, 0xba, 0x05, 0x2d, 0xae, 0x28, 0xff, 0xb1, 0xe1, 0x26, 0xb5, 0x05, 0x54, 0x05, 0xcb, + 0x0c, 0xa3, 0xf7, 0xd8, 0x70, 0xe3, 0x1a, 0x6b, 0xdd, 0x82, 0xb9, 0x38, 0xd3, 0x43, 0xd9, 0xd2, + 0x1d, 0x68, 0x48, 0x8d, 0x24, 0xbd, 0x4d, 0x95, 0xe2, 0x5b, 0xdf, 0x10, 0x63, 0xab, 0x4a, 0x00, + 0x3d, 0xeb, 0x1b, 0x34, 0x47, 0x81, 0x4a, 0x46, 0xf9, 0x54, 0x34, 0xf6, 0xa1, 0x1a, 
0xd0, 0x90, + 0xd2, 0x02, 0x88, 0x4b, 0xa6, 0xe7, 0xff, 0xdc, 0x25, 0x93, 0xdf, 0x04, 0xe6, 0x39, 0x03, 0x21, + 0x01, 0xfd, 0x4d, 0x60, 0xf4, 0x00, 0x9a, 0x1d, 0xa7, 0xd1, 0xdf, 0xb4, 0x0a, 0xfc, 0x88, 0xe7, + 0xf7, 0xd4, 0x34, 0xf6, 0xa1, 0xfe, 0xb6, 0x02, 0xb0, 0x66, 0xb8, 0xc6, 0x03, 0x6b, 0x60, 0x05, + 0x07, 0xe8, 0x22, 0x34, 0x0d, 0xd3, 0xd4, 0xfb, 0x02, 0x62, 0x61, 0x91, 0x70, 0xb5, 0x60, 0x98, + 0xe6, 0x5a, 0x0c, 0x8c, 0x9e, 0x87, 0x45, 0xe2, 0x50, 0x65, 0x5c, 0x96, 0x81, 0xd5, 0x24, 0x05, + 0x12, 0xf2, 0x0d, 0x58, 0x25, 0x7c, 0x8d, 0xe1, 0x03, 0x0b, 0xdb, 0x81, 0x4c, 0xc3, 0x52, 0xb3, + 0x96, 0x0d, 0xd3, 0x6c, 0xb3, 0xe2, 0x38, 0xa5, 0xfa, 0x37, 0x33, 0x70, 0x5a, 0xee, 0xf1, 0x64, + 0xa6, 0xc6, 0x2d, 0x98, 0x4b, 0xc8, 0x9b, 0xca, 0x71, 0x88, 0x5a, 0xa8, 0x49, 0xb8, 0x89, 0x5c, + 0x84, 0x52, 0x2a, 0x17, 0x21, 0x33, 0x0b, 0xa4, 0xfc, 0x09, 0x65, 0x81, 0x54, 0x3e, 0x66, 0x16, + 0xc8, 0xf4, 0x51, 0xb3, 0x40, 0xe6, 0x26, 0xce, 0x02, 0x79, 0x96, 0xba, 0x5e, 0x51, 0x23, 0x0d, + 0x07, 0x98, 0x4f, 0x68, 0x84, 0xdc, 0x6d, 0x91, 0x05, 0x98, 0xc8, 0x16, 0x99, 0x3d, 0x4c, 0xb6, + 0x48, 0x35, 0x37, 0x5b, 0xe4, 0x1c, 0xcc, 0xd9, 0x8e, 0x6e, 0xe3, 0xc7, 0x3a, 0xe9, 0x16, 0x7f, + 0xb5, 0xce, 0xfa, 0xc8, 0x76, 0x36, 0xf1, 0xe3, 0x2e, 0x81, 0xa0, 0xf3, 0x30, 0x37, 0x34, 0xfc, + 0x87, 0xd8, 0xa4, 0x69, 0x1b, 0xfe, 0x6a, 0x83, 0xda, 0x53, 0x9d, 0xc1, 0xba, 0x04, 0x84, 0x9e, + 0x81, 0x50, 0x0e, 0x8e, 0x34, 0x4f, 0x91, 0x1a, 0x02, 0xca, 0xd0, 0x62, 0x99, 0x27, 0x0b, 0x47, + 0xcc, 0x3c, 0x69, 0x1e, 0x26, 0xf3, 0xe4, 0x32, 0x34, 0xc5, 0x6f, 0x91, 0x7a, 0xc2, 0x4e, 0x12, + 0x68, 0xd6, 0xc9, 0x82, 0x28, 0x13, 0xe9, 0x25, 0x79, 0x89, 0x2a, 0x50, 0x98, 0xa8, 0xf2, 0x87, + 0x0a, 0x5f, 0xd3, 0x86, 0x03, 0x88, 0x9f, 0x90, 0x4b, 0xc9, 0x0d, 0xca, 0x51, 0x92, 0x1b, 0xd0, + 0x76, 0x6e, 0xfa, 0xc7, 0xc5, 0x7c, 0x4e, 0xe3, 0x12, 0x40, 0xd4, 0xfb, 0xe1, 0x72, 0xf3, 0x93, + 0x48, 0x63, 0x53, 0xff, 0x53, 0x81, 0xd3, 0x9c, 0x5f, 0x4e, 0xae, 0x57, 0x86, 0x95, 0x2b, 0x39, + 0x56, 0xde, 0xf7, 0xb0, 0x89, 0xed, 0xc0, 0x32, 0x06, 0x34, 0x80, 0x11, 0x27, 0xc8, 0x11, 0x98, + 0xc6, 0x50, 0xe7, 0x61, 0x8e, 0xa5, 0x63, 0xf2, 0x95, 0x27, 0xcb, 0xba, 0xac, 0xd3, 0x8c, 0x4c, + 0xbe, 0xb8, 0xdc, 0xca, 0xf2, 0x2c, 0x95, 0xdc, 0x2d, 0x8b, 0xb1, 0x0e, 0x46, 0x75, 0x60, 0x25, + 0xe7, 0x2c, 0x3f, 0xb3, 0x9b, 0x94, 0x74, 0x37, 0x15, 0x2a, 0x29, 0xdd, 0x4d, 0xdf, 0x56, 0xe0, + 0x6c, 0x6a, 0x05, 0xfc, 0xd9, 0x6b, 0x56, 0xfd, 0x53, 0x25, 0xb4, 0x9f, 0xa4, 0xc9, 0xaf, 0xa5, + 0x4d, 0xfe, 0x99, 0xa2, 0x05, 0x7d, 0xa6, 0xd1, 0xbf, 0x97, 0x6b, 0xf4, 0xcf, 0x17, 0x6e, 0x0e, + 0x8c, 0xd3, 0xe7, 0xbf, 0x29, 0x70, 0x22, 0x57, 0x80, 0x44, 0x3c, 0xa8, 0x24, 0xe3, 0x41, 0x1e, + 0x4b, 0x46, 0xd1, 0x3f, 0x8b, 0x25, 0x69, 0x80, 0xcf, 0x83, 0x36, 0x7d, 0x68, 0x3c, 0xb1, 0x86, + 0xa3, 0x21, 0x0f, 0x26, 0x09, 0xbb, 0xfb, 0x0c, 0x72, 0x94, 0x68, 0xf2, 0x2a, 0x2c, 0x31, 0x47, + 0x4f, 0x03, 0x9a, 0x88, 0x82, 0x05, 0x95, 0x8b, 0xac, 0x8c, 0xc4, 0x36, 0x9c, 0x40, 0x6d, 0xc3, + 0x62, 0xd8, 0xac, 0xc2, 0x5c, 0xa6, 0x58, 0x6e, 0x52, 0x49, 0xce, 0x4d, 0xb2, 0x61, 0x66, 0x1d, + 0x3f, 0xb2, 0xfa, 0xf8, 0x13, 0x49, 0x8b, 0x3e, 0x07, 0x75, 0x17, 0x7b, 0x43, 0xcb, 0xf7, 0xc3, + 0x59, 0xbd, 0xa6, 0xc5, 0x41, 0xea, 0x59, 0xa8, 0xad, 0xad, 0x77, 0x78, 0x95, 0x19, 0xa2, 0xaa, + 0xff, 0x35, 0x03, 0x0b, 0x49, 0x1b, 0xbb, 0x99, 0xca, 0x95, 0x3a, 0x9d, 0xb9, 0xcf, 0x96, 0xb1, + 0xc1, 0xfc, 0xbc, 0x58, 0x7a, 0x95, 0xd2, 0x89, 0x04, 0xe1, 0xf2, 0x4a, 0xac, 0xc8, 0x56, 0x61, + 0xb6, 0xef, 0x0c, 0x87, 0x86, 0x6d, 0x8a, 0xe4, 0x76, 0xfe, 0x49, 0x24, 0x35, 0xbc, 0x3d, 0xb6, + 0xb5, 0x5c, 
0xd3, 0xe8, 0x6f, 0x62, 0x02, 0xc4, 0x19, 0x5a, 0x36, 0xcd, 0xb6, 0xa2, 0xbd, 0x54, + 0xd3, 0x80, 0x83, 0xd6, 0x2d, 0x0f, 0x5d, 0x80, 0x0a, 0xb6, 0x1f, 0x89, 0x33, 0x27, 0x69, 0x8b, + 0x53, 0xac, 0x89, 0x34, 0x8a, 0x81, 0x2e, 0xc2, 0xcc, 0x90, 0x98, 0x95, 0x38, 0x91, 0x5f, 0x4c, + 0x25, 0x81, 0x6b, 0x1c, 0x01, 0xbd, 0x00, 0xb3, 0x26, 0xd5, 0x9e, 0x58, 0x04, 0x20, 0x29, 0x6f, + 0x8b, 0x16, 0x69, 0x02, 0x05, 0xbd, 0x15, 0xee, 0xaf, 0xd7, 0xd2, 0x07, 0x5f, 0x09, 0x35, 0x67, + 0x6e, 0xad, 0x6f, 0xca, 0x8b, 0x54, 0x48, 0xef, 0xd2, 0x27, 0xb9, 0x14, 0x2f, 0x55, 0x4f, 0x40, + 0x75, 0xe0, 0xec, 0x31, 0xeb, 0xa9, 0xb3, 0x9b, 0x11, 0x03, 0x67, 0x8f, 0x1a, 0xcf, 0x12, 0x4c, + 0xfb, 0x81, 0x69, 0xd9, 0x34, 0x96, 0xaa, 0x6a, 0xec, 0x83, 0x0c, 0x52, 0xfa, 0x43, 0x77, 0xec, + 0x3e, 0x5e, 0x6d, 0xd0, 0xa2, 0x1a, 0x85, 0x6c, 0xd9, 0x7d, 0xba, 0xa6, 0x0c, 0x82, 0x83, 0xd5, + 0x79, 0x0a, 0x27, 0x3f, 0xa3, 0x6d, 0xee, 0x85, 0x9c, 0x6d, 0xee, 0x84, 0xc0, 0x19, 0xdb, 0xdc, + 0xcd, 0xdc, 0x39, 0x23, 0x49, 0x2b, 0x48, 0x48, 0x1c, 0xb9, 0xb6, 0xde, 0xd1, 0x45, 0xd7, 0x2c, + 0xa6, 0x73, 0xca, 0x43, 0xb3, 0xd7, 0x20, 0xfc, 0xf9, 0x99, 0x9e, 0x32, 0x7c, 0x4f, 0x81, 0xe5, + 0x35, 0x7a, 0xc6, 0x1a, 0xf3, 0x8d, 0x87, 0x49, 0x4f, 0x7a, 0x29, 0xcc, 0x19, 0xcb, 0x48, 0xfc, + 0x49, 0x6a, 0x4a, 0xa4, 0x8c, 0xad, 0xc1, 0xbc, 0x60, 0xcb, 0x89, 0xcb, 0x13, 0x24, 0x9c, 0x35, + 0xfc, 0xf8, 0xa7, 0xfa, 0x3a, 0xac, 0xa4, 0x24, 0xe7, 0x27, 0x5d, 0xc9, 0xcb, 0x07, 0x4c, 0xf0, + 0xf8, 0xe5, 0x03, 0xf5, 0x16, 0x1c, 0xef, 0x05, 0x86, 0x17, 0xa4, 0x9a, 0x3d, 0x01, 0x2d, 0x4d, + 0x25, 0x93, 0x69, 0x79, 0xb6, 0x57, 0x0f, 0x96, 0x7a, 0x81, 0xe3, 0x1e, 0x81, 0x29, 0xf1, 0x3b, + 0xa4, 0xe5, 0xce, 0x48, 0xcc, 0x33, 0xe2, 0x53, 0x5d, 0x61, 0x89, 0x6f, 0xe9, 0xda, 0xbe, 0x00, + 0xcb, 0x2c, 0xef, 0xec, 0x28, 0x8d, 0x38, 0x21, 0xb2, 0xde, 0xd2, 0x7c, 0xef, 0xc2, 0x31, 0x69, + 0xef, 0x9d, 0xe7, 0x69, 0x5c, 0x93, 0xf3, 0x34, 0xf2, 0x8f, 0x39, 0xc2, 0x34, 0x8d, 0xef, 0x94, + 0x62, 0x7e, 0x3c, 0xe7, 0xb0, 0xf6, 0x15, 0x39, 0x4b, 0xe3, 0x6c, 0x3e, 0x57, 0x29, 0x49, 0x23, + 0x6d, 0x9d, 0xe5, 0x0c, 0xeb, 0xdc, 0x49, 0x9d, 0x04, 0x57, 0xd2, 0x59, 0x36, 0x09, 0x09, 0x3f, + 0x95, 0x33, 0xe0, 0x7b, 0x2c, 0x93, 0x23, 0xac, 0x3a, 0x3c, 0xfe, 0x7d, 0x29, 0x71, 0xfc, 0x7b, + 0xb2, 0x40, 0xd2, 0xf0, 0xe0, 0xf7, 0x3b, 0x15, 0xa8, 0x85, 0x65, 0x29, 0x0d, 0xa7, 0x55, 0x55, + 0xca, 0x50, 0x55, 0x7c, 0x7e, 0x2d, 0x1f, 0x71, 0x7e, 0xad, 0x4c, 0x30, 0xbf, 0x9e, 0x84, 0x1a, + 0xfd, 0x41, 0x93, 0xef, 0xd9, 0x7c, 0x59, 0xa5, 0x00, 0x0d, 0xef, 0x46, 0x26, 0x36, 0x33, 0xa1, + 0x89, 0x25, 0xb2, 0x46, 0x66, 0x93, 0x59, 0x23, 0x37, 0xc3, 0xb9, 0xaf, 0x9a, 0x3e, 0xa5, 0x09, + 0x39, 0x66, 0xce, 0x7a, 0x89, 0xad, 0xd9, 0x5a, 0x7a, 0x6b, 0x36, 0xa2, 0xff, 0xdc, 0x9e, 0x22, + 0x6f, 0xb1, 0x54, 0x90, 0xb8, 0x9d, 0x71, 0x1f, 0xf9, 0x8a, 0x74, 0x0a, 0xa7, 0x64, 0xcc, 0x55, + 0xa1, 0x5f, 0x88, 0x9f, 0xbc, 0xed, 0xc0, 0x72, 0x32, 0x85, 0xec, 0x50, 0x3e, 0x2e, 0x27, 0x97, + 0xf5, 0x37, 0xe2, 0x11, 0x5f, 0x4e, 0xe2, 0xe6, 0xcd, 0x54, 0x8e, 0xc1, 0xc4, 0x16, 0x7a, 0x4d, + 0x4e, 0x47, 0x3a, 0xb4, 0x5d, 0xa5, 0xb2, 0x91, 0x68, 0x44, 0x62, 0x78, 0xbc, 0x98, 0x05, 0xe7, + 0x35, 0x0e, 0x69, 0xd3, 0x95, 0xc1, 0xae, 0x65, 0x5b, 0xfe, 0x3e, 0x2b, 0x9f, 0x61, 0x2b, 0x03, + 0x01, 0x6a, 0xd3, 0x5d, 0x4b, 0xfc, 0xc4, 0x0a, 0xf4, 0xbe, 0x63, 0x62, 0x6a, 0xb5, 0xd3, 0x5a, + 0x95, 0x00, 0xd6, 0x1c, 0x13, 0x47, 0xe3, 0xa9, 0x7a, 0xd8, 0xf1, 0x54, 0x4b, 0x8c, 0xa7, 0x65, + 0x98, 0xf1, 0xb0, 0xe1, 0x3b, 0x36, 0xdb, 0xcc, 0xd0, 0xf8, 0x17, 0xe9, 0x88, 0x21, 0xf6, 0x7d, + 0x52, 0x07, 0x0f, 0xc0, 0xf8, 0x67, 
0x2c, 0x58, 0x9c, 0x2b, 0x08, 0x16, 0x0b, 0xd2, 0x42, 0x13, + 0xc1, 0x62, 0xa3, 0x20, 0x58, 0x9c, 0x28, 0x2b, 0x34, 0x0a, 0x8b, 0xe7, 0xc7, 0x85, 0xc5, 0xf1, + 0xb8, 0x72, 0x41, 0x8e, 0x2b, 0x5f, 0x8f, 0xaf, 0x50, 0x9b, 0xe9, 0x43, 0xf2, 0xe2, 0xcb, 0x26, + 0x9f, 0xe1, 0x00, 0xfe, 0x47, 0x05, 0x56, 0x52, 0x03, 0x8e, 0x0f, 0xe1, 0x97, 0x12, 0xf9, 0xa6, + 0x85, 0x89, 0x9e, 0x22, 0xdd, 0xb4, 0x2d, 0xa5, 0x9b, 0x5e, 0x2e, 0x22, 0xc9, 0xc9, 0x36, 0x3d, + 0x7a, 0x06, 0xe8, 0xb7, 0x14, 0x40, 0x19, 0x6b, 0xf0, 0x9b, 0x22, 0x5a, 0x3f, 0xc4, 0x6e, 0x19, + 0x0f, 0xd8, 0xdf, 0x8a, 0x02, 0xf6, 0xd2, 0x61, 0xf6, 0x1d, 0xc2, 0xd4, 0x94, 0x1f, 0x97, 0xe0, + 0xec, 0x8e, 0x6b, 0x26, 0xc2, 0x48, 0x8e, 0x35, 0xb9, 0x67, 0xbb, 0x29, 0xe7, 0xd5, 0x1c, 0xb1, + 0x09, 0xe5, 0xa3, 0x34, 0x01, 0x7d, 0x2d, 0x2b, 0xf3, 0xe9, 0x75, 0xe9, 0x8c, 0xb2, 0xb8, 0x81, + 0x63, 0xa6, 0xaf, 0x8f, 0x6b, 0xc2, 0x2a, 0x9c, 0xcb, 0x17, 0x80, 0x87, 0x9c, 0xff, 0x1f, 0x16, + 0x36, 0x9e, 0xe0, 0x7e, 0xef, 0xc0, 0xee, 0x1f, 0x42, 0xeb, 0x4d, 0x28, 0xf7, 0x87, 0x26, 0x3f, + 0x1d, 0x21, 0x3f, 0xe3, 0x51, 0x74, 0x59, 0x8e, 0xa2, 0x75, 0x68, 0x46, 0x35, 0xf0, 0x01, 0xb4, + 0x4c, 0x06, 0x90, 0x49, 0x90, 0x09, 0xf3, 0x39, 0x8d, 0x7f, 0x71, 0x38, 0xf6, 0xd8, 0x4d, 0x16, + 0x06, 0xc7, 0x9e, 0x27, 0x7b, 0xed, 0xb2, 0xec, 0xb5, 0xd5, 0xef, 0x2a, 0x50, 0x27, 0x35, 0x7c, + 0x2c, 0xf9, 0xf9, 0x52, 0xb6, 0x1c, 0x2d, 0x65, 0xc3, 0x15, 0x71, 0x25, 0xbe, 0x22, 0x8e, 0x24, + 0x9f, 0xa6, 0xe0, 0xb4, 0xe4, 0x33, 0x21, 0x1c, 0x7b, 0x9e, 0x7a, 0x0e, 0xe6, 0x98, 0x6c, 0xbc, + 0xe5, 0x4d, 0x28, 0x8f, 0xbc, 0x81, 0xe8, 0xbf, 0x91, 0x37, 0x50, 0xbf, 0xa9, 0x40, 0xa3, 0x1d, + 0x04, 0x46, 0x7f, 0xff, 0x10, 0x0d, 0x08, 0x85, 0x2b, 0xc5, 0x85, 0x4b, 0x37, 0x22, 0x12, 0xb7, + 0x92, 0x23, 0xee, 0xb4, 0x24, 0xae, 0x0a, 0xf3, 0x42, 0x96, 0x5c, 0x81, 0x37, 0x01, 0x75, 0x1d, + 0x2f, 0x78, 0xdb, 0xf1, 0x1e, 0x1b, 0x9e, 0x79, 0xb8, 0x55, 0x2b, 0x82, 0x0a, 0x7f, 0x5b, 0xa0, + 0x7c, 0x61, 0x5a, 0xa3, 0xbf, 0xd5, 0xe7, 0xe0, 0x98, 0xc4, 0x2f, 0xb7, 0xe2, 0x5b, 0x50, 0xa7, + 0xb3, 0x30, 0x5f, 0xd0, 0x3c, 0x1f, 0x3f, 0xd8, 0x1f, 0x33, 0x5b, 0xab, 0xeb, 0xb0, 0x48, 0xe2, + 0x31, 0x0a, 0x0f, 0xfd, 0xcb, 0xd5, 0x44, 0xcc, 0xbf, 0x92, 0x62, 0x91, 0x88, 0xf7, 0x7f, 0xaa, + 0xc0, 0x34, 0x3b, 0xc3, 0x4f, 0xc6, 0x48, 0x27, 0xc9, 0x3c, 0xe7, 0x3a, 0x7a, 0x60, 0xec, 0x85, + 0xef, 0x36, 0x10, 0xc0, 0xb6, 0xb1, 0x47, 0x4f, 0x74, 0x68, 0xa1, 0x69, 0xed, 0x61, 0x3f, 0x10, + 0x27, 0x84, 0x75, 0x02, 0x5b, 0x67, 0x20, 0xa2, 0x18, 0x7a, 0x90, 0x5a, 0xa1, 0xe7, 0xa5, 0xf4, + 0x37, 0xba, 0xc0, 0x2e, 0x41, 0x16, 0x1f, 0x8b, 0xd1, 0xcb, 0x91, 0x2d, 0xa8, 0x26, 0xce, 0xb3, + 0xc2, 0x6f, 0x74, 0x11, 0x2a, 0x74, 0xff, 0x79, 0xb6, 0x48, 0x4b, 0x14, 0x85, 0x58, 0x85, 0x6b, + 0xd9, 0x36, 0x36, 0x69, 0x00, 0x54, 0xd5, 0xf8, 0x97, 0xfa, 0x16, 0xa0, 0xb8, 0xf2, 0x78, 0x07, + 0x5d, 0x84, 0x19, 0xaa, 0x5b, 0x11, 0xc4, 0x2e, 0xa6, 0x58, 0x6b, 0x1c, 0x41, 0xfd, 0x2a, 0x20, + 0x56, 0x97, 0x14, 0xb8, 0x1e, 0xa6, 0x03, 0x0b, 0x42, 0xd8, 0x3f, 0x53, 0xe0, 0x98, 0xc4, 0x9d, + 0xcb, 0xf7, 0x9c, 0xcc, 0x3e, 0x43, 0x3c, 0xce, 0xfa, 0x0d, 0x69, 0x66, 0xbe, 0x98, 0x16, 0xe3, + 0xe7, 0x34, 0x2b, 0xff, 0x93, 0x02, 0xd0, 0x1e, 0x05, 0xfb, 0x7c, 0xa3, 0x35, 0xde, 0x89, 0x4a, + 0xa2, 0x13, 0x5b, 0x50, 0x75, 0x0d, 0xdf, 0x7f, 0xec, 0x78, 0x62, 0x11, 0x19, 0x7e, 0xd3, 0xed, + 0xd1, 0x11, 0x7f, 0xcc, 0xa1, 0xa6, 0xd1, 0xdf, 0xe8, 0x19, 0x98, 0x67, 0x0f, 0x8a, 0xe8, 0x86, + 0x69, 0x7a, 0x22, 0x59, 0xb0, 0xa6, 0x35, 0x18, 0xb4, 0xcd, 0x80, 0x04, 0xcd, 0xa2, 0xa7, 0x11, + 0xc1, 0x81, 0x1e, 0x38, 0x0f, 0xb1, 0xcd, 0x17, 0x86, 0x0d, 
0x01, 0xdd, 0x26, 0x40, 0x76, 0xdc, + 0xb8, 0x67, 0xf9, 0x81, 0x27, 0xd0, 0xc4, 0xa1, 0x29, 0x87, 0x52, 0x34, 0xf5, 0x8f, 0x14, 0x68, + 0x76, 0x47, 0x83, 0x01, 0x53, 0xee, 0x51, 0x3a, 0xf9, 0x12, 0x6f, 0x4a, 0x29, 0x6d, 0xf2, 0x91, + 0xa2, 0x78, 0x13, 0x3f, 0x91, 0xbd, 0xac, 0x6b, 0xb0, 0x18, 0x93, 0x98, 0x1b, 0x8e, 0x14, 0xd9, + 0x2b, 0x72, 0x64, 0xaf, 0xb6, 0x01, 0xb1, 0xed, 0x9b, 0x23, 0xb7, 0x52, 0x3d, 0x0e, 0xc7, 0x24, + 0x16, 0x7c, 0x2a, 0xbe, 0x04, 0x0d, 0x9e, 0xb8, 0xc6, 0x0d, 0xe2, 0x04, 0x54, 0x89, 0x4b, 0xed, + 0x5b, 0xa6, 0xc8, 0x90, 0x98, 0x75, 0x1d, 0x73, 0xcd, 0x32, 0x3d, 0xf5, 0x4b, 0xd0, 0xe0, 0x37, + 0xe3, 0x39, 0xee, 0x6d, 0x98, 0xe7, 0xe7, 0x83, 0xba, 0x74, 0x95, 0xf4, 0x44, 0x46, 0x76, 0xa4, + 0x50, 0x85, 0x1d, 0xff, 0x54, 0xbf, 0x06, 0x2d, 0x16, 0x2d, 0x48, 0x8c, 0x45, 0x03, 0x6f, 0x83, + 0x48, 0x4e, 0x2a, 0xe0, 0x2f, 0x53, 0x36, 0xbc, 0xf8, 0xa7, 0x7a, 0x1a, 0x4e, 0x66, 0xf2, 0xe7, + 0xad, 0x77, 0xa1, 0x19, 0x15, 0xb0, 0xfb, 0x8e, 0x61, 0xda, 0x87, 0x12, 0x4b, 0xfb, 0x58, 0x0e, + 0x63, 0xef, 0x92, 0x98, 0xb9, 0x68, 0x78, 0x1d, 0xad, 0xb8, 0xca, 0x79, 0x2b, 0xae, 0x8a, 0xb4, + 0xe2, 0x52, 0xef, 0x87, 0x3a, 0xe4, 0xeb, 0xde, 0xd7, 0xe9, 0xca, 0x9c, 0xd5, 0x2d, 0x9c, 0xda, + 0xa9, 0xec, 0xf6, 0x31, 0x24, 0x2d, 0x86, 0xaf, 0x5e, 0x84, 0x86, 0xec, 0xde, 0x62, 0x1e, 0x4b, + 0x49, 0x79, 0xac, 0xf9, 0x84, 0xb3, 0x7a, 0x31, 0xb1, 0xa4, 0xc8, 0xd2, 0x6b, 0x62, 0x41, 0x71, + 0x43, 0x72, 0x5b, 0x4f, 0x4b, 0x47, 0xf4, 0x3f, 0x27, 0x8f, 0xb5, 0xc4, 0xfd, 0xf8, 0xdb, 0x3e, + 0xa1, 0xe7, 0x0d, 0x55, 0x9f, 0x82, 0xfa, 0x4e, 0xde, 0xfb, 0x24, 0x15, 0x91, 0x57, 0xf6, 0x2a, + 0x2c, 0xbd, 0x6d, 0x0d, 0xb0, 0x7f, 0xe0, 0x07, 0x78, 0xd8, 0xa1, 0xee, 0x65, 0xd7, 0xc2, 0x1e, + 0x3a, 0x03, 0x40, 0x57, 0x91, 0xae, 0x63, 0x85, 0x6f, 0x32, 0xc4, 0x20, 0xea, 0x8f, 0x14, 0x58, + 0x88, 0x08, 0x27, 0x49, 0x1e, 0x7c, 0x05, 0xa6, 0x77, 0x7d, 0xb1, 0xdb, 0x96, 0x38, 0x83, 0xc8, + 0x12, 0x41, 0xab, 0xec, 0xfa, 0x1d, 0x13, 0xbd, 0x0a, 0x30, 0xf2, 0xb1, 0xc9, 0x8f, 0xfd, 0xc6, + 0xa4, 0x73, 0xd6, 0x08, 0x2a, 0x3b, 0x38, 0xbc, 0x01, 0x75, 0xcb, 0x76, 0x4c, 0x4c, 0x8f, 0x84, + 0xcd, 0x71, 0xa9, 0x9c, 0xc0, 0x70, 0x77, 0x7c, 0x6c, 0xaa, 0xbf, 0x17, 0x1d, 0xec, 0x7e, 0x9e, + 0x5b, 0xa8, 0xfe, 0xb1, 0x98, 0x60, 0x45, 0xb7, 0x73, 0x9b, 0x7d, 0x07, 0x16, 0x99, 0x9f, 0xdc, + 0x0d, 0xeb, 0xcc, 0xbc, 0xe3, 0x92, 0x68, 0x9c, 0xd6, 0xb4, 0x78, 0x68, 0x25, 0x88, 0x50, 0x17, + 0x8e, 0x47, 0x11, 0x6f, 0x9c, 0x5b, 0x69, 0x3c, 0xb7, 0xa5, 0x7e, 0x6c, 0x73, 0x56, 0x10, 0xaa, + 0xb7, 0xe0, 0x78, 0x22, 0x8d, 0x7d, 0xf2, 0x1d, 0xfa, 0x77, 0x13, 0x5b, 0x6d, 0xd1, 0x28, 0xbd, + 0x26, 0xdf, 0x9e, 0x2a, 0xba, 0x70, 0xc0, 0x2f, 0xf2, 0xec, 0xc0, 0x09, 0x69, 0x1f, 0x50, 0x92, + 0xe5, 0x46, 0x22, 0xfe, 0x3c, 0x97, 0xcf, 0x2f, 0x11, 0x88, 0xfe, 0xb7, 0x02, 0x4b, 0x59, 0x08, + 0x47, 0xdc, 0x83, 0xfe, 0x20, 0xe7, 0xe6, 0xe5, 0x4b, 0xe3, 0x04, 0xfa, 0x54, 0xf6, 0xec, 0x37, + 0xd9, 0xbd, 0xad, 0xf1, 0x7d, 0x52, 0x9e, 0xac, 0x4f, 0x7e, 0x5a, 0x8a, 0x9d, 0xb3, 0x14, 0xdc, + 0xad, 0xfa, 0x18, 0xfb, 0x9e, 0x6b, 0x89, 0xab, 0x55, 0xcf, 0x67, 0x12, 0x8e, 0xb9, 0x59, 0xa5, + 0x65, 0xed, 0x2f, 0x5c, 0x1b, 0xc7, 0xe9, 0x73, 0xbb, 0x25, 0xfe, 0x9b, 0x25, 0x98, 0x97, 0x3b, + 0x04, 0xbd, 0x95, 0x71, 0xaf, 0xea, 0xec, 0x98, 0x06, 0x4a, 0xd7, 0xaa, 0xf8, 0x3d, 0xa6, 0xd2, + 0xe4, 0xf7, 0x98, 0xca, 0x93, 0xdd, 0x63, 0xba, 0x03, 0xf3, 0x8f, 0x3d, 0x2b, 0x30, 0x1e, 0x0c, + 0xb0, 0x3e, 0x30, 0x0e, 0xb0, 0xc7, 0x1d, 0x7b, 0xa1, 0x2b, 0x6a, 0x08, 0x92, 0x7b, 0x84, 0x82, + 0xae, 0xbc, 0x1e, 0x1b, 0x2e, 0x5f, 0xc0, 0x49, 0x31, 0x61, 0xef, 0xb1, 0xe1, 0x32, 
0x1a, 0x8a, + 0xa2, 0x7e, 0xb3, 0x04, 0xc7, 0x33, 0x6f, 0xdf, 0x7c, 0x7c, 0x15, 0x5d, 0x8e, 0xab, 0xe8, 0x30, + 0x57, 0x9a, 0xca, 0x87, 0xba, 0xd2, 0xd4, 0xc9, 0x51, 0x58, 0xd6, 0x41, 0x7e, 0xb1, 0xde, 0xd4, + 0xbf, 0x54, 0xa0, 0x2a, 0x84, 0x1a, 0x7b, 0xc1, 0x68, 0x65, 0x44, 0xd0, 0x74, 0x9a, 0x04, 0x6e, + 0x1b, 0xb6, 0xa3, 0xfb, 0x98, 0x04, 0x65, 0x63, 0xaf, 0x73, 0x2c, 0x51, 0xba, 0x35, 0xc7, 0xc3, + 0x9b, 0x86, 0xed, 0xf4, 0x18, 0x11, 0x6a, 0x43, 0x93, 0xf1, 0xa3, 0xac, 0x08, 0xd3, 0xb1, 0x13, + 0xe5, 0x3c, 0x25, 0x20, 0x4c, 0x08, 0x33, 0x5f, 0xfd, 0xbe, 0x02, 0x0b, 0x09, 0xcd, 0xfe, 0xe2, + 0x35, 0xe2, 0x77, 0xcb, 0x50, 0x8f, 0xf5, 0xf2, 0x98, 0x06, 0xac, 0xc1, 0xa2, 0x48, 0xc6, 0xf1, + 0x71, 0x30, 0xd9, 0x75, 0x9a, 0x05, 0x4e, 0xd1, 0xc3, 0x01, 0x8b, 0xa3, 0x6e, 0xc3, 0x82, 0xf1, + 0xc8, 0xb0, 0x06, 0xd4, 0x82, 0x26, 0x0a, 0x51, 0xe6, 0x43, 0xfc, 0x30, 0x12, 0x63, 0xed, 0x9e, + 0xe8, 0x52, 0x0d, 0x50, 0xdc, 0xe8, 0x6e, 0x93, 0xef, 0xc7, 0x32, 0xbe, 0x0a, 0xef, 0x36, 0xf9, + 0x7e, 0x58, 0x1f, 0xcd, 0x80, 0xa7, 0x97, 0xba, 0x7c, 0xfe, 0x12, 0x48, 0x7e, 0x7d, 0x04, 0xf7, + 0x6d, 0x8a, 0x4a, 0x14, 0x36, 0x34, 0x3e, 0x74, 0x3c, 0x3d, 0x4e, 0x3f, 0x3b, 0x46, 0x61, 0x94, + 0xa2, 0x1b, 0x32, 0x51, 0xff, 0x5c, 0x81, 0x5a, 0xe8, 0x47, 0xc6, 0xf4, 0x50, 0x07, 0x96, 0xe8, + 0x75, 0x81, 0xa4, 0x86, 0xc7, 0x74, 0x12, 0x22, 0x44, 0x6d, 0x59, 0xcb, 0x6d, 0x68, 0x52, 0x56, + 0x71, 0x55, 0x8f, 0xeb, 0x28, 0x5f, 0x88, 0xc9, 0x02, 0xca, 0xbf, 0x2a, 0x01, 0x4a, 0xbb, 0x92, + 0x5f, 0x18, 0x23, 0x8b, 0x77, 0x5a, 0x65, 0xf2, 0x4e, 0xbf, 0x0b, 0xc7, 0xfa, 0xce, 0x70, 0x68, + 0xd1, 0xab, 0x26, 0x8e, 0x77, 0x30, 0x99, 0xb9, 0x2d, 0x32, 0x1a, 0xa6, 0x27, 0xa6, 0xbe, 0x37, + 0xe1, 0x84, 0x86, 0x1d, 0x17, 0xdb, 0xa1, 0xeb, 0xbf, 0xe7, 0xec, 0x1d, 0x22, 0xbe, 0x3d, 0x05, + 0xad, 0x2c, 0x7a, 0xbe, 0x10, 0x1f, 0x41, 0x6b, 0x6d, 0x1f, 0xf7, 0x1f, 0xd2, 0xe5, 0xd7, 0x51, + 0x12, 0x6a, 0x5a, 0x50, 0x1d, 0x38, 0x7d, 0xf6, 0xac, 0x2a, 0xdf, 0xab, 0x12, 0xdf, 0x05, 0xc7, + 0x04, 0xa7, 0xe1, 0x64, 0x66, 0xb5, 0x5c, 0x2a, 0x04, 0xcd, 0xbb, 0x38, 0xd8, 0x78, 0x84, 0xed, + 0x30, 0x7c, 0x56, 0x7f, 0x50, 0x8a, 0x05, 0xea, 0xb4, 0xe8, 0x10, 0x89, 0x48, 0xa8, 0x0b, 0xd1, + 0xca, 0x41, 0xc7, 0x84, 0x9a, 0x3d, 0x72, 0xc8, 0x9e, 0x07, 0xcd, 0x3e, 0xa4, 0xa4, 0x95, 0xd0, + 0xb7, 0x0d, 0xa3, 0xe7, 0x5b, 0x42, 0x58, 0xe2, 0xe8, 0xba, 0x9c, 0x3c, 0xba, 0x7e, 0x17, 0x50, + 0x3c, 0x14, 0xe7, 0xcb, 0xfd, 0xca, 0x04, 0x2f, 0xd6, 0x34, 0xdd, 0xe4, 0xdb, 0x4a, 0x39, 0xef, + 0xce, 0x4c, 0x1f, 0xe9, 0xdd, 0x19, 0xf5, 0x0c, 0x9c, 0x22, 0x01, 0xf6, 0x7d, 0x1c, 0x78, 0x56, + 0x7f, 0x1d, 0xfb, 0x7d, 0xcf, 0x72, 0x03, 0x27, 0xcc, 0x8d, 0x51, 0x75, 0x38, 0x9d, 0x53, 0xce, + 0xd5, 0xfd, 0x26, 0xd4, 0xcd, 0x08, 0x9c, 0xb5, 0x75, 0x92, 0xa4, 0xd5, 0xe2, 0x04, 0xea, 0xfb, + 0xd0, 0x4c, 0x22, 0x64, 0xa6, 0xd2, 0x22, 0xa8, 0xec, 0xe3, 0x81, 0x2b, 0xee, 0x06, 0x91, 0xdf, + 0x44, 0xeb, 0x6c, 0xed, 0xf2, 0x10, 0x1f, 0x88, 0xad, 0xf5, 0x1a, 0x85, 0x7c, 0x11, 0x1f, 0x84, + 0x6d, 0x93, 0x1e, 0x42, 0xf0, 0xac, 0x7e, 0xb2, 0x6d, 0x19, 0xe5, 0x51, 0xdb, 0x48, 0xb7, 0x0d, + 0x19, 0x98, 0xb7, 0xed, 0x74, 0xee, 0x23, 0x0b, 0x94, 0x16, 0x5c, 0xc7, 0xe4, 0xbf, 0xd5, 0x3f, + 0x51, 0x60, 0x31, 0x85, 0x31, 0xe1, 0x71, 0xc9, 0x0b, 0x30, 0x2b, 0xea, 0x2d, 0xa5, 0xf3, 0x4d, + 0x19, 0x2f, 0x4d, 0xa0, 0xa0, 0x0e, 0x2c, 0x46, 0x16, 0x2d, 0xe8, 0xca, 0xe9, 0xbe, 0x88, 0x2f, + 0x5c, 0xa8, 0xb8, 0xcd, 0x7e, 0x02, 0xa2, 0xf6, 0xa1, 0x99, 0xc4, 0x9a, 0x64, 0x4c, 0x1d, 0x4a, + 0x5e, 0xf5, 0xef, 0x15, 0x98, 0x61, 0xb0, 0xcc, 0xce, 0x96, 0xa6, 0x83, 0x52, 0x72, 0x3a, 0x78, + 0x0d, 0xea, 
0x8c, 0x8f, 0x1e, 0xde, 0x0c, 0x9b, 0x97, 0x77, 0x8c, 0x19, 0x6b, 0x3a, 0x5a, 0x61, + 0x18, 0xfe, 0x26, 0xcd, 0x60, 0xf6, 0x42, 0x57, 0x26, 0x22, 0xab, 0xb8, 0x4e, 0x61, 0xd4, 0xe5, + 0x92, 0x90, 0x99, 0xaf, 0x61, 0xc6, 0xf8, 0x66, 0xbe, 0xb5, 0xb5, 0x4c, 0x9f, 0xf5, 0x4b, 0xed, + 0x99, 0xaa, 0xdb, 0xf4, 0xdd, 0xbd, 0xf4, 0x5e, 0x27, 0xfa, 0x82, 0x7c, 0xee, 0xfe, 0x4c, 0xea, + 0xd0, 0x5a, 0x22, 0x1b, 0x79, 0xec, 0xf9, 0x69, 0x46, 0xa3, 0x7e, 0x00, 0x27, 0x72, 0x71, 0xd0, + 0x1b, 0xe1, 0x23, 0xa7, 0xa6, 0x67, 0x3d, 0xe2, 0x1b, 0x0b, 0xf3, 0xf2, 0x83, 0x0a, 0x6b, 0x14, + 0x61, 0x9d, 0x96, 0x8b, 0xe7, 0x4f, 0xd9, 0xd7, 0xa5, 0x67, 0xa1, 0x2a, 0x9e, 0x06, 0x47, 0xb3, + 0x50, 0xde, 0x5e, 0xeb, 0x36, 0xa7, 0xc8, 0x8f, 0x9d, 0xf5, 0x6e, 0x53, 0x41, 0x55, 0xa8, 0xf4, + 0xd6, 0xb6, 0xbb, 0xcd, 0xd2, 0xa5, 0x21, 0x34, 0x93, 0xaf, 0x63, 0xa3, 0x15, 0x38, 0xd6, 0xd5, + 0xb6, 0xba, 0xed, 0xbb, 0xed, 0xed, 0xce, 0xd6, 0xa6, 0xde, 0xd5, 0x3a, 0xef, 0xb5, 0xb7, 0x37, + 0x9a, 0x53, 0xe8, 0x3c, 0x9c, 0x8e, 0x17, 0xbc, 0xb3, 0xd5, 0xdb, 0xd6, 0xb7, 0xb7, 0xf4, 0xb5, + 0xad, 0xcd, 0xed, 0x76, 0x67, 0x73, 0x43, 0x6b, 0x2a, 0xe8, 0x34, 0x9c, 0x88, 0xa3, 0xdc, 0xe9, + 0xac, 0x77, 0xb4, 0x8d, 0x35, 0xf2, 0xbb, 0x7d, 0xaf, 0x59, 0xba, 0xf4, 0x06, 0x34, 0xa4, 0xbb, + 0x30, 0x44, 0xa4, 0xee, 0xd6, 0x7a, 0x73, 0x0a, 0x35, 0xa0, 0x16, 0xe7, 0x53, 0x85, 0xca, 0xe6, + 0xd6, 0xfa, 0x46, 0xb3, 0x84, 0x00, 0x66, 0xb6, 0xdb, 0xda, 0xdd, 0x8d, 0xed, 0x66, 0xf9, 0xd2, + 0xad, 0xe4, 0x83, 0x1e, 0x18, 0x2d, 0x42, 0xa3, 0xd7, 0xde, 0x5c, 0xbf, 0xb3, 0xf5, 0x15, 0x5d, + 0xdb, 0x68, 0xaf, 0xbf, 0xdf, 0x9c, 0x42, 0x4b, 0xd0, 0x14, 0xa0, 0xcd, 0xad, 0x6d, 0x06, 0x55, + 0x2e, 0x3d, 0x4c, 0xac, 0x59, 0x31, 0x3a, 0x0e, 0x8b, 0x61, 0x95, 0xfa, 0x9a, 0xb6, 0xd1, 0xde, + 0xde, 0x20, 0x92, 0x48, 0x60, 0x6d, 0x67, 0x73, 0xb3, 0xb3, 0x79, 0xb7, 0xa9, 0x10, 0xae, 0x11, + 0x78, 0xe3, 0x2b, 0x1d, 0x82, 0x5c, 0x92, 0x91, 0x77, 0x36, 0xbf, 0xb8, 0xb9, 0xf5, 0xe5, 0xcd, + 0x66, 0xf9, 0xd2, 0xaf, 0xc4, 0xd3, 0x34, 0xa2, 0x79, 0xe5, 0x24, 0xac, 0xa4, 0x6a, 0xd4, 0x37, + 0xde, 0xdb, 0xd8, 0xdc, 0x6e, 0x4e, 0xc9, 0x85, 0xbd, 0xed, 0xb6, 0x16, 0x15, 0x2a, 0xc9, 0xc2, + 0xad, 0x6e, 0x37, 0x2c, 0x2c, 0xc9, 0x85, 0xeb, 0x1b, 0xf7, 0x36, 0x22, 0xca, 0xf2, 0xa5, 0xa7, + 0x01, 0xa2, 0xf1, 0x83, 0xea, 0x30, 0xbb, 0xb6, 0xb5, 0xb3, 0xb9, 0xbd, 0xa1, 0x35, 0xa7, 0x50, + 0x0d, 0xa6, 0xef, 0xb6, 0x77, 0xee, 0x6e, 0x34, 0x95, 0x4b, 0x17, 0x61, 0x2e, 0x6e, 0x4d, 0x04, + 0xaf, 0xf7, 0x7e, 0x6f, 0x7b, 0xe3, 0x3e, 0xd1, 0xc8, 0x1c, 0x54, 0xd7, 0xee, 0x6a, 0x5b, 0x3b, + 0xdd, 0xb7, 0x7b, 0x4d, 0xe5, 0xfa, 0xff, 0x2e, 0x85, 0x8f, 0xf9, 0xf6, 0xb0, 0x47, 0x6f, 0x20, + 0xac, 0xc3, 0xac, 0x78, 0x4c, 0x5f, 0xda, 0xb5, 0x91, 0x1f, 0xff, 0x6f, 0x9d, 0xcc, 0x2c, 0xe3, + 0x71, 0xc1, 0x14, 0x7a, 0x8f, 0x6e, 0xe3, 0xc7, 0x9e, 0xd3, 0x3a, 0x97, 0xd8, 0x3a, 0x4f, 0xbd, + 0xda, 0xd5, 0x3a, 0x5f, 0x80, 0x11, 0xf2, 0x7d, 0x1f, 0xe6, 0xe5, 0x77, 0x2b, 0xd1, 0x79, 0x79, + 0x8b, 0x3d, 0xe3, 0x49, 0xcc, 0x96, 0x5a, 0x84, 0x12, 0xb2, 0xd6, 0xa1, 0x99, 0x7c, 0xb7, 0x12, + 0x49, 0x99, 0x2b, 0x39, 0xcf, 0x62, 0xb6, 0x9e, 0x2e, 0x46, 0x8a, 0x57, 0x90, 0x7a, 0x8e, 0xf1, + 0xa9, 0xe2, 0x07, 0xee, 0x32, 0x2a, 0xc8, 0x7b, 0x05, 0x8f, 0x29, 0x47, 0x9e, 0x35, 0x51, 0xe2, + 0x05, 0xc4, 0x8c, 0xc7, 0xd2, 0x64, 0xe5, 0x64, 0x3f, 0x94, 0xa5, 0x4e, 0xa1, 0xff, 0x07, 0x0b, + 0x89, 0xf4, 0x72, 0x24, 0x11, 0x66, 0x67, 0xcd, 0xb7, 0x9e, 0x2a, 0xc4, 0x91, 0x7b, 0x35, 0x9e, + 0x42, 0x9e, 0xec, 0xd5, 0x8c, 0xd4, 0xf4, 0x64, 0xaf, 0x66, 0x66, 0xa0, 0x53, 0x43, 0x94, 0xd2, + 0xc5, 0x65, 0x43, 0xcc, 0x4a, 0x4f, 
0x6f, 0x9d, 0x2f, 0xc0, 0x88, 0x2b, 0x24, 0x91, 0x30, 0x2e, + 0x2b, 0x24, 0x3b, 0x15, 0xbd, 0xf5, 0x54, 0x21, 0x4e, 0xb2, 0x27, 0xa3, 0x44, 0xd5, 0x74, 0x4f, + 0xa6, 0x92, 0xa5, 0xd3, 0x3d, 0x99, 0xce, 0x73, 0xe5, 0x3d, 0x99, 0x48, 0x2d, 0x55, 0x0b, 0xd3, + 0xde, 0xb2, 0x7a, 0x32, 0x3b, 0x35, 0x4e, 0x9d, 0x42, 0x8f, 0x61, 0x35, 0x2f, 0xbb, 0x09, 0x3d, + 0x7f, 0x88, 0x24, 0xac, 0xd6, 0x0b, 0x93, 0x21, 0x87, 0x15, 0x63, 0x40, 0xe9, 0xe5, 0x13, 0x7a, + 0x46, 0x56, 0x77, 0xce, 0xf2, 0xac, 0xf5, 0xec, 0x38, 0xb4, 0xb0, 0x9a, 0xbb, 0x50, 0x15, 0x79, + 0x53, 0x48, 0x72, 0x81, 0x89, 0x7c, 0xad, 0xd6, 0xa9, 0xec, 0xc2, 0x90, 0xd1, 0x17, 0xa0, 0x42, + 0xa0, 0x68, 0x25, 0x89, 0x27, 0x18, 0xac, 0xa6, 0x0b, 0x42, 0xe2, 0x36, 0xcc, 0xb0, 0x84, 0x20, + 0x24, 0x9d, 0x48, 0x4a, 0x09, 0x4b, 0xad, 0x56, 0x56, 0x51, 0xc8, 0xa2, 0xcb, 0xfe, 0x35, 0x09, + 0xcf, 0xef, 0x41, 0x67, 0x92, 0x2f, 0x56, 0xcb, 0x89, 0x44, 0xad, 0xb3, 0xb9, 0xe5, 0x71, 0x9b, + 0x4d, 0xec, 0x92, 0x9e, 0x2f, 0xd8, 0xf5, 0xcf, 0xb2, 0xd9, 0xec, 0xb3, 0x04, 0xd6, 0xb9, 0xe9, + 0xb3, 0x06, 0xf4, 0x4c, 0xae, 0xbd, 0x4b, 0x55, 0x3c, 0x3b, 0x0e, 0x2d, 0x3e, 0x34, 0x92, 0x4f, + 0x4f, 0xa9, 0x45, 0xcf, 0xc2, 0x65, 0x0d, 0x8d, 0x9c, 0xe7, 0xe6, 0xd4, 0x29, 0xb4, 0x0f, 0xc7, + 0x32, 0xde, 0xa3, 0x43, 0xcf, 0xe6, 0xfb, 0x5f, 0xa9, 0x96, 0xe7, 0xc6, 0xe2, 0xc5, 0x6b, 0xca, + 0x38, 0xd4, 0x97, 0x6b, 0xca, 0xcf, 0x2a, 0x90, 0x6b, 0x2a, 0xca, 0x0e, 0xa0, 0x86, 0xc8, 0x7d, + 0xc8, 0x89, 0xac, 0x93, 0xee, 0x0c, 0x43, 0x4c, 0x79, 0x8c, 0x7d, 0x38, 0x96, 0xb1, 0xc5, 0x20, + 0x0b, 0x9b, 0xbf, 0xf5, 0x21, 0x0b, 0x5b, 0xb4, 0x57, 0x31, 0x85, 0x3e, 0x00, 0x74, 0x17, 0x07, + 0x72, 0x28, 0xe7, 0x23, 0x69, 0xa0, 0x26, 0x77, 0x33, 0x72, 0xec, 0x53, 0xda, 0xd6, 0x50, 0xa7, + 0xae, 0x29, 0xc8, 0x66, 0x37, 0x58, 0x52, 0x8b, 0x71, 0x74, 0x21, 0xd9, 0x6d, 0x79, 0xeb, 0xf9, + 0xd6, 0xc5, 0x09, 0x30, 0xc3, 0xb6, 0xd8, 0xc9, 0xb7, 0x4f, 0xc5, 0x7a, 0xf0, 0x42, 0xbe, 0x99, + 0xc8, 0x6b, 0xec, 0x74, 0x7d, 0xb9, 0xab, 0xed, 0x30, 0x9e, 0x8b, 0x19, 0xd3, 0xb9, 0xfc, 0x14, + 0x93, 0x9c, 0x78, 0x2e, 0xcb, 0x80, 0xae, 0xff, 0x4e, 0x19, 0xe6, 0x58, 0x2a, 0x0e, 0x0f, 0x3f, + 0xef, 0x03, 0x44, 0x59, 0x6d, 0xe8, 0x74, 0x52, 0x46, 0x29, 0x55, 0xb0, 0x75, 0x26, 0xaf, 0x38, + 0xee, 0xe6, 0x62, 0xd9, 0x62, 0xb2, 0x9b, 0x4b, 0x27, 0xbf, 0xc9, 0x6e, 0x2e, 0x23, 0xcd, 0x4c, + 0x9d, 0x42, 0xef, 0x42, 0x2d, 0x4c, 0x4e, 0x92, 0x8d, 0x27, 0x99, 0x65, 0xd5, 0x3a, 0x9d, 0x53, + 0x1a, 0x97, 0x2e, 0x96, 0x73, 0x24, 0x4b, 0x97, 0xce, 0x67, 0x92, 0xa5, 0xcb, 0x4a, 0x56, 0x8a, + 0xda, 0xcb, 0x92, 0x02, 0x32, 0xda, 0x2b, 0x25, 0x89, 0x64, 0xb4, 0x57, 0xce, 0x26, 0x50, 0xa7, + 0xee, 0xdc, 0xfe, 0xe1, 0x4f, 0xce, 0x28, 0x3f, 0xfa, 0xc9, 0x99, 0xa9, 0x5f, 0xfa, 0xe8, 0x8c, + 0xf2, 0xc3, 0x8f, 0xce, 0x28, 0xff, 0xfc, 0xd1, 0x19, 0xe5, 0xc7, 0x1f, 0x9d, 0x51, 0xbe, 0xf5, + 0x1f, 0x67, 0xa6, 0x3e, 0x50, 0x1f, 0xde, 0xf0, 0xaf, 0x58, 0xce, 0xd5, 0xbe, 0x67, 0x5d, 0x36, + 0x5c, 0xeb, 0xaa, 0xfb, 0x70, 0xef, 0xaa, 0xe1, 0x5a, 0xfe, 0x55, 0xce, 0xf7, 0xea, 0xa3, 0x17, + 0x1f, 0xcc, 0xd0, 0x7f, 0x67, 0xf5, 0xd2, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x24, 0xa8, 0x67, + 0x4b, 0x88, 0x6c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -14471,6 +14496,15 @@ func (m *ImageSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.RuntimeHandler) > 0 { + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintApi(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } if len(m.UserSpecifiedImage) > 0 { i -= len(m.UserSpecifiedImage) copy(dAtA[i:], m.UserSpecifiedImage) @@ -17976,6 +18010,20 @@ func (m *ImageFsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ContainerFilesystems) > 0 { + for iNdEx := len(m.ContainerFilesystems) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerFilesystems[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } if len(m.ImageFilesystems) > 0 { for iNdEx := len(m.ImageFilesystems) - 1; iNdEx >= 0; iNdEx-- { { @@ -20452,6 +20500,10 @@ func (m *ImageSpec) Size() (n int) { if l > 0 { n += 2 + l + sovApi(uint64(l)) } + l = len(m.RuntimeHandler) + if l > 0 { + n += 2 + l + sovApi(uint64(l)) + } return n } @@ -21907,6 +21959,12 @@ func (m *ImageFsInfoResponse) Size() (n int) { n += 1 + l + sovApi(uint64(l)) } } + if len(m.ContainerFilesystems) > 0 { + for _, e := range m.ContainerFilesystems { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } return n } @@ -23241,6 +23299,7 @@ func (this *ImageSpec) String() string { `Image:` + fmt.Sprintf("%v", this.Image) + `,`, `Annotations:` + mapStringForAnnotations + `,`, `UserSpecifiedImage:` + fmt.Sprintf("%v", this.UserSpecifiedImage) + `,`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, `}`, }, "") return s @@ -24227,8 +24286,14 @@ func (this *ImageFsInfoResponse) String() string { repeatedStringForImageFilesystems += strings.Replace(f.String(), "FilesystemUsage", "FilesystemUsage", 1) + "," } repeatedStringForImageFilesystems += "}" + repeatedStringForContainerFilesystems := "[]*FilesystemUsage{" + for _, f := range this.ContainerFilesystems { + repeatedStringForContainerFilesystems += strings.Replace(f.String(), "FilesystemUsage", "FilesystemUsage", 1) + "," + } + repeatedStringForContainerFilesystems += "}" s := strings.Join([]string{`&ImageFsInfoResponse{`, `ImageFilesystems:` + repeatedStringForImageFilesystems + `,`, + `ContainerFilesystems:` + repeatedStringForContainerFilesystems + `,`, `}`, }, "") return s @@ -32795,6 +32860,38 @@ func (m *ImageSpec) Unmarshal(dAtA []byte) error { } m.UserSpecifiedImage = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -43368,6 +43465,40 @@ func (m *ImageFsInfoResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ContainerFilesystems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerFilesystems = append(m.ContainerFilesystems, &FilesystemUsage{}) + if err := m.ContainerFilesystems[len(m.ContainerFilesystems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) diff --git a/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto b/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto index e16688d838630..bb15dc2478b9b 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto +++ b/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto @@ -120,7 +120,7 @@ service RuntimeService { rpc CheckpointContainer(CheckpointContainerRequest) returns (CheckpointContainerResponse) {} // GetContainerEvents gets container events from the CRI runtime - rpc GetContainerEvents(GetEventsRequest) returns (stream ContainerEventResponse) {} + rpc GetContainerEvents(GetEventsRequest) returns (stream ContainerEventResponse) {} // ListMetricDescriptors gets the descriptors for the metrics that will be returned in ListPodSandboxMetrics. // This list should be static at startup: either the client and server restart together when @@ -782,6 +782,10 @@ message ImageSpec { // The container image reference specified by the user (e.g. image[:tag] or digest). // Only set if available within the RPC context. string user_specified_image = 18; + // Runtime handler to use for pulling the image. + // If the runtime handler is unknown, the request should be rejected. + // An empty string would select the default runtime handler. + string runtime_handler = 19; } message KeyValue { @@ -1579,6 +1583,11 @@ message WindowsFilesystemUsage { message ImageFsInfoResponse { // Information of image filesystem(s). repeated FilesystemUsage image_filesystems = 1; + // Information of container filesystem(s). + // This is an optional field, may be used for example if container and image + // storage are separated. + // Default will be to return this as empty. + repeated FilesystemUsage container_filesystems = 2; } message ContainerStatsRequest{ diff --git a/staging/src/k8s.io/cri-api/pkg/apis/services.go b/staging/src/k8s.io/cri-api/pkg/apis/services.go index b21b11ba24c47..eb073de989bec 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/services.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/services.go @@ -131,6 +131,6 @@ type ImageManagerService interface { PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) // RemoveImage removes the image. RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) error - // ImageFsInfo returns information of the filesystem that is used to store images. - ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) + // ImageFsInfo returns information of the filesystem(s) used to store the read-only layers and the writeable layer. 
+ ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error) } diff --git a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go index 3f432bf18b198..e6c6b131b5057 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go @@ -38,7 +38,8 @@ type FakeImageService struct { pulledImages []*pulledImage - FakeFilesystemUsage []*runtimeapi.FilesystemUsage + FakeFilesystemUsage []*runtimeapi.FilesystemUsage + FakeContainerFilesystemUsage []*runtimeapi.FilesystemUsage } // SetFakeImages sets the list of fake images for the FakeImageService. @@ -93,6 +94,14 @@ func (r *FakeImageService) SetFakeFilesystemUsage(usage []*runtimeapi.Filesystem r.FakeFilesystemUsage = usage } +// SetFakeContainerFilesystemUsage sets the container FilesystemUsage for FakeImageService. +func (r *FakeImageService) SetFakeContainerFilesystemUsage(usage []*runtimeapi.FilesystemUsage) { + r.Lock() + defer r.Unlock() + + r.FakeContainerFilesystemUsage = usage +} + // NewFakeImageService creates a new FakeImageService. func NewFakeImageService() *FakeImageService { return &FakeImageService{ @@ -218,7 +227,7 @@ func (r *FakeImageService) RemoveImage(_ context.Context, image *runtimeapi.Imag } // ImageFsInfo returns information of the filesystem that is used to store images. -func (r *FakeImageService) ImageFsInfo(_ context.Context) ([]*runtimeapi.FilesystemUsage, error) { +func (r *FakeImageService) ImageFsInfo(_ context.Context) (*runtimeapi.ImageFsInfoResponse, error) { r.Lock() defer r.Unlock() @@ -227,7 +236,10 @@ func (r *FakeImageService) ImageFsInfo(_ context.Context) ([]*runtimeapi.Filesys return nil, err } - return r.FakeFilesystemUsage, nil + return &runtimeapi.ImageFsInfoResponse{ + ImageFilesystems: r.FakeFilesystemUsage, + ContainerFilesystems: r.FakeContainerFilesystemUsage, + }, nil } // AssertImagePulledWithAuth validates whether the image was pulled with auth and asserts if it wasn't.
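A minimal sketch of how the widened CRI surface above might be exercised, assuming the packages shown in the diff are imported as runtimeapi and critest: the test wires the fake image service with separate image and container filesystem usage, reads both back from the new ImageFsInfoResponse, and sets the new ImageSpec.RuntimeHandler field. The mountpoints, the test name, and the "kata" handler value are illustrative assumptions; only the types and methods added or changed in this diff are taken from it.

package fakeimage_test

import (
	"context"
	"testing"

	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	critest "k8s.io/cri-api/pkg/apis/testing"
)

func TestImageFsInfoSplitUsage(t *testing.T) {
	fake := critest.NewFakeImageService()

	// Read-only image layers and writeable container layers can now be
	// reported on separate filesystems; the mountpoints are arbitrary.
	fake.SetFakeFilesystemUsage([]*runtimeapi.FilesystemUsage{
		{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "/var/lib/images"}},
	})
	fake.SetFakeContainerFilesystemUsage([]*runtimeapi.FilesystemUsage{
		{FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: "/var/lib/containers"}},
	})

	resp, err := fake.ImageFsInfo(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if len(resp.ImageFilesystems) != 1 || len(resp.ContainerFilesystems) != 1 {
		t.Fatalf("unexpected ImageFsInfoResponse: %+v", resp)
	}

	// The new ImageSpec.RuntimeHandler selects the handler used for pulling;
	// an empty string keeps the default handler ("kata" is only an example).
	_ = &runtimeapi.ImageSpec{Image: "busybox", RuntimeHandler: "kata"}
}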
diff --git a/staging/src/k8s.io/csi-translation-lib/go.mod b/staging/src/k8s.io/csi-translation-lib/go.mod index a417c528f611d..3d7a9e64e0353 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.mod +++ b/staging/src/k8s.io/csi-translation-lib/go.mod @@ -2,10 +2,10 @@ module k8s.io/csi-translation-lib -go 1.20 +go 1.21.3 require ( - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/klog/v2 v2.100.1 @@ -21,8 +21,8 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/text v0.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum index 16f0b702a3cd3..2cc2641c7fc38 100644 --- a/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/staging/src/k8s.io/csi-translation-lib/go.sum @@ -39,7 +39,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -48,19 +49,16 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -68,26 +66,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -101,12 +99,11 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/csi-translation-lib/translate_test.go b/staging/src/k8s.io/csi-translation-lib/translate_test.go index 8a08c6e6ed3cf..6b9d0c9be901b 100644 --- a/staging/src/k8s.io/csi-translation-lib/translate_test.go +++ b/staging/src/k8s.io/csi-translation-lib/translate_test.go @@ -231,7 +231,7 @@ func TestTopologyTranslation(t *testing.T) { } // verify that either beta or GA kubernetes topology key should exist if !(plugins.TopologyKeyExist(v1.LabelFailureDomainBetaZone, nodeAffinity) || plugins.TopologyKeyExist(v1.LabelTopologyZone, nodeAffinity)) { - t.Errorf("Expected node affinity kuberenetes topology label exist, got %v", *nodeAffinity) + t.Errorf("Expected node affinity kubernetes topology label exist, got %v", *nodeAffinity) } } else { nodeAffinity := newCSIPV.Spec.NodeAffinity diff --git a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go index da2941191cffc..842e53f75ae51 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go @@ -122,6 +122,12 @@ type Driver interface { // can be allocated for it (for example, two GPUs requested but // the node only has one). // + // The potentialNodes slice contains all potential nodes selected + // by the scheduler plus the selected node. The response must + // not contain any other nodes. Implementations do not have to + // care about size limits in the PodSchedulingContext status, the + // caller will handle that. + // // The result of the check is in ClaimAllocation.UnsuitableNodes. // An error indicates that the entire check must be repeated. 
UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*ClaimAllocation, potentialNodes []string) error @@ -156,6 +162,7 @@ type controller struct { driver Driver setReservedFor bool kubeClient kubernetes.Interface + claimNameLookup *resourceclaim.Lookup queue workqueue.RateLimitingInterface eventRecorder record.EventRecorder rcLister resourcev1alpha2listers.ResourceClassLister @@ -180,6 +187,7 @@ func New( rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses() claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims() schedulingCtxInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts() + claimNameLookup := resourceclaim.NewNameLookup(kubeClient) eventBroadcaster := record.NewBroadcaster() go func() { @@ -218,6 +226,7 @@ func New( driver: driver, setReservedFor: true, kubeClient: kubeClient, + claimNameLookup: claimNameLookup, rcLister: rcInformer.Lister(), rcSynced: rcInformer.Informer().HasSynced, claimCache: claimCache, @@ -645,7 +654,7 @@ func (ctrl *controller) allocateClaims(ctx context.Context, claims []*ClaimAlloc } func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim v1.PodResourceClaim) (*ClaimAllocation, error) { - claimName, mustCheckOwner, err := resourceclaim.Name(pod, &podClaim) + claimName, mustCheckOwner, err := ctrl.claimNameLookup.Name(pod, &podClaim) if err != nil { return nil, err } @@ -667,6 +676,11 @@ func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim // Nothing to do for it as part of pod scheduling. return nil, nil } + if claim.Status.Allocation != nil { + // Already allocated, class and parameter are not needed and nothing + // need to be done for the claim either. + return nil, nil + } class, err := ctrl.rcLister.Get(claim.Spec.ResourceClassName) if err != nil { return nil, err @@ -752,12 +766,20 @@ func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulin // and shouldn't, because those allocations might have to be undone to // pick a better node. If we don't need to allocate now, then we'll // simply report back the gather information. + // + // We shouldn't assume that the scheduler has included the selected node + // in the list of potential nodes. Usually it does, but let's make sure + // that we check it. + selectedNode := schedulingCtx.Spec.SelectedNode + potentialNodes := schedulingCtx.Spec.PotentialNodes + if selectedNode != "" && !hasString(potentialNodes, selectedNode) { + potentialNodes = append(potentialNodes, selectedNode) + } if len(schedulingCtx.Spec.PotentialNodes) > 0 { - if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, schedulingCtx.Spec.PotentialNodes); err != nil { + if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, potentialNodes); err != nil { return fmt.Errorf("checking potential nodes: %v", err) } } - selectedNode := schedulingCtx.Spec.SelectedNode logger.V(5).Info("pending pod claims", "claims", claims, "selectedNode", selectedNode) if selectedNode != "" { unsuitable := false @@ -811,12 +833,12 @@ func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulin schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims, resourcev1alpha2.ResourceClaimSchedulingStatus{ Name: delayed.PodClaimName, - UnsuitableNodes: delayed.UnsuitableNodes, + UnsuitableNodes: truncateNodes(delayed.UnsuitableNodes, selectedNode), }) modified = true } else if stringsDiffer(schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) { // Update existing entry. 
- schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes = delayed.UnsuitableNodes + schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes = truncateNodes(delayed.UnsuitableNodes, selectedNode) modified = true } } @@ -832,6 +854,23 @@ func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulin return errPeriodic } +func truncateNodes(nodes []string, selectedNode string) []string { + // We might have checked "potential nodes + selected node" above, so + // this list might be too long by one element. When truncating it, make + // sure that the selected node is listed. + lenUnsuitable := len(nodes) + if lenUnsuitable > resourcev1alpha2.PodSchedulingNodeListMaxSize { + if nodes[0] == selectedNode { + // Truncate at the end and keep selected node in the first element. + nodes = nodes[0 : lenUnsuitable-1] + } else { + // Truncate at the front, it's not the selected node. + nodes = nodes[1:lenUnsuitable] + } + } + return nodes +} + type claimAllocations []*ClaimAllocation // MarshalLog replaces the pointers with the actual structs because diff --git a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go index cfffc610fa85a..bf26913f2faa8 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go @@ -19,6 +19,7 @@ package controller import ( "context" "errors" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -64,6 +65,10 @@ func TestController(t *testing.T) { otherNodeName := "worker-2" unsuitableNodes := []string{otherNodeName} potentialNodes := []string{nodeName, otherNodeName} + maxNodes := make([]string, resourcev1alpha2.PodSchedulingNodeListMaxSize) + for i := range maxNodes { + maxNodes[i] = fmt.Sprintf("node-%d", i) + } withDeletionTimestamp := func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim { var deleted metav1.Time claim = claim.DeepCopy() @@ -101,18 +106,24 @@ func TestController(t *testing.T) { podSchedulingCtx.Spec.SelectedNode = nodeName return podSchedulingCtx } - withUnsuitableNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext { + withSpecificUnsuitableNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext, unsuitableNodes []string) *resourcev1alpha2.PodSchedulingContext { podSchedulingCtx = podSchedulingCtx.DeepCopy() podSchedulingCtx.Status.ResourceClaims = append(podSchedulingCtx.Status.ResourceClaims, resourcev1alpha2.ResourceClaimSchedulingStatus{Name: podClaimName, UnsuitableNodes: unsuitableNodes}, ) return podSchedulingCtx } - withPotentialNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext { + withUnsuitableNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext { + return withSpecificUnsuitableNodes(podSchedulingCtx, unsuitableNodes) + } + withSpecificPotentialNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext, potentialNodes []string) *resourcev1alpha2.PodSchedulingContext { podSchedulingCtx = podSchedulingCtx.DeepCopy() podSchedulingCtx.Spec.PotentialNodes = potentialNodes return podSchedulingCtx } + withPotentialNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext { + return withSpecificPotentialNodes(podSchedulingCtx, potentialNodes) + } var 
m mockDriver @@ -376,6 +387,48 @@ func TestController(t *testing.T) { expectedSchedulingCtx: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))), expectedError: errPeriodic.Error(), }, + // pod with delayed allocation, potential nodes, selected node, all unsuitable -> update unsuitable nodes + "pod-selected-is-potential-node": { + key: podKey, + classes: classes, + claim: delayedClaim, + expectedClaim: delayedClaim, + pod: podWithClaim, + schedulingCtx: withPotentialNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))), + driver: m.expectClassParameters(map[string]interface{}{className: 1}). + expectClaimParameters(map[string]interface{}{claimName: 2}). + expectUnsuitableNodes(map[string][]string{podClaimName: potentialNodes}, nil), + expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx)), potentialNodes), + expectedError: errPeriodic.Error(), + }, + // pod with delayed allocation, max potential nodes, other selected node, all unsuitable -> update unsuitable nodes with truncation at start + "pod-selected-is-potential-node-truncate-first": { + key: podKey, + classes: classes, + claim: delayedClaim, + expectedClaim: delayedClaim, + pod: podWithClaim, + schedulingCtx: withSpecificPotentialNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), maxNodes), + driver: m.expectClassParameters(map[string]interface{}{className: 1}). + expectClaimParameters(map[string]interface{}{claimName: 2}). + expectUnsuitableNodes(map[string][]string{podClaimName: append(maxNodes, nodeName)}, nil), + expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append(maxNodes[1:], nodeName)), + expectedError: errPeriodic.Error(), + }, + // pod with delayed allocation, max potential nodes, other selected node, all unsuitable (but in reverse order) -> update unsuitable nodes with truncation at end + "pod-selected-is-potential-node-truncate-last": { + key: podKey, + classes: classes, + claim: delayedClaim, + expectedClaim: delayedClaim, + pod: podWithClaim, + schedulingCtx: withSpecificPotentialNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), maxNodes), + driver: m.expectClassParameters(map[string]interface{}{className: 1}). + expectClaimParameters(map[string]interface{}{claimName: 2}). 
+ expectUnsuitableNodes(map[string][]string{podClaimName: append([]string{nodeName}, maxNodes...)}, nil), + expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append([]string{nodeName}, maxNodes[:len(maxNodes)-1]...)), + expectedError: errPeriodic.Error(), + }, } { t.Run(name, func(t *testing.T) { _, ctx := ktesting.NewTestContext(t) diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.mod b/staging/src/k8s.io/dynamic-resource-allocation/go.mod index 9a696fae0d4bd..96cf5d8714c1d 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/go.mod +++ b/staging/src/k8s.io/dynamic-resource-allocation/go.mod @@ -2,23 +2,24 @@ module k8s.io/dynamic-resource-allocation -go 1.20 +go 1.21.3 require ( github.com/go-logr/logr v1.2.4 github.com/google/go-cmp v0.5.9 - github.com/stretchr/testify v1.8.2 - google.golang.org/grpc v1.54.0 + github.com/stretchr/testify v1.8.4 + google.golang.org/grpc v1.58.2 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 k8s.io/klog/v2 v2.100.1 k8s.io/kubelet v0.0.0 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b ) require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -37,20 +38,19 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.sum b/staging/src/k8s.io/dynamic-resource-allocation/go.sum index 7f074b2a14a98..f0b728576540f 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/go.sum +++ b/staging/src/k8s.io/dynamic-resource-allocation/go.sum @@ -1,4 +1,4 @@ -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod 
h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -8,15 +8,15 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -32,7 +32,7 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -81,10 +81,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod 
h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -106,14 +106,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -122,33 +122,34 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 
h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -156,11 +157,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/appengine v1.6.7 
h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -176,11 +178,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go b/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go index 3fb1bced3d6fe..93d69695ef485 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go @@ -26,10 +26,14 @@ package resourceclaim import ( "errors" "fmt" + "os" + "strings" v1 
"k8s.io/api/core/v1" resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/utils/ptr" ) var ( @@ -59,9 +63,7 @@ var ( // In this case the boolean determines whether IsForPod must be called // after retrieving the ResourceClaim and before using it. // -// If podClaim.Template is not nil, the caller must check that the -// ResourceClaim is indeed the one that was created for the Pod by calling -// IsUsable. +// Determining the name depends on Kubernetes >= 1.28. func Name(pod *v1.Pod, podClaim *v1.PodResourceClaim) (name *string, mustCheckOwner bool, err error) { switch { case podClaim.Source.ResourceClaimName != nil: @@ -78,6 +80,73 @@ func Name(pod *v1.Pod, podClaim *v1.PodResourceClaim) (name *string, mustCheckOw } } +// NewNameLookup returns an object which handles determining the name of +// a ResourceClaim. In contrast to the stand-alone Name it is compatible +// also with Kubernetes < 1.28. +// +// Providing a client is optional. If none is available, then code can pass nil +// and users can set the DRA_WITH_DETERMINISTIC_RESOURCE_CLAIM_NAMES env +// variable to an arbitrary non-empty value to use the naming from Kubernetes < +// 1.28. +func NewNameLookup(client kubernetes.Interface) *Lookup { + return &Lookup{client: client} +} + +// Lookup stores the state which is necessary to look up ResourceClaim names. +type Lookup struct { + client kubernetes.Interface + usePodStatus *bool +} + +// Name is a variant of the stand-alone Name with support also for Kubernetes < 1.28. +func (l *Lookup) Name(pod *v1.Pod, podClaim *v1.PodResourceClaim) (name *string, mustCheckOwner bool, err error) { + if l.usePodStatus == nil { + if value, _ := os.LookupEnv("DRA_WITH_DETERMINISTIC_RESOURCE_CLAIM_NAMES"); value != "" { + l.usePodStatus = ptr.To(false) + } else if l.client != nil { + // Check once. This does not detect upgrades or + // downgrades, but that is good enough for the simple + // test scenarios that the Kubernetes < 1.28 support is + // meant for. + info, err := l.client.Discovery().ServerVersion() + if err != nil { + return nil, false, fmt.Errorf("look up server version: %v", err) + } + if info.Major == "" { + // Fake client... + l.usePodStatus = ptr.To(true) + } else { + switch strings.Compare(info.Major, "1") { + case -1: + // Huh? + l.usePodStatus = ptr.To(false) + case 0: + // info.Minor may have a suffix which makes it larger than 28. + // We don't care about pre-releases here. + l.usePodStatus = ptr.To(strings.Compare("28", info.Minor) <= 0) + case 1: + // Kubernetes 2? Yeah! + l.usePodStatus = ptr.To(true) + } + } + } + } + + if *l.usePodStatus { + return Name(pod, podClaim) + } + + switch { + case podClaim.Source.ResourceClaimName != nil: + return podClaim.Source.ResourceClaimName, false, nil + case podClaim.Source.ResourceClaimTemplateName != nil: + name := pod.Name + "-" + podClaim.Name + return &name, true, nil + default: + return nil, false, fmt.Errorf(`pod "%s/%s", spec.resourceClaim %q: %w`, pod.Namespace, pod.Name, podClaim.Name, ErrAPIUnsupported) + } +} + // IsForPod checks that the ResourceClaim is the one that // was created for the Pod. 
It returns an error that is informative // enough to be returned by the caller without adding further details diff --git a/staging/src/k8s.io/endpointslice/go.mod b/staging/src/k8s.io/endpointslice/go.mod index 01656c03fb587..ec527ad558578 100644 --- a/staging/src/k8s.io/endpointslice/go.mod +++ b/staging/src/k8s.io/endpointslice/go.mod @@ -2,12 +2,12 @@ module k8s.io/endpointslice -go 1.20 +go 1.21.3 require ( github.com/davecgh/go-spew v1.1.1 github.com/google/go-cmp v0.5.9 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 @@ -20,7 +20,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -46,18 +46,18 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/staging/src/k8s.io/endpointslice/go.sum b/staging/src/k8s.io/endpointslice/go.sum index 315ca32a9c2e1..f7c9663282bde 100644 --- a/staging/src/k8s.io/endpointslice/go.sum +++ b/staging/src/k8s.io/endpointslice/go.sum @@ -1,4 +1,5 @@ -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= @@ -16,8 +17,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod 
h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -60,7 +61,7 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -93,10 +94,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -122,28 +123,26 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -152,10 +151,10 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
-golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -164,23 +163,23 @@ golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -188,10 +187,9 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -207,11 +205,11 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/kms/apis/v2/api.pb.go b/staging/src/k8s.io/kms/apis/v2/api.pb.go index 1b634f9323e50..13715bd9c8c4b 100644 --- a/staging/src/k8s.io/kms/apis/v2/api.pb.go +++ b/staging/src/k8s.io/kms/apis/v2/api.pb.go @@ -289,6 +289,9 @@ type EncryptResponse struct { KeyId string `protobuf:"bytes,2,opt,name=key_id,json=keyId,proto3" 
json:"key_id,omitempty"` // Additional metadata to be stored with the encrypted data. // This data is stored in plaintext in etcd. KMS plugin implementations are responsible for pre-encrypting any sensitive data. + // Annotations must satisfy the following constraints: + // 1. Annotation key must be a fully qualified domain name that conforms to the definition in DNS (RFC 1123). + // 2. The size of annotations keys + values is less than 32 kB. Annotations map[string][]byte `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` diff --git a/staging/src/k8s.io/kms/apis/v2/api.proto b/staging/src/k8s.io/kms/apis/v2/api.proto index 3c7d335e8b665..d2c90048e77e6 100644 --- a/staging/src/k8s.io/kms/apis/v2/api.proto +++ b/staging/src/k8s.io/kms/apis/v2/api.proto @@ -74,5 +74,8 @@ message EncryptResponse { string key_id = 2; // Additional metadata to be stored with the encrypted data. // This data is stored in plaintext in etcd. KMS plugin implementations are responsible for pre-encrypting any sensitive data. + // Annotations must satisfy the following constraints: + // 1. Annotation key must be a fully qualified domain name that conforms to the definition in DNS (RFC 1123). + // 2. The size of annotations keys + values is less than 32 kB. map annotations = 3; } diff --git a/staging/src/k8s.io/kms/go.mod b/staging/src/k8s.io/kms/go.mod index f4234f31ebed4..69a4594bec747 100644 --- a/staging/src/k8s.io/kms/go.mod +++ b/staging/src/k8s.io/kms/go.mod @@ -2,31 +2,20 @@ module k8s.io/kms -go 1.20 +go 1.21.3 require ( github.com/gogo/protobuf v1.3.2 - google.golang.org/grpc v1.54.0 - k8s.io/apimachinery v0.0.0 - k8s.io/client-go v0.0.0 - k8s.io/klog/v2 v2.100.1 + google.golang.org/grpc v1.58.2 ) require ( - github.com/go-logr/logr v1.2.4 // indirect github.com/golang/protobuf v1.5.3 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/protobuf v1.31.0 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect ) -replace ( - k8s.io/api => ../api - k8s.io/apimachinery => ../apimachinery - k8s.io/client-go => ../client-go - k8s.io/kms => ../kms -) +replace k8s.io/kms => ../kms diff --git a/staging/src/k8s.io/kms/go.sum b/staging/src/k8s.io/kms/go.sum index 448f4f192a701..f6591ece9a8ba 100644 --- a/staging/src/k8s.io/kms/go.sum +++ b/staging/src/k8s.io/kms/go.sum @@ -1,65 +1,29 @@ -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -67,51 +31,40 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto 
v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/staging/src/k8s.io/kms/internal/mock_aes_remote_service.go b/staging/src/k8s.io/kms/internal/mock_aes_remote_service.go deleted file mode 100644 index a863c02278767..0000000000000 --- a/staging/src/k8s.io/kms/internal/mock_aes_remote_service.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internal - -import ( - "context" - "crypto/aes" - "errors" - - aestransformer "k8s.io/kms/pkg/encrypt/aes" - "k8s.io/kms/pkg/service" - "k8s.io/kms/pkg/value" -) - -var _ service.Service = &mockAESRemoteService{} - -const ( - mockAnnotationKey = "version.encryption.remote.io" -) - -type mockAESRemoteService struct { - keyID string - transformer value.Transformer - dataCtx value.DefaultContext -} - -func (s *mockAESRemoteService) Encrypt(ctx context.Context, uid string, plaintext []byte) (*service.EncryptResponse, error) { - out, err := s.transformer.TransformToStorage(ctx, plaintext, s.dataCtx) - if err != nil { - return nil, err - } - - return &service.EncryptResponse{ - KeyID: s.keyID, - Ciphertext: out, - Annotations: map[string][]byte{ - mockAnnotationKey: []byte("1"), - }, - }, nil -} - -func (s *mockAESRemoteService) Decrypt(ctx context.Context, uid string, req *service.DecryptRequest) ([]byte, error) { - if len(req.Annotations) != 1 { - return nil, errors.New("invalid annotations") - } - if v, ok := req.Annotations[mockAnnotationKey]; !ok || string(v) != "1" { - return nil, errors.New("invalid version in annotations") - } - if req.KeyID != s.keyID { - return nil, errors.New("invalid keyID") - } - from, _, err := s.transformer.TransformFromStorage(ctx, req.Ciphertext, s.dataCtx) - if err != nil { - return nil, err - } - return from, nil -} - -func (s *mockAESRemoteService) Status(ctx context.Context) (*service.StatusResponse, error) { - resp := &service.StatusResponse{ - Version: "v2beta1", - Healthz: "ok", - KeyID: s.keyID, - } - return resp, nil -} - -// NewMockAESService creates an instance of mockAESRemoteService. -func NewMockAESService(aesKey string, keyID string) (service.Service, error) { - block, err := aes.NewCipher([]byte(aesKey)) - if err != nil { - return nil, err - } - if len(keyID) == 0 { - return nil, errors.New("invalid keyID") - } - return &mockAESRemoteService{ - transformer: aestransformer.NewGCMTransformer(block), - keyID: keyID, - dataCtx: value.DefaultContext([]byte{}), - }, nil -} diff --git a/staging/src/k8s.io/kms/internal/mock_aes_remote_service_test.go b/staging/src/k8s.io/kms/internal/mock_aes_remote_service_test.go deleted file mode 100644 index 123363b925073..0000000000000 --- a/staging/src/k8s.io/kms/internal/mock_aes_remote_service_test.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internal - -import ( - "bytes" - "context" - "testing" - - "k8s.io/kms/pkg/service" -) - -const ( - version = "v2beta1" - testAESKey = "abcdefghijklmnop" - testKeyID = "test-key-id" - testPlaintext = "lorem ipsum dolor sit amet" -) - -func testContext(t *testing.T) context.Context { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - return ctx -} - -func TestMockAESRemoteService(t *testing.T) { - t.Parallel() - ctx := testContext(t) - - plaintext := []byte(testPlaintext) - - kmsService, err := NewMockAESService(testAESKey, testKeyID) - if err != nil { - t.Fatal(err) - } - - t.Run("should be able to encrypt and decrypt", func(t *testing.T) { - t.Parallel() - - encRes, err := kmsService.Encrypt(ctx, "", plaintext) - if err != nil { - t.Fatal(err) - } - - if bytes.Equal(plaintext, encRes.Ciphertext) { - t.Fatal("plaintext and ciphertext shouldn't be equal!") - } - - decRes, err := kmsService.Decrypt(ctx, "", &service.DecryptRequest{ - Ciphertext: encRes.Ciphertext, - KeyID: encRes.KeyID, - Annotations: encRes.Annotations, - }) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(decRes, plaintext) { - t.Errorf("want: %q, have: %q", plaintext, decRes) - } - }) - - t.Run("should return error when decrypt with an invalid keyID", func(t *testing.T) { - t.Parallel() - - encRes, err := kmsService.Encrypt(ctx, "", plaintext) - if err != nil { - t.Fatal(err) - } - - if bytes.Equal(plaintext, encRes.Ciphertext) { - t.Fatal("plaintext and ciphertext shouldn't be equal!") - } - - _, err = kmsService.Decrypt(ctx, "", &service.DecryptRequest{ - Ciphertext: encRes.Ciphertext, - KeyID: encRes.KeyID + "1", - Annotations: encRes.Annotations, - }) - if err.Error() != "invalid keyID" { - t.Errorf("should have returned an invalid keyID error. Got %v, requested keyID: %q, remote service keyID: %q", err, encRes.KeyID+"1", testKeyID) - } - }) - - t.Run("should return status data", func(t *testing.T) { - t.Parallel() - - status, err := kmsService.Status(ctx) - if err != nil { - t.Fatal(err) - } - - if status.Healthz != "ok" { - t.Errorf("want: %q, have: %q", "ok", status.Healthz) - } - if len(status.KeyID) == 0 { - t.Errorf("want: len(keyID) > 0, have: %d", len(status.KeyID)) - } - if status.Version != version { - t.Errorf("want %q, have: %q", version, status.Version) - } - }) -} diff --git a/staging/src/k8s.io/kms/internal/mock_latency_remote_service.go b/staging/src/k8s.io/kms/internal/mock_latency_remote_service.go deleted file mode 100644 index 188e6e5693609..0000000000000 --- a/staging/src/k8s.io/kms/internal/mock_latency_remote_service.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internal - -import ( - "context" - "time" - - "k8s.io/kms/pkg/service" -) - -type mockLatencyRemoteService struct { - delegate service.Service - latency time.Duration -} - -var _ service.Service = &mockLatencyRemoteService{} - -func (s *mockLatencyRemoteService) Decrypt(ctx context.Context, uid string, req *service.DecryptRequest) ([]byte, error) { - time.Sleep(s.latency) - return s.delegate.Decrypt(ctx, uid, req) -} - -func (s *mockLatencyRemoteService) Encrypt(ctx context.Context, uid string, data []byte) (*service.EncryptResponse, error) { - time.Sleep(s.latency) - return s.delegate.Encrypt(ctx, uid, data) -} - -func (s *mockLatencyRemoteService) Status(ctx context.Context) (*service.StatusResponse, error) { - // Passthrough here, not adding any delays for status as delays are usually negligible compare to encrypt and decrypt requests. - return s.delegate.Status(ctx) -} - -// NewMockLatencyService creates an instance of mockLatencyRemoteService. -func NewMockLatencyService(delegate service.Service, latency time.Duration) service.Service { - return &mockLatencyRemoteService{ - delegate: delegate, - latency: latency, - } -} diff --git a/staging/src/k8s.io/kms/internal/mock_latency_remote_service_test.go b/staging/src/k8s.io/kms/internal/mock_latency_remote_service_test.go deleted file mode 100644 index 5512b5b8231cc..0000000000000 --- a/staging/src/k8s.io/kms/internal/mock_latency_remote_service_test.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "bytes" - "testing" - "time" - - "k8s.io/kms/pkg/service" -) - -const ( - testLatencyInMillisecond = 100 * time.Millisecond -) - -func TestMockLatencyRemoteService(t *testing.T) { - t.Parallel() - ctx := testContext(t) - - plaintext := []byte(testPlaintext) - aesService, err := NewMockAESService(testAESKey, testKeyID) - if err != nil { - t.Fatal(err) - } - kmsService := NewMockLatencyService(aesService, testLatencyInMillisecond) - - t.Run("should be able to encrypt and decrypt with some known latency", func(t *testing.T) { - t.Parallel() - start := time.Now() - encRes, err := kmsService.Encrypt(ctx, "", plaintext) - if err != nil { - t.Fatal(err) - } - - duration := time.Since(start) - - if bytes.Equal(plaintext, encRes.Ciphertext) { - t.Fatal("plaintext and ciphertext shouldn't be equal!") - } - // Max is set to 3s to limit the risk of a CPU limited CI node taking a long time to do encryption. 
- if duration < testLatencyInMillisecond || duration > 3*time.Second { - t.Errorf("duration for encrypt should be around: %q, have: %q", testLatencyInMillisecond, duration) - } - start = time.Now() - decRes, err := kmsService.Decrypt(ctx, "", &service.DecryptRequest{ - Ciphertext: encRes.Ciphertext, - KeyID: encRes.KeyID, - Annotations: encRes.Annotations, - }) - if err != nil { - t.Fatal(err) - } - duration = time.Since(start) - - if !bytes.Equal(decRes, plaintext) { - t.Errorf("want: %q, have: %q", plaintext, decRes) - } - if duration < testLatencyInMillisecond || duration > 3*time.Second { - t.Errorf("duration decrypt should be around: %q, have: %q", testLatencyInMillisecond, duration) - } - }) - - t.Run("should return status data", func(t *testing.T) { - t.Parallel() - start := time.Now() - status, err := kmsService.Status(ctx) - if err != nil { - t.Fatal(err) - } - duration := time.Since(start) - if status.Healthz != "ok" { - t.Errorf("want: %q, have: %q", "ok", status.Healthz) - } - if len(status.KeyID) == 0 { - t.Errorf("want: len(keyID) > 0, have: %d", len(status.KeyID)) - } - if status.Version != version { - t.Errorf("want %q, have: %q", version, status.Version) - } - if duration > 3*time.Second { - t.Errorf("duration status should be less than: 3s, have: %q", duration) - } - }) -} diff --git a/staging/src/k8s.io/kms/internal/mock_ratelimit_remote_service.go b/staging/src/k8s.io/kms/internal/mock_ratelimit_remote_service.go deleted file mode 100644 index 837c77814e606..0000000000000 --- a/staging/src/k8s.io/kms/internal/mock_ratelimit_remote_service.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "k8s.io/client-go/util/flowcontrol" - "k8s.io/kms/pkg/service" -) - -type mockRateLimitRemoteService struct { - delegate service.Service - limiter flowcontrol.RateLimiter -} - -var _ service.Service = &mockRateLimitRemoteService{} - -func (s *mockRateLimitRemoteService) Decrypt(ctx context.Context, uid string, req *service.DecryptRequest) ([]byte, error) { - if !s.limiter.TryAccept() { - return nil, status.New(codes.ResourceExhausted, "remote decrypt rate limit exceeded").Err() - } - return s.delegate.Decrypt(ctx, uid, req) -} - -func (s *mockRateLimitRemoteService) Encrypt(ctx context.Context, uid string, data []byte) (*service.EncryptResponse, error) { - if !s.limiter.TryAccept() { - return nil, status.New(codes.ResourceExhausted, "remote encrypt rate limit exceeded").Err() - } - return s.delegate.Encrypt(ctx, uid, data) -} - -func (s *mockRateLimitRemoteService) Status(ctx context.Context) (*service.StatusResponse, error) { - // Passthrough here, not adding any rate limiting for status as rate limits are usually for encrypt and decrypt requests. - return s.delegate.Status(ctx) -} - -// NewMockRateLimitService creates an instance of mockRateLimitRemoteService. 
-func NewMockRateLimitService(delegate service.Service, qps float32, burst int) service.Service { - return &mockRateLimitRemoteService{ - delegate: delegate, - limiter: flowcontrol.NewTokenBucketRateLimiter(qps, burst), - } -} diff --git a/staging/src/k8s.io/kms/internal/mock_ratelimit_remote_service_test.go b/staging/src/k8s.io/kms/internal/mock_ratelimit_remote_service_test.go deleted file mode 100644 index 8c5b807eba4f7..0000000000000 --- a/staging/src/k8s.io/kms/internal/mock_ratelimit_remote_service_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "bytes" - "testing" -) - -const ( - testQPS = 1 - // testBurst should be no more than 9 since 9*100millisecond (test latency) = 900ms, which guarantees there is enough bursts per second. - testBurst = 5 -) - -func TestMockRateLimitRemoteService(t *testing.T) { - t.Parallel() - ctx := testContext(t) - plaintext := []byte(testPlaintext) - aesService, err := NewMockAESService(testAESKey, testKeyID) - if err != nil { - t.Fatal(err) - } - mockLatencyService := NewMockLatencyService(aesService, testLatencyInMillisecond) - kmsService := NewMockRateLimitService(mockLatencyService, testQPS, testBurst) - - t.Run("should hit rate limit", func(t *testing.T) { - rateLimitExceeded := false - for i := 0; i < 100; i++ { - encRes, err := kmsService.Encrypt(ctx, "", plaintext) - if i >= testBurst { - if err != nil { - if err.Error() != "rpc error: code = ResourceExhausted desc = remote encrypt rate limit exceeded" { - t.Fatalf("should have failed with rate limit exceeded %d, have err: %v", testBurst, err) - } - rateLimitExceeded = true - } else { - if bytes.Equal(plaintext, encRes.Ciphertext) { - t.Fatal("plaintext and ciphertext shouldn't be equal!") - } - } - } else { - if err != nil { - t.Fatalf("err: %v, i: %d", err, i) - } - if bytes.Equal(plaintext, encRes.Ciphertext) { - t.Fatal("plaintext and ciphertext shouldn't be equal!") - } - } - // status should not hit any rate limit - _, err = kmsService.Status(ctx) - if err != nil { - t.Fatal(err) - } - } - if !rateLimitExceeded { - t.Errorf("should have reached the rate limit of %d", testBurst) - } - }) -} diff --git a/staging/src/k8s.io/kms/internal/plugins/mock/Dockerfile b/staging/src/k8s.io/kms/internal/plugins/_mock/Dockerfile similarity index 57% rename from staging/src/k8s.io/kms/internal/plugins/mock/Dockerfile rename to staging/src/k8s.io/kms/internal/plugins/_mock/Dockerfile index adebe69408124..afe4e957f5172 100644 --- a/staging/src/k8s.io/kms/internal/plugins/mock/Dockerfile +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/Dockerfile @@ -12,25 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM golang:1.20.1-bullseye as builder +FROM golang:1.21.3-bullseye as builder WORKDIR /workspace # Copy the source COPY apimachinery/ apimachinery/ -COPY client-go/ client-go/ COPY kms/ kms/ -WORKDIR /workspace/kms/internal/plugins/mock +WORKDIR /workspace/kms/internal/plugins/_mock ARG TARGETARCH ARG TARGETPLATFORM -RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o mock-kms-plugin plugin.go -RUN chmod +x mock-kms-plugin +RUN CGO_ENABLED=1 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o mock-kms-plugin plugin.go -# Use distroless as minimal base image to package the manager binary -# Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM --platform=${TARGETPLATFORM:-linux/amd64} gcr.io/distroless/static:nonroot -WORKDIR / -COPY --from=builder /workspace/kms/internal/plugins/mock/mock-kms-plugin . +FROM alpine:latest -ENTRYPOINT [ "/mock-kms-plugin" ] +RUN apk add --update --no-cache ca-certificates gcompat +RUN apk add --no-cache softhsm + +COPY --from=builder /workspace/kms/internal/plugins/_mock/mock-kms-plugin /usr/local/bin/mock-kms-plugin + +ENTRYPOINT [ "mock-kms-plugin" ] diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/README.md b/staging/src/k8s.io/kms/internal/plugins/_mock/README.md new file mode 100644 index 0000000000000..93ccf397fb5d0 --- /dev/null +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/README.md @@ -0,0 +1,5 @@ +# Mock KMS Plugin + +This is a mock KMS plugin for testing purposes. It implements the KMS plugin using the PKCS#11 interface backed by [SoftHSM](https://www.opendnssec.org/softhsm/). It is intended for testing only, not for production use. + +The directory is named `_mock` so that it is ignored by the `go mod` tooling in the root directory.
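For context on the SoftHSM wiring referenced throughout this change: the plugin loads its PKCS#11 settings with the crypto11 library's `ConfigureFromFile` from the path passed via `--config-file-path` (default `/etc/softhsm-config.json`), and the init container in `kms.yaml` below pulls the `tokenLabel`, `pin`, and `path` fields out of the same file with `jq`. The patch never shows that file itself, so the following is only a sketch of what it could contain; the module path, token label, and PIN are placeholders, not values taken from this change.

```sh
# Illustrative sketch only: the module path, token label, and PIN below are
# placeholders; substitute whatever the e2e environment actually provisions.
cat > /etc/softhsm-config.json <<'EOF'
{
  "path": "/usr/lib/softhsm/libsofthsm2.so",
  "tokenLabel": "kms-e2e",
  "pin": "1234"
}
EOF
```

With a file like this in place, the init container's `softhsm2-util --init-token` and `pkcs11-tool --keygen ... --label kms-test` calls create the token and the `kms-test` AES key that `NewPKCS11RemoteService(configFilePath, "kms-test")` later resolves.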
diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod new file mode 100644 index 0000000000000..e5d7f5fdb792b --- /dev/null +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod @@ -0,0 +1,24 @@ +module k8s.io/kms/plugins/mock + +go 1.21.3 + +require ( + github.com/ThalesIgnite/crypto11 v1.2.5 + k8s.io/kms v0.0.0-00010101000000-000000000000 +) + +require ( + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/thales-e-security/pool v0.0.2 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.2 // indirect + google.golang.org/protobuf v1.31.0 // indirect +) + +replace k8s.io/kms => ../../../../kms diff --git a/staging/src/k8s.io/kms/internal/plugins/mock/go.sum b/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum similarity index 62% rename from staging/src/k8s.io/kms/internal/plugins/mock/go.sum rename to staging/src/k8s.io/kms/internal/plugins/_mock/go.sum index 4237d8303bc94..02f3457d30172 100644 --- a/staging/src/k8s.io/kms/internal/plugins/mock/go.sum +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/go.sum @@ -1,6 +1,7 @@ -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= @@ -8,8 +9,21 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f h1:eVB9ELsoq5ouItQBr5Tj334bhPJG/MX+m7rTchmzVUQ= +github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= +github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -21,22 +35,20 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -45,15 +57,11 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/kms.yaml b/staging/src/k8s.io/kms/internal/plugins/_mock/kms.yaml new file mode 100644 index 0000000000000..f2c01f1db4d02 --- /dev/null +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/kms.yaml @@ -0,0 +1,69 @@ +apiVersion: v1 +kind: Pod +metadata: + name: mock-kmsv2-provider + namespace: kube-system + labels: + tier: control-plane + component: mock-kmsv2-provider +spec: + # hostNetwork: true is required because the plugin is run as a static pod + # on the control plane node and needs to run before the CNI plugins are initialized. 
+ hostNetwork: true + initContainers: + - args: + - | + #!/bin/sh + set -e + set -x + + # if token exists, skip initialization + if [ $(ls -1 /var/lib/softhsm/tokens | wc -l) -ge 1 ]; then + echo "Skipping initialization of softhsm" + exit 0 + fi + + mkdir -p /var/lib/softhsm/tokens + apk add --update --no-cache ca-certificates jq + apk add --no-cache ccid opensc softhsm + + TOKEN_LABEL=$(jq -r '.tokenLabel' /etc/softhsm-config.json) + PIN=$(jq -r '.pin' /etc/softhsm-config.json) + MODULE_PATH=$(jq -r '.path' /etc/softhsm-config.json) + + softhsm2-util --init-token --free --label $TOKEN_LABEL --pin $PIN --so-pin $PIN + pkcs11-tool --module $MODULE_PATH --keygen --key-type aes:32 --pin $PIN --token-label $TOKEN_LABEL --label kms-test + command: + - /bin/sh + - -c + image: alpine:latest + imagePullPolicy: IfNotPresent + name: init-mock-kmsv2-provider + volumeMounts: + - mountPath: /var/lib/softhsm/tokens + name: softhsm-tokens + - mountPath: /etc/softhsm-config.json + name: softhsm-config + containers: + - name: mock-kmsv2-provider + image: localhost:5000/mock-kms-provider:e2e + imagePullPolicy: IfNotPresent + volumeMounts: + - name: sock + mountPath: /tmp + - name: softhsm-config + mountPath: /etc/softhsm-config.json + - name: softhsm-tokens + mountPath: /var/lib/softhsm/tokens + volumes: + - name: sock + hostPath: + path: /tmp + - name: softhsm-config + hostPath: + path: /etc/softhsm-config.json + type: File + - name: softhsm-tokens + hostPath: + path: /var/lib/softhsm/tokens + type: DirectoryOrCreate diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/pkcs11/pkcs11.go b/staging/src/k8s.io/kms/internal/plugins/_mock/pkcs11/pkcs11.go new file mode 100644 index 0000000000000..f1cc51940fc01 --- /dev/null +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/pkcs11/pkcs11.go @@ -0,0 +1,118 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pkcs11 + +import ( + "context" + "crypto/cipher" + "crypto/rand" + "fmt" + + crypot11 "github.com/ThalesIgnite/crypto11" + + "k8s.io/kms/pkg/service" +) + +const ( + mockAnnotationKey = "version.encryption.remote.io" +) + +var _ service.Service = &pkcs11RemoteService{} + +type pkcs11RemoteService struct { + keyID string + aead cipher.AEAD +} + +// NewPKCS11RemoteService creates a new PKCS11 remote service with SoftHSMv2 configuration file and keyID +func NewPKCS11RemoteService(configFilePath, keyID string) (service.Service, error) { + ctx, err := crypot11.ConfigureFromFile(configFilePath) + if err != nil { + return nil, err + } + + if len(keyID) == 0 { + return nil, fmt.Errorf("invalid keyID") + } + + remoteService := &pkcs11RemoteService{ + keyID: keyID, + } + + key, err := ctx.FindKey(nil, []byte(keyID)) + if err != nil { + return nil, err + } + if key == nil { + return nil, fmt.Errorf("key not found") + } + if remoteService.aead, err = key.NewGCM(); err != nil { + return nil, err + } + + return remoteService, nil +} + +func (s *pkcs11RemoteService) Encrypt(ctx context.Context, uid string, plaintext []byte) (*service.EncryptResponse, error) { + nonceSize := s.aead.NonceSize() + result := make([]byte, nonceSize+s.aead.Overhead()+len(plaintext)) + n, err := rand.Read(result[:nonceSize]) + if err != nil { + return nil, err + } + if n != nonceSize { + return nil, fmt.Errorf("unable to read sufficient random bytes") + } + cipherText := s.aead.Seal(result[nonceSize:nonceSize], result[:nonceSize], plaintext, []byte(s.keyID)) + + return &service.EncryptResponse{ + Ciphertext: result[:nonceSize+len(cipherText)], + KeyID: s.keyID, + Annotations: map[string][]byte{ + mockAnnotationKey: []byte("1"), + }, + }, nil +} + +func (s *pkcs11RemoteService) Decrypt(ctx context.Context, uid string, req *service.DecryptRequest) ([]byte, error) { + if len(req.Annotations) != 1 { + return nil, fmt.Errorf("invalid annotations") + } + if v, ok := req.Annotations[mockAnnotationKey]; !ok || string(v) != "1" { + return nil, fmt.Errorf("invalid version in annotations") + } + + if req.KeyID != s.keyID { + return nil, fmt.Errorf("invalid keyID") + } + + nonceSize := s.aead.NonceSize() + data := req.Ciphertext + if len(data) < nonceSize { + return nil, fmt.Errorf("the stored data was shorter than the required size") + } + + return s.aead.Open(nil, data[:nonceSize], data[nonceSize:], []byte(s.keyID)) +} + +func (s *pkcs11RemoteService) Status(ctx context.Context) (*service.StatusResponse, error) { + return &service.StatusResponse{ + Version: "v2", + Healthz: "ok", + KeyID: s.keyID, + }, nil +} diff --git a/staging/src/k8s.io/kms/internal/plugins/mock/plugin.go b/staging/src/k8s.io/kms/internal/plugins/_mock/plugin.go similarity index 71% rename from staging/src/k8s.io/kms/internal/plugins/mock/plugin.go rename to staging/src/k8s.io/kms/internal/plugins/_mock/plugin.go index ad9d2903d1ac2..c5c32717777e6 100644 --- a/staging/src/k8s.io/kms/internal/plugins/mock/plugin.go +++ b/staging/src/k8s.io/kms/internal/plugins/_mock/plugin.go @@ -24,15 +24,15 @@ import ( "syscall" "time" - "k8s.io/klog/v2" - "k8s.io/kms/internal" "k8s.io/kms/pkg/service" "k8s.io/kms/pkg/util" + "k8s.io/kms/plugins/mock/pkcs11" ) var ( - listenAddr = flag.String("listen-addr", "unix:///tmp/kms.socket", "gRPC listen address") - timeout = flag.Duration("timeout", 5*time.Second, "gRPC timeout") + listenAddr = flag.String("listen-addr", "unix:///tmp/kms.socket", "gRPC listen address") + timeout = flag.Duration("timeout", 5*time.Second, "gRPC 
timeout") + configFilePath = flag.String("config-file-path", "/etc/softhsm-config.json", "SoftHSM config file path") ) func main() { @@ -40,14 +40,12 @@ func main() { addr, err := util.ParseEndpoint(*listenAddr) if err != nil { - klog.ErrorS(err, "failed to parse endpoint") - os.Exit(1) + panic("failed to parse endpoint: " + err.Error()) } - remoteKMSService, err := internal.NewMockAESService("somerandomstring", "aes-key-id") + remoteKMSService, err := pkcs11.NewPKCS11RemoteService(*configFilePath, "kms-test") if err != nil { - klog.ErrorS(err, "failed to create remote service") - os.Exit(1) + panic("failed to create remote service: " + err.Error()) } ctx := withShutdownSignal(context.Background()) @@ -57,16 +55,13 @@ func main() { remoteKMSService, ) - klog.InfoS("starting server", "listenAddr", *listenAddr) go func() { if err := grpcService.ListenAndServe(); err != nil { - klog.ErrorS(err, "failed to serve") - os.Exit(1) + panic("failed to serve: " + err.Error()) } }() <-ctx.Done() - klog.InfoS("shutting down server") grpcService.Shutdown() } @@ -80,7 +75,6 @@ func withShutdownSignal(ctx context.Context) context.Context { go func() { <-signalChan - klog.InfoS("received shutdown signal") cancel() }() return nctx diff --git a/staging/src/k8s.io/kms/internal/plugins/mock/go.mod b/staging/src/k8s.io/kms/internal/plugins/mock/go.mod deleted file mode 100644 index 68e0417892591..0000000000000 --- a/staging/src/k8s.io/kms/internal/plugins/mock/go.mod +++ /dev/null @@ -1,29 +0,0 @@ -module k8s.io/kms/plugins/mock - -go 1.19 - -require ( - k8s.io/klog/v2 v2.100.1 - k8s.io/kms v0.0.0-00010101000000-000000000000 -) - -require ( - github.com/go-logr/logr v1.2.4 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/grpc v1.54.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect - k8s.io/client-go v0.0.0 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect -) - -replace ( - k8s.io/apimachinery => ../../../../apimachinery - k8s.io/client-go => ../../../../client-go - k8s.io/kms => ../../../../kms -) diff --git a/staging/src/k8s.io/kms/internal/plugins/mock/kms.yaml b/staging/src/k8s.io/kms/internal/plugins/mock/kms.yaml deleted file mode 100644 index 28398cfe75f94..0000000000000 --- a/staging/src/k8s.io/kms/internal/plugins/mock/kms.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: mock-kmsv2-provider - namespace: kube-system - labels: - tier: control-plane - component: mock-kmsv2-provider -spec: - hostNetwork: true - containers: - - name: mock-kmsv2-provider - image: localhost:5000/mock-kms-provider:e2e - imagePullPolicy: IfNotPresent - volumeMounts: - - name: sock - mountPath: /tmp - volumes: - - name: sock - hostPath: - path: /tmp diff --git a/staging/src/k8s.io/kms/pkg/encrypt/aes/aes.go b/staging/src/k8s.io/kms/pkg/encrypt/aes/aes.go deleted file mode 100644 index fad4794873774..0000000000000 --- a/staging/src/k8s.io/kms/pkg/encrypt/aes/aes.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Vendored from kubernetes/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go -// * commit: 90b42f91fd904b71fd52ca9ae55a5de73e6b779a -// * link: https://github.com/kubernetes/kubernetes/blob/90b42f91fd904b71fd52ca9ae55a5de73e6b779a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes.go - -// Package aes transforms values for storage at rest using AES-GCM. -package aes - -import ( - "context" - "crypto/cipher" - "crypto/rand" - "fmt" - - "k8s.io/kms/pkg/value" -) - -// gcm implements AEAD encryption of the provided values given a cipher.Block algorithm. -// The authenticated data provided as part of the value.Context method must match when the same -// value is set to and loaded from storage. In order to ensure that values cannot be copied by -// an attacker from a location under their control, use characteristics of the storage location -// (such as the etcd key) as part of the authenticated data. -// -// Because this mode requires a generated IV and IV reuse is a known weakness of AES-GCM, keys -// must be rotated before a birthday attack becomes feasible. NIST SP 800-38D -// (http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf) recommends using the same -// key with random 96-bit nonces (the default nonce length) no more than 2^32 times, and -// therefore transformers using this implementation *must* ensure they allow for frequent key -// rotation. Future work should include investigation of AES-GCM-SIV as an alternative to -// random nonces. -type gcm struct { - block cipher.Block -} - -// NewGCMTransformer takes the given block cipher and performs encryption and decryption on the given -// data. 
-func NewGCMTransformer(block cipher.Block) value.Transformer { - return &gcm{block: block} -} - -func (t *gcm) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { - aead, err := cipher.NewGCM(t.block) - if err != nil { - return nil, false, err - } - nonceSize := aead.NonceSize() - if len(data) < nonceSize { - return nil, false, fmt.Errorf("the stored data was shorter than the required size") - } - result, err := aead.Open(nil, data[:nonceSize], data[nonceSize:], dataCtx.AuthenticatedData()) - return result, false, err -} - -func (t *gcm) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) { - aead, err := cipher.NewGCM(t.block) - if err != nil { - return nil, err - } - nonceSize := aead.NonceSize() - result := make([]byte, nonceSize+aead.Overhead()+len(data)) - n, err := rand.Read(result[:nonceSize]) - if err != nil { - return nil, err - } - if n != nonceSize { - return nil, fmt.Errorf("unable to read sufficient random bytes") - } - cipherText := aead.Seal(result[nonceSize:nonceSize], result[:nonceSize], data, dataCtx.AuthenticatedData()) - return result[:nonceSize+len(cipherText)], nil -} diff --git a/staging/src/k8s.io/kms/pkg/service/grpc_service.go b/staging/src/k8s.io/kms/pkg/service/grpc_service.go index fc463e5c4af24..d2d06da270016 100644 --- a/staging/src/k8s.io/kms/pkg/service/grpc_service.go +++ b/staging/src/k8s.io/kms/pkg/service/grpc_service.go @@ -23,7 +23,6 @@ import ( "google.golang.org/grpc" - "k8s.io/klog/v2" kmsapi "k8s.io/kms/apis/v2" ) @@ -45,8 +44,6 @@ func NewGRPCService( kmsService Service, ) *GRPCService { - klog.V(4).InfoS("KMS plugin configured", "address", address, "timeout", timeout) - return &GRPCService{ addr: address, timeout: timeout, @@ -70,14 +67,12 @@ func (s *GRPCService) ListenAndServe() error { kmsapi.RegisterKeyManagementServiceServer(gs, s) - klog.V(4).InfoS("kms plugin serving", "address", s.addr) return gs.Serve(ln) } // Shutdown performs a graceful shutdown. Doesn't accept new connections and // blocks until all pending RPCs are finished. func (s *GRPCService) Shutdown() { - klog.V(4).InfoS("kms plugin shutdown", "address", s.addr) if s.server != nil { s.server.GracefulStop() } @@ -86,7 +81,6 @@ func (s *GRPCService) Shutdown() { // Close stops the server by closing all connections immediately and cancels // all active RPCs. func (s *GRPCService) Close() { - klog.V(4).InfoS("kms plugin close", "address", s.addr) if s.server != nil { s.server.Stop() } @@ -108,8 +102,6 @@ func (s *GRPCService) Status(ctx context.Context, _ *kmsapi.StatusRequest) (*kms // Decrypt sends a decryption request to specified kms service. func (s *GRPCService) Decrypt(ctx context.Context, req *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) { - klog.V(4).InfoS("decrypt request received", "id", req.Uid) - plaintext, err := s.kmsService.Decrypt(ctx, req.Uid, &DecryptRequest{ Ciphertext: req.Ciphertext, KeyID: req.KeyId, @@ -126,8 +118,6 @@ func (s *GRPCService) Decrypt(ctx context.Context, req *kmsapi.DecryptRequest) ( // Encrypt sends an encryption request to specified kms service. 
func (s *GRPCService) Encrypt(ctx context.Context, req *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) { - klog.V(4).InfoS("encrypt request received", "id", req.Uid) - encRes, err := s.kmsService.Encrypt(ctx, req.Uid, req.Plaintext) if err != nil { return nil, err diff --git a/staging/src/k8s.io/kms/pkg/service/grpc_service_test.go b/staging/src/k8s.io/kms/pkg/service/grpc_service_test.go index e6f9b6a517a04..3c10fc6f247a6 100644 --- a/staging/src/k8s.io/kms/pkg/service/grpc_service_test.go +++ b/staging/src/k8s.io/kms/pkg/service/grpc_service_test.go @@ -31,11 +31,10 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "k8s.io/apimachinery/pkg/util/wait" kmsapi "k8s.io/kms/apis/v2" ) -const version = "v2alpha1" +const version = "v2" func TestGRPCService(t *testing.T) { t.Parallel() @@ -64,18 +63,28 @@ func TestGRPCService(t *testing.T) { client := newClient(t, address) // make sure the gRPC server is up before running tests - if err := wait.PollImmediateUntilWithContext(ctx, time.Second, func(ctx context.Context) (bool, error) { - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) - defer cancel() - - _, err := client.Status(ctx, &kmsapi.StatusRequest{}) - if err != nil { - t.Logf("failed to get kms status: %v", err) +ready: + for { + select { + case <-ctx.Done(): + t.Fatalf("server failed to start in time: %v", ctx.Err()) + + default: + if done := func() bool { + ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + + _, err := client.Status(ctx, &kmsapi.StatusRequest{}) + if err != nil { + t.Logf("failed to get kms status: %v", err) + } + + return err == nil + }(); done { + break ready + } + time.Sleep(time.Second) } - - return err == nil, nil - }); err != nil { - t.Fatal(err) } t.Run("should be able to encrypt and decrypt through unix domain sockets", func(t *testing.T) { diff --git a/staging/src/k8s.io/kms/pkg/value/interface.go b/staging/src/k8s.io/kms/pkg/value/interface.go deleted file mode 100644 index d7ad3013fe5f2..0000000000000 --- a/staging/src/k8s.io/kms/pkg/value/interface.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import "context" - -// Vendored from kubernetes/staging/src/k8s.io/apiserver/pkg/storage/value/transformer.go -// * commit: 59e1a32fc8ed35e328a3971d3a1d640ffc28ff55 -// * link: https://github.com/kubernetes/kubernetes/blob/59e1a32fc8ed35e328a3971d3a1d640ffc28ff55/staging/src/k8s.io/apiserver/pkg/storage/value/transformer.go - -// Transformer allows a value to be transformed before being read from or written to the underlying store. The methods -// must be able to undo the transformation caused by the other. -type Transformer interface { - // TransformFromStorage may transform the provided data from its underlying storage representation or return an error. - // Stale is true if the object on disk is stale and a write to etcd should be issued, even if the contents of the object - // have not changed. 
- TransformFromStorage(ctx context.Context, data []byte, dataCtx Context) (out []byte, stale bool, err error) - // TransformToStorage may transform the provided data into the appropriate form in storage or return an error. - TransformToStorage(ctx context.Context, data []byte, dataCtx Context) (out []byte, err error) -} - -// Context is additional information that a storage transformation may need to verify the data at rest. -type Context interface { - // AuthenticatedData should return an array of bytes that describes the current value. If the value changes, - // the transformer may report the value as unreadable or tampered. This may be nil if no such description exists - // or is needed. For additional verification, set this to data that strongly identifies the value, such as - // the key and creation version of the stored data. - AuthenticatedData() []byte -} - -// DefaultContext is a simple implementation of Context for a slice of bytes. -type DefaultContext []byte - -// AuthenticatedData returns itself. -func (c DefaultContext) AuthenticatedData() []byte { return c } diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod index 05bbe49e96f20..6cd7ec4355a7d 100644 --- a/staging/src/k8s.io/kube-aggregator/go.mod +++ b/staging/src/k8s.io/kube-aggregator/go.mod @@ -2,17 +2,17 @@ module k8s.io/kube-aggregator -go 1.20 +go 1.21.3 require ( - github.com/emicklei/go-restful/v3 v3.9.0 + github.com/emicklei/go-restful/v3 v3.11.0 github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.5.9 github.com/google/gofuzz v1.2.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 - golang.org/x/net v0.13.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.17.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/apiserver v0.0.0 @@ -20,7 +20,7 @@ require ( k8s.io/code-generator v0.0.0 k8s.io/component-base v0.0.0 k8s.io/klog/v2 v2.100.1 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/structured-merge-diff/v4 v4.3.0 ) @@ -38,7 +38,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -49,14 +49,16 @@ require ( github.com/google/cel-go v0.17.6 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // 
indirect @@ -71,42 +73,41 @@ require ( go.etcd.io/etcd/api/v3 v3.5.9 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect go.etcd.io/etcd/client/v3 v3.5.9 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect - go.opentelemetry.io/otel v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.10.0 // indirect - go.opentelemetry.io/otel/trace v1.10.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.0 // indirect + golang.org/x/tools v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.54.0 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect k8s.io/kms v0.0.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/yaml 
v1.3.0 // indirect ) diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index 0ca22e5b9871f..308e825788f24 100644 --- a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -1,168 +1,132 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod 
h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= 
+cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/datastore v1.13.0/go.mod 
h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1/go.mod 
h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod 
h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= 
+cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= 
+cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -174,25 +138,12 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -205,27 +156,17 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 
v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -250,79 +191,34 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.17.6 h1:QDvHTIJunIsbgN8yVukx0HGnsqVLSY6xGqo+17IjIyM= github.com/google/cel-go v0.17.6/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -332,11 +228,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 
h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -348,8 +241,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -366,6 +257,7 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -378,10 +270,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -391,7 +283,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= @@ -399,7 +290,6 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -407,7 +297,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -424,16 +313,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= @@ -452,32 +339,24 @@ go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 
h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -490,286 +369,73 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= 
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= 
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api 
v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -777,14 +443,12 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -792,27 +456,17 @@ 
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go index 95b03a1dd1fe0..97411783f368c 100644 --- 
a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go @@ -65,7 +65,7 @@ type APIServiceSpec struct { CABundle []byte // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. - // Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. + // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) // We'd recommend something like: *.k8s.io (except extensions) at 18000 and diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto index c3ff865142230..8413a158b2622 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto @@ -102,8 +102,8 @@ message APIServiceSpec { // +optional optional bytes caBundle = 5; - // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. - // Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. + // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. + // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) // We'd recommend something like: *.k8s.io (except extensions) at 18000 and diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go index 9954d7e887692..14f71c70449b0 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go @@ -68,8 +68,8 @@ type APIServiceSpec struct { // +optional CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,5,opt,name=caBundle"` - // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. - // Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. + // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. + // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. 
// The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) // We'd recommend something like: *.k8s.io (except extensions) at 18000 and diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto index 94d73fb7e2088..dca9b1ed63812 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -102,8 +102,8 @@ message APIServiceSpec { // +optional optional bytes caBundle = 5; - // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. - // Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. + // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. + // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) // We'd recommend something like: *.k8s.io (except extensions) at 18000 and diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go index 11cb3fb65b7e7..83fb8445f11e9 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go @@ -71,8 +71,8 @@ type APIServiceSpec struct { // +optional CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,5,opt,name=caBundle"` - // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. - // Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. + // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. + // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). // The secondary sort is based on the alphabetical comparison of the name of the object. 
(v1.bar before v1.foo) // We'd recommend something like: *.k8s.io (except extensions) at 18000 and diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go index b49f9cd3b875e..f0f2b7140b4e4 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go @@ -144,8 +144,9 @@ type APIAggregator struct { // proxyHandlers are the proxy handlers that are currently registered, keyed by apiservice.name proxyHandlers map[string]*proxyHandler - // handledGroups are the groups that already have routes - handledGroups sets.String + // handledGroupVersions contain the groups that already have routes. The key is the name of the group and the value + // is the versions for the group. + handledGroupVersions map[string]sets.Set[string] // lister is used to add group handling for /apis/ aggregator lookups based on // controller state @@ -158,7 +159,7 @@ type APIAggregator struct { openAPIConfig *openapicommon.Config // Enable OpenAPI V3 if these configs are non-nil - openAPIV3Config *openapicommon.Config + openAPIV3Config *openapicommon.OpenAPIV3Config // openAPIAggregationController downloads and merges OpenAPI v2 specs. openAPIAggregationController *openapicontroller.AggregationController @@ -235,7 +236,7 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg delegateHandler: delegationTarget.UnprotectedHandler(), proxyTransportDial: proxyTransportDial, proxyHandlers: map[string]*proxyHandler{}, - handledGroups: sets.String{}, + handledGroupVersions: map[string]sets.Set[string]{}, lister: informerFactory.Apiregistration().V1().APIServices().Lister(), APIRegistrationInformers: informerFactory, serviceResolver: c.ExtraConfig.ServiceResolver, @@ -406,7 +407,7 @@ func (s *APIAggregator) PrepareRun() (preparedAPIAggregator, error) { }) } - if s.openAPIV3Config != nil && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.OpenAPIV3) { + if s.openAPIV3Config != nil { s.GenericAPIServer.AddPostStartHookOrDie("apiservice-openapiv3-controller", func(context genericapiserver.PostStartHookContext) error { go s.openAPIV3AggregationController.Run(context.StopCh) return nil @@ -446,13 +447,13 @@ func (s *APIAggregator) PrepareRun() (preparedAPIAggregator, error) { s.openAPIAggregationController = openapicontroller.NewAggregationController(&specDownloader, openAPIAggregator) } - if s.openAPIV3Config != nil && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.OpenAPIV3) { + if s.openAPIV3Config != nil { specDownloaderV3 := openapiv3aggregator.NewDownloader() openAPIV3Aggregator, err := openapiv3aggregator.BuildAndRegisterAggregator( specDownloaderV3, s.GenericAPIServer.NextDelegate(), s.GenericAPIServer.Handler.GoRestfulContainer, - s.openAPIConfig, + s.openAPIV3Config, s.GenericAPIServer.Handler.NonGoRestfulMux) if err != nil { return preparedAPIAggregator{}, err @@ -524,7 +525,9 @@ func (s *APIAggregator) AddAPIService(apiService *v1.APIService) error { } // if we've already registered the path with the handler, we don't want to do it again. 
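
The GroupPriorityMinimum doc comments fixed above describe a two-level ordering: higher GroupPriorityMinimum wins, and ties are broken alphabetically by object name. A minimal sketch of that ordering, assuming a simplified stand-in struct rather than the aggregator's real APIService type or its sortByPriority helper:

```go
package main

import (
	"fmt"
	"sort"
)

// apiService is a simplified stand-in carrying only the fields needed to
// illustrate the ordering described in the GroupPriorityMinimum comments.
type apiService struct {
	Name                 string // e.g. "v1.bar"
	GroupPriorityMinimum int32
}

func main() {
	services := []apiService{
		{Name: "v1.foo", GroupPriorityMinimum: 17000},
		{Name: "v1.bar", GroupPriorityMinimum: 17000},
		{Name: "v1.apps.k8s.io", GroupPriorityMinimum: 18000},
	}
	sort.SliceStable(services, func(i, j int) bool {
		// Primary sort: higher GroupPriorityMinimum first (20 before 10).
		if services[i].GroupPriorityMinimum != services[j].GroupPriorityMinimum {
			return services[i].GroupPriorityMinimum > services[j].GroupPriorityMinimum
		}
		// Secondary sort: alphabetical by name (v1.bar before v1.foo).
		return services[i].Name < services[j].Name
	})
	fmt.Println(services) // [{v1.apps.k8s.io 18000} {v1.bar 17000} {v1.foo 17000}]
}
```
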
- if s.handledGroups.Has(apiService.Spec.Group) { + versions, exist := s.handledGroupVersions[apiService.Spec.Group] + if exist { + versions.Insert(apiService.Spec.Version) return nil } @@ -539,7 +542,7 @@ func (s *APIAggregator) AddAPIService(apiService *v1.APIService) error { // aggregation is protected s.GenericAPIServer.Handler.NonGoRestfulMux.Handle(groupPath, groupDiscoveryHandler) s.GenericAPIServer.Handler.NonGoRestfulMux.UnlistedHandle(groupPath+"/", groupDiscoveryHandler) - s.handledGroups.Insert(apiService.Spec.Group) + s.handledGroupVersions[apiService.Spec.Group] = sets.New[string](apiService.Spec.Version) return nil } @@ -568,8 +571,18 @@ func (s *APIAggregator) RemoveAPIService(apiServiceName string) { } delete(s.proxyHandlers, apiServiceName) - // TODO unregister group level discovery when there are no more versions for the group - // We don't need this right away because the handler properly delegates when no versions are present + versions, exist := s.handledGroupVersions[version.Group] + if !exist { + return + } + versions.Delete(version.Version) + if versions.Len() > 0 { + return + } + delete(s.handledGroupVersions, version.Group) + groupPath := "/apis/" + version.Group + s.GenericAPIServer.Handler.NonGoRestfulMux.Unregister(groupPath) + s.GenericAPIServer.Handler.NonGoRestfulMux.Unregister(groupPath + "/") } // DefaultAPIResourceConfigSource returns default configuration for an APIResource. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/aggregator.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/aggregator.go index 8491293e72511..c35ac49094ce1 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/aggregator.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/aggregator.go @@ -63,11 +63,13 @@ const ( type openAPISpecInfo struct { apiService v1.APIService // spec is the cached OpenAPI spec - spec cached.Replaceable[*spec.Swagger] + spec cached.LastSuccess[*spec.Swagger] // The downloader is used only for non-local apiservices to // re-update the spec every so often. - downloader cached.Data[*spec.Swagger] + // Calling Get() is not thread safe and should only be called by a single + // thread via the openapi controller. + downloader CacheableDownloader } type specAggregator struct { @@ -88,13 +90,12 @@ func buildAndRegisterSpecAggregatorForLocalServices(downloader *Downloader, aggr downloader: downloader, specsByAPIServiceName: map[string]*openAPISpecInfo{}, } - cachedAggregatorSpec := cached.NewResultOK(aggregatorSpec, "never-changes") + cachedAggregatorSpec := cached.Static(aggregatorSpec, "never-changes") s.addLocalSpec(fmt.Sprintf(localDelegateChainNamePattern, 0), cachedAggregatorSpec) for i, handler := range delegationHandlers { name := fmt.Sprintf(localDelegateChainNamePattern, i+1) - spec := NewCacheableDownloader(downloader, handler) - spec = decorateError(name, spec) + spec := NewCacheableDownloader(name, downloader, handler) s.addLocalSpec(name, spec) } @@ -132,55 +133,55 @@ func BuildAndRegisterAggregator(downloader *Downloader, delegationTarget server. 
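
The AddAPIService/RemoveAPIService hunks above replace the flat handledGroups set with a per-group set of versions, so group-level discovery can be unregistered once the last version of a group is removed. A small sketch of that bookkeeping under a hypothetical registry type (not the aggregator's actual code):

```go
package main

import "fmt"

// registry tracks, per API group, which versions currently have routes.
// It is a hypothetical stand-in for the handledGroupVersions map above.
type registry struct {
	handledGroupVersions map[string]map[string]struct{}
}

// add records a group/version pair and reports whether the group-level
// discovery handler still needs to be registered (first version seen).
func (r *registry) add(group, version string) (registerGroup bool) {
	if versions, ok := r.handledGroupVersions[group]; ok {
		versions[version] = struct{}{}
		return false
	}
	r.handledGroupVersions[group] = map[string]struct{}{version: {}}
	return true
}

// remove drops a group/version pair and reports whether the group-level
// discovery handler can now be unregistered (last version removed).
func (r *registry) remove(group, version string) (unregisterGroup bool) {
	versions, ok := r.handledGroupVersions[group]
	if !ok {
		return false
	}
	delete(versions, version)
	if len(versions) > 0 {
		return false
	}
	delete(r.handledGroupVersions, group)
	return true
}

func main() {
	r := &registry{handledGroupVersions: map[string]map[string]struct{}{}}
	fmt.Println(r.add("apps", "v1"))         // true  -> register /apis/apps
	fmt.Println(r.add("apps", "v1beta1"))    // false -> group already handled
	fmt.Println(r.remove("apps", "v1"))      // false -> v1beta1 still present
	fmt.Println(r.remove("apps", "v1beta1")) // true  -> unregister /apis/apps
}
```
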
return s, nil } -func (s *specAggregator) addLocalSpec(name string, spec cached.Data[*spec.Swagger]) { +func (s *specAggregator) addLocalSpec(name string, cachedSpec cached.Value[*spec.Swagger]) { service := v1.APIService{} service.Name = name info := &openAPISpecInfo{ apiService: service, } - info.spec.Replace(spec) + info.spec.Store(cachedSpec) s.specsByAPIServiceName[name] = info } // buildMergeSpecLocked creates a new cached mergeSpec from the list of cached specs. -func (s *specAggregator) buildMergeSpecLocked() cached.Data[*spec.Swagger] { +func (s *specAggregator) buildMergeSpecLocked() cached.Value[*spec.Swagger] { apiServices := make([]*v1.APIService, 0, len(s.specsByAPIServiceName)) for k := range s.specsByAPIServiceName { apiServices = append(apiServices, &s.specsByAPIServiceName[k].apiService) } sortByPriority(apiServices) - caches := make([]cached.Data[*spec.Swagger], len(apiServices)) + caches := make([]cached.Value[*spec.Swagger], len(apiServices)) for i, apiService := range apiServices { caches[i] = &(s.specsByAPIServiceName[apiService.Name].spec) } - return cached.NewListMerger(func(results []cached.Result[*spec.Swagger]) cached.Result[*spec.Swagger] { + return cached.MergeList(func(results []cached.Result[*spec.Swagger]) (*spec.Swagger, string, error) { var merged *spec.Swagger etags := make([]string, 0, len(results)) for _, specInfo := range results { - result := specInfo.Get() - if result.Err != nil { + result, etag, err := specInfo.Get() + if err != nil { // APIService name and err message will be included in // the error message as part of decorateError - klog.Warning(result.Err) + klog.Warning(err) continue } if merged == nil { merged = &spec.Swagger{} - *merged = *result.Data + *merged = *result // Paths, Definitions and parameters are set by // MergeSpecsIgnorePathConflictRenamingDefinitionsAndParameters merged.Paths = nil merged.Definitions = nil merged.Parameters = nil } - etags = append(etags, result.Etag) - if err := aggregator.MergeSpecsIgnorePathConflictRenamingDefinitionsAndParameters(merged, result.Data); err != nil { - return cached.NewResultErr[*spec.Swagger](fmt.Errorf("failed to build merge specs: %v", err)) + etags = append(etags, etag) + if err := aggregator.MergeSpecsIgnorePathConflictRenamingDefinitionsAndParameters(merged, result); err != nil { + return nil, "", fmt.Errorf("failed to build merge specs: %v", err) } } // Printing the etags list is stable because it is sorted. 
- return cached.NewResultOK(merged, fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%#v", etags))))) + return merged, fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%#v", etags)))), nil }, caches) } @@ -191,15 +192,15 @@ func (s *specAggregator) updateServiceLocked(name string) error { if !exists { return ErrAPIServiceNotFound } - result := specInfo.downloader.Get() - filteredResult := cached.NewTransformer[*spec.Swagger](func(result cached.Result[*spec.Swagger]) cached.Result[*spec.Swagger] { - if result.Err != nil { - return result + result, etag, err := specInfo.downloader.Get() + filteredResult := cached.Transform[*spec.Swagger](func(result *spec.Swagger, etag string, err error) (*spec.Swagger, string, error) { + if err != nil { + return nil, "", err } - return cached.NewResultOK(aggregator.FilterSpecByPathsWithoutSideEffects(result.Data, []string{"/apis/"}), result.Etag) - }, result) - specInfo.spec.Replace(filteredResult) - return result.Err + return aggregator.FilterSpecByPathsWithoutSideEffects(result, []string{"/apis/"}), etag, nil + }, cached.Result[*spec.Swagger]{Value: result, Etag: etag, Err: err}) + specInfo.spec.Store(filteredResult) + return err } // UpdateAPIServiceSpec updates the api service. It is thread safe. @@ -218,16 +219,21 @@ func (s *specAggregator) AddUpdateAPIService(apiService *v1.APIService, handler s.mutex.Lock() defer s.mutex.Unlock() - _, exists := s.specsByAPIServiceName[apiService.Name] + existingSpec, exists := s.specsByAPIServiceName[apiService.Name] if !exists { - s.specsByAPIServiceName[apiService.Name] = &openAPISpecInfo{ + specInfo := &openAPISpecInfo{ apiService: *apiService, - downloader: decorateError(apiService.Name, NewCacheableDownloader(s.downloader, handler)), + downloader: NewCacheableDownloader(apiService.Name, s.downloader, handler), } + specInfo.spec.Store(cached.Result[*spec.Swagger]{Err: fmt.Errorf("spec for apiservice %s is not yet available", apiService.Name)}) + s.specsByAPIServiceName[apiService.Name] = specInfo s.openAPIVersionedService.UpdateSpecLazy(s.buildMergeSpecLocked()) + } else { + existingSpec.apiService = *apiService + existingSpec.downloader.UpdateHandler(handler) } - return s.updateServiceLocked(apiService.Name) + return nil } // RemoveAPIService removes an api service from OpenAPI aggregation. If it does not exist, no error is returned. @@ -243,14 +249,3 @@ func (s *specAggregator) RemoveAPIService(apiServiceName string) { // Re-create the mergeSpec for the new list of apiservices s.openAPIVersionedService.UpdateSpecLazy(s.buildMergeSpecLocked()) } - -// decorateError creates a new cache that wraps a downloader -// cache the name of the apiservice to help with debugging. 
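
The merge callback above derives the aggregated document's etag by printing the per-APIService etag list and hashing it; because the apiservices were sorted beforehand, the result is deterministic. A standalone illustration of that derivation (the helper name is made up for this sketch):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// aggregateEtag mirrors how the merge callback builds one etag out of the
// per-APIService etags: format the (already sorted) list and hash it.
func aggregateEtag(etags []string) string {
	return fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%#v", etags))))
}

func main() {
	fmt.Println(aggregateEtag([]string{`"abc"`, `"def"`}))
	// Any change to a member etag (or to their order) changes the result.
	fmt.Println(aggregateEtag([]string{`"abc"`, `"xyz"`}))
}
```
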
-func decorateError(name string, cache cached.Data[*spec.Swagger]) cached.Data[*spec.Swagger] { - return cached.NewTransformer(func(result cached.Result[*spec.Swagger]) cached.Result[*spec.Swagger] { - if result.Err != nil { - return cached.NewResultErr[*spec.Swagger](fmt.Errorf("failed to download %v: %v", name, result.Err)) - } - return result - }, cache) -} diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/aggregator_test.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/aggregator_test.go index c5ad4e1598f16..1b366e12de1e0 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/aggregator_test.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/aggregator_test.go @@ -25,6 +25,7 @@ import ( "time" "bytes" + v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "k8s.io/kube-openapi/pkg/common" "k8s.io/kube-openapi/pkg/validation/spec" @@ -90,6 +91,9 @@ func TestAddUpdateAPIService(t *testing.T) { if err := s.AddUpdateAPIService(apiService, handler); err != nil { t.Error(err) } + if err := s.UpdateAPIServiceSpec(apiService.Name); err != nil { + t.Error(err) + } swagger, err := fetchOpenAPI(mux) if err != nil { @@ -109,7 +113,9 @@ func TestAddUpdateAPIService(t *testing.T) { }, }, } - s.UpdateAPIServiceSpec(apiService.Name) + if err := s.UpdateAPIServiceSpec(apiService.Name); err != nil { + t.Error(err) + } swagger, err = fetchOpenAPI(mux) if err != nil { @@ -158,6 +164,9 @@ func TestAddRemoveAPIService(t *testing.T) { if err := s.AddUpdateAPIService(apiService, handler); err != nil { t.Error(err) } + if err := s.UpdateAPIServiceSpec(apiService.Name); err != nil { + t.Error(err) + } swagger, err := fetchOpenAPI(mux) if err != nil { @@ -178,6 +187,78 @@ func TestAddRemoveAPIService(t *testing.T) { expectPath(t, swagger, "/apis/apiregistration.k8s.io/v1") } +func TestUpdateAPIService(t *testing.T) { + mux := http.NewServeMux() + var delegationHandlers []http.Handler + delegate1 := &openAPIHandler{openapi: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Paths: &spec.Paths{ + Paths: map[string]spec.PathItem{ + "/apis/foo/v1": {}, + }, + }, + }, + }} + delegationHandlers = append(delegationHandlers, delegate1) + + s := buildAndRegisterSpecAggregator(delegationHandlers, mux) + + apiService := &v1.APIService{ + Spec: v1.APIServiceSpec{ + Service: &v1.ServiceReference{Name: "dummy"}, + }, + } + apiService.Name = "apiservice" + + handler := &openAPIHandler{openapi: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Paths: &spec.Paths{ + Paths: map[string]spec.PathItem{ + "/apis/apiservicegroup/v1": {}, + }, + }, + }, + }} + + handler2 := &openAPIHandler{openapi: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Paths: &spec.Paths{ + Paths: map[string]spec.PathItem{}, + }, + }, + }} + + if err := s.AddUpdateAPIService(apiService, handler); err != nil { + t.Error(err) + } + if err := s.UpdateAPIServiceSpec(apiService.Name); err != nil { + t.Error(err) + } + + swagger, err := fetchOpenAPI(mux) + if err != nil { + t.Error(err) + } + expectPath(t, swagger, "/apis/apiservicegroup/v1") + expectPath(t, swagger, "/apis/apiregistration.k8s.io/v1") + + t.Logf("Updating APIService %s", apiService.Name) + if err := s.AddUpdateAPIService(apiService, handler2); err != nil { + t.Error(err) + } + if err := s.UpdateAPIServiceSpec(apiService.Name); err != nil { + t.Error(err) + } + + swagger, err = fetchOpenAPI(mux) + if err != nil { + t.Error(err) + } + // Ensure that the if the APIService is 
added and then handler is modified, the new data is reflected in the aggregated OpenAPI. + expectNoPath(t, swagger, "/apis/apiservicegroup/v1") + expectPath(t, swagger, "/apis/apiregistration.k8s.io/v1") +} + func TestFailingAPIServiceSkippedAggregation(t *testing.T) { mux := http.NewServeMux() var delegationHandlers []http.Handler @@ -233,8 +314,19 @@ func TestFailingAPIServiceSkippedAggregation(t *testing.T) { }, } - s.AddUpdateAPIService(apiServiceFailed, handlerFailed) - s.AddUpdateAPIService(apiServiceSuccess, handlerSuccess) + if err := s.AddUpdateAPIService(apiServiceSuccess, handlerSuccess); err != nil { + t.Error(err) + } + if err := s.AddUpdateAPIService(apiServiceFailed, handlerFailed); err != nil { + t.Error(err) + } + if err := s.UpdateAPIServiceSpec(apiServiceSuccess.Name); err != nil { + t.Error(err) + } + err := s.UpdateAPIServiceSpec(apiServiceFailed.Name) + if err == nil { + t.Errorf("Expected updating failing apiService %s to return error", apiServiceFailed.Name) + } swagger, err := fetchOpenAPI(mux) if err != nil { @@ -281,7 +373,12 @@ func TestAPIServiceFailSuccessTransition(t *testing.T) { }, } - s.AddUpdateAPIService(apiService, handler) + if err := s.AddUpdateAPIService(apiService, handler); err != nil { + t.Error(err) + } + if err := s.UpdateAPIServiceSpec(apiService.Name); err == nil { + t.Errorf("Expected error for when updating spec for failing apiservice") + } swagger, err := fetchOpenAPI(mux) if err != nil { @@ -304,12 +401,75 @@ func TestAPIServiceFailSuccessTransition(t *testing.T) { expectPath(t, swagger, "/apis/apiservicegroup/v1") } +func TestFailingAPIServiceDoesNotBlockAdd(t *testing.T) { + mux := http.NewServeMux() + var delegationHandlers []http.Handler + delegate1 := &openAPIHandler{openapi: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Paths: &spec.Paths{ + Paths: map[string]spec.PathItem{ + "/apis/foo/v1": {}, + }, + }, + }, + }} + delegationHandlers = append(delegationHandlers, delegate1) + + s := buildAndRegisterSpecAggregator(delegationHandlers, mux) + + apiServiceFailed := &v1.APIService{ + Spec: v1.APIServiceSpec{ + Service: &v1.ServiceReference{Name: "dummy"}, + }, + } + apiServiceFailed.Name = "apiserviceFailed" + + // Create a handler that has a long response time and ensure that + // adding the APIService does not block. 
+ handlerFailed := &openAPIHandler{ + delaySeconds: 5, + returnErr: true, + openapi: &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Paths: &spec.Paths{ + Paths: map[string]spec.PathItem{ + "/apis/failed/v1": {}, + }, + }, + }, + }, + } + + updateDone := make(chan bool) + go func() { + if err := s.AddUpdateAPIService(apiServiceFailed, handlerFailed); err != nil { + t.Error(err) + } + close(updateDone) + }() + + select { + case <-updateDone: + case <-time.After(2 * time.Second): + t.Errorf("AddUpdateAPIService affected by APIService response time") + } + + swagger, err := fetchOpenAPI(mux) + if err != nil { + t.Error(err) + } + expectPath(t, swagger, "/apis/foo/v1") + expectNoPath(t, swagger, "/apis/failed/v1") +} + type openAPIHandler struct { - openapi *spec.Swagger - returnErr bool + delaySeconds int + openapi *spec.Swagger + returnErr bool } func (o *openAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + time.Sleep(time.Duration(o.delaySeconds) * time.Second) if o.returnErr { w.WriteHeader(500) return diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/downloader.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/downloader.go index 3098f593e24c7..03721365805d2 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/downloader.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/aggregator/downloader.go @@ -21,34 +21,55 @@ import ( "fmt" "net/http" "strings" + "sync/atomic" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/validation/spec" ) +type CacheableDownloader interface { + UpdateHandler(http.Handler) + Get() (*spec.Swagger, string, error) +} + // cacheableDownloader is a downloader that will always return the data // and the etag. type cacheableDownloader struct { + name string downloader *Downloader - handler http.Handler - etag string - spec *spec.Swagger + // handler is the http Handler for the apiservice that can be replaced + handler atomic.Pointer[http.Handler] + etag string + spec *spec.Swagger } -// Creates a downloader that also returns the etag, making it useful to use as a cached dependency. -func NewCacheableDownloader(downloader *Downloader, handler http.Handler) cached.Data[*spec.Swagger] { - return &cacheableDownloader{ +// NewCacheableDownloader creates a downloader that also returns the etag, making it useful to use as a cached dependency. 
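
TestFailingAPIServiceDoesNotBlockAdd above asserts that AddUpdateAPIService returns promptly even when the backing handler is slow, by running the call in a goroutine, closing a channel on completion, and selecting against a timeout. The same pattern in isolation (function name and timings here are purely illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// slowOperation stands in for a call (like AddUpdateAPIService) that should
// return quickly regardless of how slow its dependencies are.
func slowOperation() { time.Sleep(100 * time.Millisecond) }

func main() {
	done := make(chan struct{})
	go func() {
		slowOperation()
		close(done)
	}()

	select {
	case <-done:
		fmt.Println("operation returned in time")
	case <-time.After(2 * time.Second):
		fmt.Println("operation blocked longer than expected")
	}
}
```
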
+func NewCacheableDownloader(apiServiceName string, downloader *Downloader, handler http.Handler) CacheableDownloader { + c := &cacheableDownloader{ + name: apiServiceName, downloader: downloader, - handler: handler, } + c.handler.Store(&handler) + return c +} +func (d *cacheableDownloader) UpdateHandler(handler http.Handler) { + d.handler.Store(&handler) +} + +func (d *cacheableDownloader) Get() (*spec.Swagger, string, error) { + spec, etag, err := d.get() + if err != nil { + return spec, etag, fmt.Errorf("failed to download %v: %v", d.name, err) + } + return spec, etag, err } -func (d *cacheableDownloader) Get() cached.Result[*spec.Swagger] { - swagger, etag, status, err := d.downloader.Download(d.handler, d.etag) +func (d *cacheableDownloader) get() (*spec.Swagger, string, error) { + h := *d.handler.Load() + swagger, etag, status, err := d.downloader.Download(h, d.etag) if err != nil { - return cached.NewResultErr[*spec.Swagger](err) + return nil, "", err } switch status { case http.StatusNotModified: @@ -61,11 +82,11 @@ func (d *cacheableDownloader) Get() cached.Result[*spec.Swagger] { } fallthrough case http.StatusNotFound: - return cached.NewResultErr[*spec.Swagger](ErrAPIServiceNotFound) + return nil, "", ErrAPIServiceNotFound default: - return cached.NewResultErr[*spec.Swagger](fmt.Errorf("invalid status code: %v", status)) + return nil, "", fmt.Errorf("invalid status code: %v", status) } - return cached.NewResultOK(d.spec, d.etag) + return d.spec, d.etag, nil } // Downloader is the OpenAPI downloader type. It will try to download spec from /openapi/v2 or /swagger.json endpoint. diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go index 5f107150c8e97..69f32f4aa8622 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go @@ -139,7 +139,10 @@ func (c *AggregationController) AddAPIService(handler http.Handler, apiService * // UpdateAPIService updates API Service's info and handler. func (c *AggregationController) UpdateAPIService(handler http.Handler, apiService *v1.APIService) { - if err := c.openAPIAggregationManager.AddUpdateAPIService(apiService, handler); err != nil { + if apiService.Spec.Service == nil { + return + } + if err := c.openAPIAggregationManager.UpdateAPIServiceSpec(apiService.Name); err != nil { utilruntime.HandleError(fmt.Errorf("Error updating APIService %q with err: %v", apiService.Name, err)) } key := apiService.Name diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go index 23632ff32d118..331ae8144e81a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go @@ -81,7 +81,7 @@ func (s *specProxier) GetAPIServiceNames() []string { } // BuildAndRegisterAggregator registered OpenAPI aggregator handler. This function is not thread safe as it only being called on startup. 
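[Reviewer note, not part of the diff] The CacheableDownloader change above keeps the APIService's http.Handler behind an atomic.Pointer so that UpdateHandler can swap in a new delegate while Get is concurrently downloading, without taking a lock. A self-contained sketch of that handler-swap pattern follows; swappableHandler and all names in it are hypothetical illustrations, not code from this change.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync/atomic"
)

// swappableHandler mirrors the pattern used by cacheableDownloader above:
// the current delegate is held behind an atomic.Pointer so it can be
// replaced concurrently with in-flight requests, without a mutex.
type swappableHandler struct {
	delegate atomic.Pointer[http.Handler]
}

func newSwappableHandler(h http.Handler) *swappableHandler {
	s := &swappableHandler{}
	s.delegate.Store(&h)
	return s
}

// Update atomically replaces the delegate, analogous to UpdateHandler
// being called when an APIService's backing service changes.
func (s *swappableHandler) Update(h http.Handler) { s.delegate.Store(&h) }

func (s *swappableHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	(*s.delegate.Load()).ServeHTTP(w, r)
}

func main() {
	s := newSwappableHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "v1")
	}))
	rec := httptest.NewRecorder()
	s.ServeHTTP(rec, httptest.NewRequest("GET", "/openapi/v2", nil))
	fmt.Println(rec.Body.String()) // v1

	// Swap the delegate; subsequent requests see the new handler.
	s.Update(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "v2")
	}))
	rec = httptest.NewRecorder()
	s.ServeHTTP(rec, httptest.NewRequest("GET", "/openapi/v2", nil))
	fmt.Println(rec.Body.String()) // v2
}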
-func BuildAndRegisterAggregator(downloader Downloader, delegationTarget server.DelegationTarget, aggregatorService *restful.Container, openAPIConfig *common.Config, pathHandler common.PathHandlerByGroupVersion) (SpecProxier, error) { +func BuildAndRegisterAggregator(downloader Downloader, delegationTarget server.DelegationTarget, aggregatorService *restful.Container, openAPIConfig *common.OpenAPIV3Config, pathHandler common.PathHandlerByGroupVersion) (SpecProxier, error) { s := &specProxier{ apiServiceInfo: map[string]*openAPIV3APIServiceInfo{}, downloader: downloader, @@ -93,7 +93,7 @@ func BuildAndRegisterAggregator(downloader Downloader, delegationTarget server.D aggregatorLocalServiceName := "k8s_internal_local_kube_aggregator_types" v3Mux := mux.NewPathRecorderMux(aggregatorLocalServiceName) _ = routes.OpenAPI{ - Config: openAPIConfig, + V3Config: openAPIConfig, }.InstallV3(aggregatorService, v3Mux) s.AddUpdateAPIService(v3Mux, &v1.APIService{ diff --git a/staging/src/k8s.io/kube-aggregator/pkg/generated/openapi/zz_generated.openapi.go b/staging/src/k8s.io/kube-aggregator/pkg/generated/openapi/zz_generated.openapi.go index 3dfd1b3709d9b..86edb8a6e33f8 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/generated/openapi/zz_generated.openapi.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/generated/openapi/zz_generated.openapi.go @@ -530,7 +530,6 @@ func schema_pkg_apis_meta_v1_Condition(ref common.ReferenceCallback) common.Open "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -1086,8 +1085,7 @@ func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDe Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -1373,7 +1371,6 @@ func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.Ope "creationTimestamp": { SchemaProps: spec.SchemaProps{ Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2155,7 +2152,6 @@ func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenA "object": { SchemaProps: spec.SchemaProps{ Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. 
The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -2354,7 +2350,6 @@ func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.Ope "object": { SchemaProps: spec.SchemaProps{ Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -2598,7 +2593,6 @@ func schema_pkg_apis_apiregistration_v1_APIServiceCondition(ref common.Reference "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2724,7 +2718,7 @@ func schema_pkg_apis_apiregistration_v1_APIServiceSpec(ref common.ReferenceCallb }, "groupPriorityMinimum": { SchemaProps: spec.SchemaProps{ - Description: "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + Description: "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", Default: 0, Type: []string{"integer"}, Format: "int32", @@ -2896,7 +2890,6 @@ func schema_pkg_apis_apiregistration_v1beta1_APIServiceCondition(ref common.Refe "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -3022,7 +3015,7 @@ func schema_pkg_apis_apiregistration_v1beta1_APIServiceSpec(ref common.Reference }, "groupPriorityMinimum": { SchemaProps: spec.SchemaProps{ - Description: "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). 
The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + Description: "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", Default: 0, Type: []string{"integer"}, Format: "int32", diff --git a/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go b/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go index 6aaa3e1978b1e..df201d90d2c69 100644 --- a/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go +++ b/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go @@ -177,7 +177,7 @@ type AttachDetachControllerConfiguration struct { // This flag enables or disables reconcile. Is false by default, and thus enabled. DisableAttachDetachReconcilerSync bool // ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop - // wait between successive executions. Is set to 5 sec by default. + // wait between successive executions. Is set to 60 sec by default. ReconcilerSyncLoopPeriod metav1.Duration } diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod index f974598d6f06b..cc8eb1392e78c 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.mod +++ b/staging/src/k8s.io/kube-controller-manager/go.mod @@ -2,7 +2,7 @@ module k8s.io/kube-controller-manager -go 1.20 +go 1.21.3 require ( k8s.io/apimachinery v0.0.0 @@ -18,8 +18,8 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/text v0.13.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum index f49201eb8d908..54c64a700efa4 100644 --- a/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/staging/src/k8s.io/kube-controller-manager/go.sum @@ -13,10 +13,10 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -40,7 +40,7 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -66,7 +66,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -82,31 +83,29 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= go.etcd.io/etcd/client/v3 v3.5.9/go.mod 
h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -115,37 +114,37 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api 
v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -160,10 +159,10 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go b/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go index e5654a1175c9d..133689ed9da0b 100644 --- a/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go +++ b/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go @@ -26,36 +26,44 @@ import ( // details for the Kubernetes proxy server. type KubeProxyIPTablesConfiguration struct { // masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. + // the iptables or ipvs proxy mode. Values must be within the range [0, 31]. MasqueradeBit *int32 `json:"masqueradeBit"` - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. + // masqueradeAll tells kube-proxy to SNAT all traffic sent to Service cluster IPs, + // when using the iptables or ipvs proxy mode. This may be required with some CNI + // plugins. 
MasqueradeAll bool `json:"masqueradeAll"` - // LocalhostNodePorts tells kube-proxy to allow service NodePorts to be accessed via - // localhost (iptables mode only) + // localhostNodePorts, if false, tells kube-proxy to disable the legacy behavior + // of allowing NodePort services to be accessed via localhost. (Applies only to + // iptables mode and IPv4; localhost NodePorts are never allowed with other proxy + // modes or with IPv6.) LocalhostNodePorts *bool `json:"localhostNodePorts"` - // syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. + // syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently + // various re-synchronizing and cleanup operations are performed. Must be greater + // than 0. SyncPeriod metav1.Duration `json:"syncPeriod"` - // minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). + // minSyncPeriod is the minimum period between iptables rule resyncs (e.g. '5s', + // '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will + // result in an immediate iptables resync. MinSyncPeriod metav1.Duration `json:"minSyncPeriod"` } // KubeProxyIPVSConfiguration contains ipvs-related configuration // details for the Kubernetes proxy server. type KubeProxyIPVSConfiguration struct { - // syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. + // syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently + // various re-synchronizing and cleanup operations are performed. Must be greater + // than 0. SyncPeriod metav1.Duration `json:"syncPeriod"` - // minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). + // minSyncPeriod is the minimum period between IPVS rule resyncs (e.g. '5s', '1m', + // '2h22m'). A value of 0 means every Service or EndpointSlice change will result + // in an immediate IPVS resync. MinSyncPeriod metav1.Duration `json:"minSyncPeriod"` - // ipvs scheduler + // scheduler is the IPVS scheduler to use Scheduler string `json:"scheduler"` - // excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch + // excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch // when cleaning up ipvs services. ExcludeCIDRs []string `json:"excludeCIDRs"` - // strict ARP configure arp_ignore and arp_announce to avoid answering ARP queries + // strictARP configures arp_ignore and arp_announce to avoid answering ARP queries // from kube-ipvs0 interface StrictARP bool `json:"strictARP"` // tcpTimeout is the timeout value used for idle IPVS TCP sessions. @@ -76,7 +84,7 @@ type KubeProxyConntrackConfiguration struct { // per CPU core (0 to leave the limit as-is and ignore min). MaxPerCore *int32 `json:"maxPerCore"` // min is the minimum value of connect-tracking records to allocate, - // regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is). + // regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is). Min *int32 `json:"min"` // tcpEstablishedTimeout is how long an idle TCP connection will be kept open // (e.g. '2s'). Must be greater than 0 to set. @@ -85,6 +93,14 @@ type KubeProxyConntrackConfiguration struct { // in CLOSE_WAIT state will remain in the conntrack // table. (e.g. '60s'). Must be greater than 0 to set. 
TCPCloseWaitTimeout *metav1.Duration `json:"tcpCloseWaitTimeout"` + // udpTimeout is how long an idle UDP conntrack entry in + // UNREPLIED state will remain in the conntrack table + // (e.g. '30s'). Must be greater than 0 to set. + UDPTimeout metav1.Duration `json:"udpTimeout"` + // udpStreamTimeout is how long an idle UDP conntrack entry in + // ASSURED state will remain in the conntrack table + // (e.g. '300s'). Must be greater than 0 to set. + UDPStreamTimeout metav1.Duration `json:"udpStreamTimeout"` } // KubeProxyWinkernelConfiguration contains Windows/HNS settings for @@ -93,29 +109,29 @@ type KubeProxyWinkernelConfiguration struct { // networkName is the name of the network kube-proxy will use // to create endpoints and policies NetworkName string `json:"networkName"` - // sourceVip is the IP address of the source VIP endoint used for + // sourceVip is the IP address of the source VIP endpoint used for // NAT when loadbalancing SourceVip string `json:"sourceVip"` // enableDSR tells kube-proxy whether HNS policies should be created // with DSR EnableDSR bool `json:"enableDSR"` - // RootHnsEndpointName is the name of hnsendpoint that is attached to + // rootHnsEndpointName is the name of hnsendpoint that is attached to // l2bridge for root network namespace RootHnsEndpointName string `json:"rootHnsEndpointName"` - // ForwardHealthCheckVip forwards service VIP for health check port on + // forwardHealthCheckVip forwards service VIP for health check port on // Windows ForwardHealthCheckVip bool `json:"forwardHealthCheckVip"` } // DetectLocalConfiguration contains optional settings related to DetectLocalMode option type DetectLocalConfiguration struct { - // BridgeInterface is a string argument which represents a single bridge interface name. - // Kube-proxy considers traffic as local if originating from this given bridge. - // This argument should be set if DetectLocalMode is set to LocalModeBridgeInterface. + // bridgeInterface is a bridge interface name. When DetectLocalMode is set to + // LocalModeBridgeInterface, kube-proxy will consider traffic to be local if + // it originates from this bridge. BridgeInterface string `json:"bridgeInterface"` - // InterfaceNamePrefix is a string argument which represents a single interface prefix name. - // Kube-proxy considers traffic as local if originating from one or more interfaces which match - // the given prefix. This argument should be set if DetectLocalMode is set to LocalModeInterfaceNamePrefix. + // interfaceNamePrefix is an interface name prefix. When DetectLocalMode is set to + // LocalModeInterfaceNamePrefix, kube-proxy will consider traffic to be local if + // it originates from any interface whose name begins with this prefix. InterfaceNamePrefix string `json:"interfaceNamePrefix"` } @@ -129,66 +145,77 @@ type KubeProxyConfiguration struct { // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. FeatureGates map[string]bool `json:"featureGates,omitempty"` - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) + // clientConnection specifies the kubeconfig file and client connection settings for the proxy + // server to use when communicating with the apiserver. + ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` + // logging specifies the options of logging. 
+ // Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) + // for more information. + Logging logsapi.LoggingConfiguration `json:"logging,omitempty"` + + // hostnameOverride, if non-empty, will be used as the name of the Node that + // kube-proxy is running on. If unset, the node name is assumed to be the same as + // the node's hostname. + HostnameOverride string `json:"hostnameOverride"` + // bindAddress can be used to override kube-proxy's idea of what its node's + // primary IP is. Note that the name is a historical artifact, and kube-proxy does + // not actually bind any sockets to this IP. BindAddress string `json:"bindAddress"` - // healthzBindAddress is the IP address and port for the health check server to serve on, - // defaulting to 0.0.0.0:10256 + // healthzBindAddress is the IP address and port for the health check server to + // serve on, defaulting to "0.0.0.0:10256" (if bindAddress is unset or IPv4), or + // "[::]:10256" (if bindAddress is IPv6). HealthzBindAddress string `json:"healthzBindAddress"` - // metricsBindAddress is the IP address and port for the metrics server to serve on, - // defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces) + // metricsBindAddress is the IP address and port for the metrics server to serve + // on, defaulting to "127.0.0.1:10249" (if bindAddress is unset or IPv4), or + // "[::1]:10249" (if bindAddress is IPv6). (Set to "0.0.0.0:10249" / "[::]:10249" + // to bind on all interfaces.) MetricsBindAddress string `json:"metricsBindAddress"` - // bindAddressHardFail, if true, kube-proxy will treat failure to bind to a port as fatal and exit + // bindAddressHardFail, if true, tells kube-proxy to treat failure to bind to a + // port as fatal and exit BindAddressHardFail bool `json:"bindAddressHardFail"` // enableProfiling enables profiling via web interface on /debug/pprof handler. // Profiling handlers will be handled by metrics server. EnableProfiling bool `json:"enableProfiling"` - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string `json:"clusterCIDR"` - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // clientConnection specifies the kubeconfig file and client connection settings for the proxy - // server to use when communicating with the apiserver. - ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` + // showHiddenMetricsForVersion is the version for which you want to show hidden metrics. + ShowHiddenMetricsForVersion string `json:"showHiddenMetricsForVersion"` + + // mode specifies which proxy mode to use. + Mode ProxyMode `json:"mode"` // iptables contains iptables-related configuration options. IPTables KubeProxyIPTablesConfiguration `json:"iptables"` // ipvs contains ipvs-related configuration options. IPVS KubeProxyIPVSConfiguration `json:"ipvs"` + // winkernel contains winkernel-related configuration options. + Winkernel KubeProxyWinkernelConfiguration `json:"winkernel"` + + // detectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR + DetectLocalMode LocalMode `json:"detectLocalMode"` + // detectLocal contains optional configuration settings related to DetectLocalMode. 
+ DetectLocal DetectLocalConfiguration `json:"detectLocal"` + // clusterCIDR is the CIDR range of the pods in the cluster. (For dual-stack + // clusters, this can be a comma-separated dual-stack pair of CIDR ranges.). When + // DetectLocalMode is set to LocalModeClusterCIDR, kube-proxy will consider + // traffic to be local if its source IP is in this range. (Otherwise it is not + // used.) + ClusterCIDR string `json:"clusterCIDR"` + + // nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set, + // connections to NodePort services will only be accepted on node IPs in one of + // the indicated ranges. If unset, NodePort connections will be accepted on all + // local IPs. + NodePortAddresses []string `json:"nodePortAddresses"` + // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within // the range [-1000, 1000] OOMScoreAdj *int32 `json:"oomScoreAdj"` - // mode specifies which proxy mode to use. - Mode ProxyMode `json:"mode"` - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string `json:"portRange"` // conntrack contains conntrack-related configuration options. Conntrack KubeProxyConntrackConfiguration `json:"conntrack"` // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater // than 0. ConfigSyncPeriod metav1.Duration `json:"configSyncPeriod"` - // nodePortAddresses is the --nodeport-addresses value for kube-proxy process. Values must be valid - // IP blocks. These values are as a parameter to select the interfaces where nodeport works. - // In case someone would like to expose a service on localhost for local visit and some other interfaces for - // particular purpose, a list of IP blocks would do that. - // If set it to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort. - // If set it to a non-zero IP block, kube-proxy will filter that down to just the IPs that applied to the node. - // An empty string slice is meant to select all network interfaces. - NodePortAddresses []string `json:"nodePortAddresses"` - // winkernel contains winkernel-related configuration options. - Winkernel KubeProxyWinkernelConfiguration `json:"winkernel"` - // ShowHiddenMetricsForVersion is the version for which you want to show hidden metrics. - ShowHiddenMetricsForVersion string `json:"showHiddenMetricsForVersion"` - // DetectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR - DetectLocalMode LocalMode `json:"detectLocalMode"` - // DetectLocal contains optional configuration settings related to DetectLocalMode. - DetectLocal DetectLocalConfiguration `json:"detectLocal"` - // logging specifies the options of logging. - // Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) - // for more information. - Logging logsapi.LoggingConfiguration `json:"logging,omitempty"` + + // portRange was previously used to configure the userspace proxy, but is now unused. + PortRange string `json:"portRange"` } // ProxyMode represents modes used by the Kubernetes proxy server. 
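[Reviewer note, not part of the diff] The reworked KubeProxyConfiguration comments above describe how mode, detectLocalMode, clusterCIDR, and nodePortAddresses interact, and add the conntrack udpTimeout/udpStreamTimeout fields. Below is a rough Go sketch populating these v1alpha1 fields; it assumes ProxyMode and LocalMode are string-kinded types as referenced in the comments, and the CIDRs and durations are made-up example values.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
)

func main() {
	cfg := v1alpha1.KubeProxyConfiguration{
		Mode: v1alpha1.ProxyMode("iptables"),
		// With DetectLocalMode set to LocalModeClusterCIDR, traffic whose
		// source IP falls inside ClusterCIDR is treated as local.
		DetectLocalMode: v1alpha1.LocalMode("ClusterCIDR"),
		ClusterCIDR:     "10.244.0.0/16",
		// NodePort connections are accepted only on node IPs in these ranges.
		NodePortAddresses: []string{"192.168.1.0/24"},
		IPTables: v1alpha1.KubeProxyIPTablesConfiguration{
			SyncPeriod:    metav1.Duration{Duration: 30 * time.Second},
			MinSyncPeriod: metav1.Duration{Duration: 1 * time.Second},
		},
		Conntrack: v1alpha1.KubeProxyConntrackConfiguration{
			// The two fields added in this diff: idle UDP entries in
			// UNREPLIED and ASSURED state, respectively.
			UDPTimeout:       metav1.Duration{Duration: 30 * time.Second},
			UDPStreamTimeout: metav1.Duration{Duration: 300 * time.Second},
		},
	}
	fmt.Printf("%+v\n", cfg)
}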
diff --git a/staging/src/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go index 4db886c9a94b8..9b908dc80fa1b 100644 --- a/staging/src/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go @@ -54,8 +54,16 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { } } out.ClientConnection = in.ClientConnection + in.Logging.DeepCopyInto(&out.Logging) in.IPTables.DeepCopyInto(&out.IPTables) in.IPVS.DeepCopyInto(&out.IPVS) + out.Winkernel = in.Winkernel + out.DetectLocal = in.DetectLocal + if in.NodePortAddresses != nil { + in, out := &in.NodePortAddresses, &out.NodePortAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.OOMScoreAdj != nil { in, out := &in.OOMScoreAdj, &out.OOMScoreAdj *out = new(int32) @@ -63,14 +71,6 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { } in.Conntrack.DeepCopyInto(&out.Conntrack) out.ConfigSyncPeriod = in.ConfigSyncPeriod - if in.NodePortAddresses != nil { - in, out := &in.NodePortAddresses, &out.NodePortAddresses - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.Winkernel = in.Winkernel - out.DetectLocal = in.DetectLocal - in.Logging.DeepCopyInto(&out.Logging) return } @@ -115,6 +115,8 @@ func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackC *out = new(v1.Duration) **out = **in } + out.UDPTimeout = in.UDPTimeout + out.UDPStreamTimeout = in.UDPStreamTimeout return } diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod index 6f4df5b9886dd..e13945e04407c 100644 --- a/staging/src/k8s.io/kube-proxy/go.mod +++ b/staging/src/k8s.io/kube-proxy/go.mod @@ -2,7 +2,7 @@ module k8s.io/kube-proxy -go 1.20 +go 1.21.3 require ( k8s.io/apimachinery v0.0.0 @@ -30,9 +30,9 @@ require ( github.com/prometheus/procfs v0.10.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum index 54b99504613a9..2b410e8043e8d 100644 --- a/staging/src/k8s.io/kube-proxy/go.sum +++ b/staging/src/k8s.io/kube-proxy/go.sum @@ -14,7 +14,7 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= @@ -43,7 +43,7 @@ github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -70,7 +70,8 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -91,28 +92,26 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod 
h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -120,9 +119,9 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -131,28 +130,27 @@ golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -169,7 +167,7 @@ gopkg.in/yaml.v3 
v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod index ead07d4456cce..e1bc34711a2f4 100644 --- a/staging/src/k8s.io/kube-scheduler/go.mod +++ b/staging/src/k8s.io/kube-scheduler/go.mod @@ -2,7 +2,7 @@ module k8s.io/kube-scheduler -go 1.20 +go 1.21.3 require ( github.com/google/go-cmp v0.5.9 @@ -19,8 +19,8 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/text v0.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/klog/v2 v2.100.1 // indirect diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum index 7e5ae7e1368f1..35a7dbee97e7d 100644 --- a/staging/src/k8s.io/kube-scheduler/go.sum +++ b/staging/src/k8s.io/kube-scheduler/go.sum @@ -7,7 +7,7 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -30,7 +30,7 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ 
-52,7 +52,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -67,27 +68,25 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= 
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -95,36 +94,35 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -138,7 +136,7 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/kubectl/SECURITY_CONTACTS b/staging/src/k8s.io/kubectl/SECURITY_CONTACTS index 85157ea614c9b..24755cc4138ce 100644 --- a/staging/src/k8s.io/kubectl/SECURITY_CONTACTS +++ b/staging/src/k8s.io/kubectl/SECURITY_CONTACTS @@ -10,6 +10,7 @@ # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -pwittrock 
-seans3 +eddiezane +KnVerey +natasha41575 soltysh diff --git a/staging/src/k8s.io/kubectl/go.mod b/staging/src/k8s.io/kubectl/go.mod index cb4b22053675c..3b810afa3bf34 100644 --- a/staging/src/k8s.io/kubectl/go.mod +++ b/staging/src/k8s.io/kubectl/go.mod @@ -2,7 +2,7 @@ module k8s.io/kubectl -go 1.20 +go 1.21.3 require ( github.com/MakeNowJust/heredoc v1.0.0 @@ -21,14 +21,14 @@ require ( github.com/lithammer/dedent v1.1.0 github.com/mitchellh/go-wordwrap v1.0.1 github.com/moby/term v0.0.0-20221205130635-1aeaba878587 - github.com/onsi/ginkgo/v2 v2.9.4 - github.com/onsi/gomega v1.27.6 + github.com/onsi/ginkgo/v2 v2.13.0 + github.com/onsi/gomega v1.28.0 github.com/pkg/errors v0.9.1 github.com/russross/blackfriday/v2 v2.1.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 - golang.org/x/sys v0.10.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/sys v0.13.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 @@ -37,7 +37,7 @@ require ( k8s.io/component-base v0.0.0 k8s.io/component-helpers v0.0.0 k8s.io/klog/v2 v2.100.1 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/metrics v0.0.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd @@ -50,7 +50,7 @@ require ( require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -81,13 +81,13 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.0 // indirect + golang.org/x/tools v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/staging/src/k8s.io/kubectl/go.sum b/staging/src/k8s.io/kubectl/go.sum index f923cc26f0146..89cc05a0b8eaf 100644 --- a/staging/src/k8s.io/kubectl/go.sum +++ b/staging/src/k8s.io/kubectl/go.sum @@ -1,5 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -31,8 +32,8 @@ github.com/daviddengcn/go-colortext 
v1.0.0 h1:ANqDyC0ys6qCSvuEK7l3g5RaehL/Xck9EX github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -108,7 +109,7 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -153,10 +154,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -190,39 +191,38 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -232,18 +232,18 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -252,16 +252,16 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -272,8 +272,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -286,13 +286,12 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -318,11 +317,11 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_test.go index 8ef7d7c814ef0..3347e34173b39 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_test.go @@ -2403,7 +2403,7 @@ kind: Secret metadata: annotations: applyset.kubernetes.io/additional-namespaces: "" - applyset.kubernetes.io/contains-group-resources: replicationcontrollers + 
applyset.kubernetes.io/contains-group-kinds: ReplicationController applyset.kubernetes.io/tooling: kubectl/v0.0.0-master+$Format:%H$ creationTimestamp: null labels: @@ -2437,7 +2437,7 @@ kind: Secret metadata: annotations: applyset.kubernetes.io/additional-namespaces: "" - applyset.kubernetes.io/contains-group-resources: replicationcontrollers,services + applyset.kubernetes.io/contains-group-kinds: ReplicationController,Service applyset.kubernetes.io/tooling: kubectl/v0.0.0-master+$Format:%H$ creationTimestamp: null labels: @@ -2472,7 +2472,7 @@ kind: Secret metadata: annotations: applyset.kubernetes.io/additional-namespaces: "" - applyset.kubernetes.io/contains-group-resources: replicationcontrollers,services + applyset.kubernetes.io/contains-group-kinds: ReplicationController,Service applyset.kubernetes.io/tooling: kubectl/v0.0.0-master+$Format:%H$ creationTimestamp: null labels: @@ -2507,7 +2507,7 @@ kind: Secret metadata: annotations: applyset.kubernetes.io/additional-namespaces: "" - applyset.kubernetes.io/contains-group-resources: services + applyset.kubernetes.io/contains-group-kinds: Service applyset.kubernetes.io/tooling: kubectl/v0.0.0-master+$Format:%H$ creationTimestamp: null labels: @@ -2524,60 +2524,60 @@ func TestApplySetInvalidLiveParent(t *testing.T) { defer tf.Cleanup() type testCase struct { - grsAnnotation string + gksAnnotation string toolingAnnotation string idLabel string expectErr string } validIDLabel := "applyset-0eFHV8ySqp7XoShsGvyWFQD3s96yqwHmzc4e0HR1dsY-v1" validToolingAnnotation := "kubectl/v1.27.0" - validGrsAnnotation := "deployments.apps,namespaces,secrets" + validGksAnnotation := "Deployment.apps,Namespace,Secret" for name, test := range map[string]testCase{ "group-resources annotation is required": { - grsAnnotation: "", + gksAnnotation: "", toolingAnnotation: validToolingAnnotation, idLabel: validIDLabel, - expectErr: "error: parsing ApplySet annotation on \"secrets./my-set\": kubectl requires the \"applyset.kubernetes.io/contains-group-resources\" annotation to be set on all ApplySet parent objects", + expectErr: "error: parsing ApplySet annotation on \"secrets./my-set\": kubectl requires the \"applyset.kubernetes.io/contains-group-kinds\" annotation to be set on all ApplySet parent objects", }, "group-resources annotation should not contain invalid resources": { - grsAnnotation: "does-not-exist", + gksAnnotation: "does-not-exist", toolingAnnotation: validToolingAnnotation, idLabel: validIDLabel, - expectErr: "error: parsing ApplySet annotation on \"secrets./my-set\": invalid group resource in \"applyset.kubernetes.io/contains-group-resources\" annotation: no matches for /, Resource=does-not-exist", + expectErr: "error: parsing ApplySet annotation on \"secrets./my-set\": could not find mapping for kind in \"applyset.kubernetes.io/contains-group-kinds\" annotation: no matches for kind \"does-not-exist\" in group \"\"", }, "tooling annotation is required": { - grsAnnotation: validGrsAnnotation, + gksAnnotation: validGksAnnotation, toolingAnnotation: "", idLabel: validIDLabel, expectErr: "error: ApplySet parent object \"secrets./my-set\" already exists and is missing required annotation \"applyset.kubernetes.io/tooling\"", }, "tooling annotation must have kubectl prefix": { - grsAnnotation: validGrsAnnotation, + gksAnnotation: validGksAnnotation, toolingAnnotation: "helm/v3", idLabel: validIDLabel, expectErr: "error: ApplySet parent object \"secrets./my-set\" already exists and is managed by tooling \"helm\" instead of \"kubectl\"", }, "tooling annotation 
with invalid prefix with one segment can be parsed": { - grsAnnotation: validGrsAnnotation, + gksAnnotation: validGksAnnotation, toolingAnnotation: "helm", idLabel: validIDLabel, expectErr: "error: ApplySet parent object \"secrets./my-set\" already exists and is managed by tooling \"helm\" instead of \"kubectl\"", }, "tooling annotation with invalid prefix with many segments can be parsed": { - grsAnnotation: validGrsAnnotation, + gksAnnotation: validGksAnnotation, toolingAnnotation: "example.com/tool/why/v1", idLabel: validIDLabel, expectErr: "error: ApplySet parent object \"secrets./my-set\" already exists and is managed by tooling \"example.com/tool/why\" instead of \"kubectl\"", }, "ID label is required": { - grsAnnotation: validGrsAnnotation, + gksAnnotation: validGksAnnotation, toolingAnnotation: validToolingAnnotation, idLabel: "", expectErr: "error: ApplySet parent object \"secrets./my-set\" exists and does not have required label applyset.kubernetes.io/id", }, "ID label must match the ApplySet's real ID": { - grsAnnotation: validGrsAnnotation, + gksAnnotation: validGksAnnotation, toolingAnnotation: validToolingAnnotation, idLabel: "somethingelse", expectErr: fmt.Sprintf("error: ApplySet parent object \"secrets./my-set\" exists and has incorrect value for label \"applyset.kubernetes.io/id\" (got: somethingelse, want: %s)", validIDLabel), @@ -2596,8 +2596,8 @@ func TestApplySetInvalidLiveParent(t *testing.T) { secret.SetNamespace("test") annotations := make(map[string]string) labels := make(map[string]string) - if test.grsAnnotation != "" { - annotations[ApplySetGRsAnnotation] = test.grsAnnotation + if test.gksAnnotation != "" { + annotations[ApplySetGKsAnnotation] = test.gksAnnotation } if test.toolingAnnotation != "" { annotations[ApplySetToolingAnnotation] = test.toolingAnnotation @@ -2670,7 +2670,7 @@ kind: ApplySet metadata: annotations: applyset.kubernetes.io/additional-namespaces: test - applyset.kubernetes.io/contains-group-resources: replicationcontrollers + applyset.kubernetes.io/contains-group-kinds: ReplicationController applyset.kubernetes.io/tooling: kubectl/v0.0.0-master+$Format:%H$ creationTimestamp: null labels: diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/applyset.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/applyset.go index d4bdd5889e329..4fd6dd8ed7508 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/applyset.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/applyset.go @@ -54,13 +54,22 @@ const ( // Example value: "kube-system,ns1,ns2". ApplySetAdditionalNamespacesAnnotation = "applyset.kubernetes.io/additional-namespaces" - // ApplySetGRsAnnotation is a list of group-resources used to optimize listing of ApplySet member objects. + // Deprecated: ApplySetGRsAnnotation is a list of group-resources used to optimize listing of ApplySet member objects. // It is optional in the ApplySet specification, as tools can perform discovery or use a different optimization. // However, it is currently required in kubectl. - // When present, the value of this annotation must be a comma separated list of the group-kinds, + // When present, the value of this annotation must be a comma separated list of the group-resources, // in the fully-qualified name format, i.e. <resource>.<group>. // Example value: "certificates.cert-manager.io,configmaps,deployments.apps,secrets,services" - ApplySetGRsAnnotation = "applyset.kubernetes.io/contains-group-resources" + // Deprecated and replaced by ApplySetGKsAnnotation, support for this can be removed in applyset beta or GA.
+ DeprecatedApplySetGRsAnnotation = "applyset.kubernetes.io/contains-group-resources" + + // ApplySetGKsAnnotation is a list of group-kinds used to optimize listing of ApplySet member objects. + // It is optional in the ApplySet specification, as tools can perform discovery or use a different optimization. + // However, it is currently required in kubectl. + // When present, the value of this annotation must be a comma separated list of the group-kinds, + // in the fully-qualified name format, i.e. <kind>.<group>. + // Example value: "Certificate.cert-manager.io,ConfigMap,Deployment.apps,Secret,Service" + ApplySetGKsAnnotation = "applyset.kubernetes.io/contains-group-kinds" // ApplySetParentIDLabel is the key of the label that makes object an ApplySet parent object. // Its value MUST use the format specified in V1ApplySetIdFormat below @@ -92,13 +101,13 @@ type ApplySet struct { toolingID ApplySetTooling // currentResources is the set of resources that are part of the server-side set as of when the current operation started. - currentResources map[schema.GroupVersionResource]*meta.RESTMapping + currentResources map[schema.GroupKind]*kindInfo // currentNamespaces is the set of namespaces that contain objects in this applyset as of when the current operation started. currentNamespaces sets.Set[string] // updatedResources is the set of resources that will be part of the set as of when the current operation completes. - updatedResources map[schema.GroupVersionResource]*meta.RESTMapping + updatedResources map[schema.GroupKind]*kindInfo // updatedNamespaces is the set of namespaces that will contain objects in this applyset as of when the current operation completes. updatedNamespaces sets.Set[string] @@ -143,9 +152,9 @@ func (t ApplySetTooling) String() string { // NewApplySet creates a new ApplySet object tracked by the given parent object. func NewApplySet(parent *ApplySetParentRef, tooling ApplySetTooling, mapper meta.RESTMapper, client resource.RESTClient) *ApplySet { return &ApplySet{ - currentResources: make(map[schema.GroupVersionResource]*meta.RESTMapping), + currentResources: make(map[schema.GroupKind]*kindInfo), currentNamespaces: make(sets.Set[string]), - updatedResources: make(map[schema.GroupVersionResource]*meta.RESTMapping), + updatedResources: make(map[schema.GroupKind]*kindInfo), updatedNamespaces: make(sets.Set[string]), parentRef: parent, toolingID: tooling, @@ -284,7 +293,7 @@ func (a *ApplySet) fetchParent() error { return fmt.Errorf("ApplySet parent object %q exists and has incorrect value for label %q (got: %s, want: %s)", a.parentRef, ApplySetParentIDLabel, idLabel, a.ID()) } - if a.currentResources, err = parseResourcesAnnotation(annotations, a.restMapper); err != nil { + if a.currentResources, err = parseKindAnnotation(annotations, a.restMapper); err != nil { // TODO: handle GVRs for now-deleted CRDs return fmt.Errorf("parsing ApplySet annotation on %q: %w", a.parentRef, err) } @@ -302,8 +311,8 @@ func (a *ApplySet) LabelSelectorForMembers() string { // AllPrunableResources returns the list of all resources that should be considered for pruning. // This is potentially a superset of the resource types that actually contain resources.
-func (a *ApplySet) AllPrunableResources() []*meta.RESTMapping { - var ret []*meta.RESTMapping +func (a *ApplySet) AllPrunableResources() []*kindInfo { + var ret []*kindInfo for _, m := range a.currentResources { ret = append(ret, m) } @@ -336,14 +345,43 @@ func toolingBaseName(toolAnnotation string) string { return toolAnnotation } -func parseResourcesAnnotation(annotations map[string]string, mapper meta.RESTMapper) (map[schema.GroupVersionResource]*meta.RESTMapping, error) { - annotation, ok := annotations[ApplySetGRsAnnotation] +// kindInfo holds type information about a particular resource type. +type kindInfo struct { + restMapping *meta.RESTMapping +} + +func parseKindAnnotation(annotations map[string]string, mapper meta.RESTMapper) (map[schema.GroupKind]*kindInfo, error) { + annotation, ok := annotations[ApplySetGKsAnnotation] if !ok { + if annotations[DeprecatedApplySetGRsAnnotation] != "" { + return parseDeprecatedResourceAnnotation(annotations[DeprecatedApplySetGRsAnnotation], mapper) + } + // The spec does not require this annotation. However, 'missing' means 'perform discovery'. // We return an error because we do not currently support dynamic discovery in kubectl apply. - return nil, fmt.Errorf("kubectl requires the %q annotation to be set on all ApplySet parent objects", ApplySetGRsAnnotation) + return nil, fmt.Errorf("kubectl requires the %q annotation to be set on all ApplySet parent objects", ApplySetGKsAnnotation) + } + mappings := make(map[schema.GroupKind]*kindInfo) + // Annotation present but empty means that this is currently an empty set. + if annotation == "" { + return mappings, nil + } + for _, gkString := range strings.Split(annotation, ",") { + gk := schema.ParseGroupKind(gkString) + restMapping, err := mapper.RESTMapping(gk) + if err != nil { + return nil, fmt.Errorf("could not find mapping for kind in %q annotation: %w", ApplySetGKsAnnotation, err) + } + mappings[gk] = &kindInfo{ + restMapping: restMapping, + } } - mappings := make(map[schema.GroupVersionResource]*meta.RESTMapping) + + return mappings, nil +} + +func parseDeprecatedResourceAnnotation(annotation string, mapper meta.RESTMapper) (map[schema.GroupKind]*kindInfo, error) { + mappings := make(map[schema.GroupKind]*kindInfo) // Annotation present but empty means that this is currently an empty set. if annotation == "" { return mappings, nil @@ -352,13 +390,15 @@ func parseResourcesAnnotation(annotations map[string]string, mapper meta.RESTMap gr := schema.ParseGroupResource(grString) gvk, err := mapper.KindFor(gr.WithVersion("")) if err != nil { - return nil, fmt.Errorf("invalid group resource in %q annotation: %w", ApplySetGRsAnnotation, err) + return nil, fmt.Errorf("invalid group resource in %q annotation: %w", DeprecatedApplySetGRsAnnotation, err) } - mapping, err := mapper.RESTMapping(gvk.GroupKind()) + restMapping, err := mapper.RESTMapping(gvk.GroupKind()) if err != nil { - return nil, fmt.Errorf("could not find kind for resource in %q annotation: %w", ApplySetGRsAnnotation, err) + return nil, fmt.Errorf("could not find kind for resource in %q annotation: %w", DeprecatedApplySetGRsAnnotation, err) + } + mappings[gvk.GroupKind()] = &kindInfo{ + restMapping: restMapping, } - mappings[mapping.Resource] = mapping } return mappings, nil } @@ -377,9 +417,14 @@ func parseNamespacesAnnotation(annotations map[string]string) sets.Set[string] { // addResource registers the given resource and namespace as being part of the updated set of // resources being applied by the current operation. 
-func (a *ApplySet) addResource(resource *meta.RESTMapping, namespace string) { - a.updatedResources[resource.Resource] = resource - if resource.Scope == meta.RESTScopeNamespace && namespace != "" { +func (a *ApplySet) addResource(restMapping *meta.RESTMapping, namespace string) { + gk := restMapping.GroupVersionKind.GroupKind() + if _, found := a.updatedResources[gk]; !found { + a.updatedResources[gk] = &kindInfo{ + restMapping: restMapping, + } + } + if restMapping.Scope == meta.RESTScopeNamespace && namespace != "" { a.updatedNamespaces.Insert(namespace) } } @@ -394,6 +439,8 @@ func (a *ApplySet) updateParent(mode ApplySetUpdateMode, dryRun cmdutil.DryRunSt if err != nil { return fmt.Errorf("failed to encode patch for ApplySet parent: %w", err) } + // Note that because we are using SSA, we will remove any annotations we don't specify, + // which is how we remove the deprecated contains-group-resources annotation. err = serverSideApplyRequest(a, data, dryRun, validation, false) if err != nil && errors.IsConflict(err) { // Try again with conflicts forced @@ -429,17 +476,17 @@ func serverSideApplyRequest(a *ApplySet, data []byte, dryRun cmdutil.DryRunStrat } func (a *ApplySet) buildParentPatch(mode ApplySetUpdateMode) *metav1.PartialObjectMetadata { - var newGRsAnnotation, newNsAnnotation string + var newGKsAnnotation, newNsAnnotation string switch mode { case updateToSuperset: // If the apply succeeded but pruning failed, the set of group resources that // the ApplySet should track is the superset of the previous and current resources. // This ensures that the resources that failed to be pruned are not orphaned from the set. grSuperset := sets.KeySet(a.currentResources).Union(sets.KeySet(a.updatedResources)) - newGRsAnnotation = generateResourcesAnnotation(grSuperset) + newGKsAnnotation = generateKindsAnnotation(grSuperset) newNsAnnotation = generateNamespacesAnnotation(a.currentNamespaces.Union(a.updatedNamespaces), a.parentRef.Namespace) case updateToLatestSet: - newGRsAnnotation = generateResourcesAnnotation(sets.KeySet(a.updatedResources)) + newGKsAnnotation = generateKindsAnnotation(sets.KeySet(a.updatedResources)) newNsAnnotation = generateNamespacesAnnotation(a.updatedNamespaces, a.parentRef.Namespace) } @@ -453,7 +500,7 @@ func (a *ApplySet) buildParentPatch(mode ApplySetUpdateMode) *metav1.PartialObje Namespace: a.parentRef.Namespace, Annotations: map[string]string{ ApplySetToolingAnnotation: a.toolingID.String(), - ApplySetGRsAnnotation: newGRsAnnotation, + ApplySetGKsAnnotation: newGKsAnnotation, ApplySetAdditionalNamespacesAnnotation: newNsAnnotation, }, Labels: map[string]string{ @@ -469,13 +516,13 @@ func generateNamespacesAnnotation(namespaces sets.Set[string], skip string) stri return strings.Join(nsList, ",") } -func generateResourcesAnnotation(resources sets.Set[schema.GroupVersionResource]) string { - var grs []string - for gvr := range resources { - grs = append(grs, gvr.GroupResource().String()) +func generateKindsAnnotation(resources sets.Set[schema.GroupKind]) string { + var gks []string + for gk := range resources { + gks = append(gks, gk.String()) } - sort.Strings(grs) - return strings.Join(grs, ",") + sort.Strings(gks) + return strings.Join(gks, ",") } func (a ApplySet) FieldManager() string { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/applyset_pruner.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/applyset_pruner.go index 9308498d176d3..3c064af36f2c0 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/applyset_pruner.go +++ 
b/staging/src/k8s.io/kubectl/pkg/cmd/apply/applyset_pruner.go @@ -77,27 +77,29 @@ func (a *ApplySet) FindAllObjectsToPrune(ctx context.Context, dynamicClient dyna // We run discovery in parallel, in as many goroutines as priority and fairness will allow // (We don't expect many requests in real-world scenarios - maybe tens, unlikely to be hundreds) - for _, restMapping := range a.AllPrunableResources() { - switch restMapping.Scope.Name() { + for gvk, resource := range a.AllPrunableResources() { + scope := resource.restMapping.Scope + + switch scope.Name() { case meta.RESTScopeNameNamespace: for _, namespace := range a.AllPrunableNamespaces() { if namespace == "" { // Just double-check because otherwise we get cryptic error messages - return nil, fmt.Errorf("unexpectedly encountered empty namespace during prune of namespace-scoped resource %v", restMapping.GroupVersionKind) + return nil, fmt.Errorf("unexpectedly encountered empty namespace during prune of namespace-scoped resource %v", gvk) } tasks = append(tasks, &task{ namespace: namespace, - restMapping: restMapping, + restMapping: resource.restMapping, }) } case meta.RESTScopeNameRoot: tasks = append(tasks, &task{ - restMapping: restMapping, + restMapping: resource.restMapping, }) default: - return nil, fmt.Errorf("unhandled scope %q", restMapping.Scope.Name()) + return nil, fmt.Errorf("unhandled scope %q", scope.Name()) } } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go b/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go index 263e006c492e3..af25e072941d2 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go @@ -28,6 +28,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericiooptions" "k8s.io/cli-runtime/pkg/resource" @@ -125,7 +126,7 @@ func NewCmdAttach(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra. // RemoteAttach defines the interface accepted by the Attach command - provided for test stubbing type RemoteAttach interface { - Attach(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error + Attach(url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error } // DefaultAttachFunc is the default AttachFunc used @@ -148,7 +149,7 @@ func DefaultAttachFunc(o *AttachOptions, containerToAttach *corev1.Container, ra TTY: raw, }, scheme.ParameterCodec) - return o.Attach.Attach("POST", req.URL(), o.Config, o.In, o.Out, o.ErrOut, raw, sizeQueue) + return o.Attach.Attach(req.URL(), o.Config, o.In, o.Out, o.ErrOut, raw, sizeQueue) } } @@ -156,11 +157,24 @@ func DefaultAttachFunc(o *AttachOptions, containerToAttach *corev1.Container, ra type DefaultRemoteAttach struct{} // Attach executes attach to a running container -func (*DefaultRemoteAttach) Attach(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { - exec, err := remotecommand.NewSPDYExecutor(config, method, url) +func (*DefaultRemoteAttach) Attach(url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { + // Legacy SPDY executor is default. 
If feature gate enabled, fallback + // executor attempts websockets first--then SPDY. + exec, err := remotecommand.NewSPDYExecutor(config, "POST", url) if err != nil { return err } + if cmdutil.RemoteCommandWebsockets.IsEnabled() { + // WebSocketExecutor must be "GET" method as described in RFC 6455 Sec. 4.1 (page 17). + websocketExec, err := remotecommand.NewWebSocketExecutor(config, "GET", url.String()) + if err != nil { + return err + } + exec, err = remotecommand.NewFallbackExecutor(websocketExec, exec, httpstream.IsUpgradeFailure) + if err != nil { + return err + } + } return exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{ Stdin: stdin, Stdout: stdout, diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach_test.go index 24b6e71d2f1aa..6d491323ebbfc 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/attach/attach_test.go @@ -43,13 +43,11 @@ import ( ) type fakeRemoteAttach struct { - method string - url *url.URL - err error + url *url.URL + err error } -func (f *fakeRemoteAttach) Attach(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { - f.method = method +func (f *fakeRemoteAttach) Attach(url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { f.url = url return f.err } @@ -327,7 +325,7 @@ func TestAttach(t *testing.T) { return err } - return options.Attach.Attach("POST", u, nil, nil, nil, nil, raw, sizeQueue) + return options.Attach.Attach(u, nil, nil, nil, nil, raw, sizeQueue) } } @@ -347,9 +345,6 @@ func TestAttach(t *testing.T) { t.Errorf("%s: Did not get expected path for exec request: %q %q", test.name, test.attachPath, remoteAttach.url.Path) return } - if remoteAttach.method != "POST" { - t.Errorf("%s: Did not get method for attach request: %s", test.name, remoteAttach.method) - } if remoteAttach.url.Query().Get("container") != "bar" { t.Errorf("%s: Did not have query parameters: %s", test.name, remoteAttach.url.Query()) } @@ -428,7 +423,7 @@ func TestAttachWarnings(t *testing.T) { return err } - return options.Attach.Attach("POST", u, nil, nil, nil, nil, raw, sizeQueue) + return options.Attach.Attach(u, nil, nil, nil, nil, raw, sizeQueue) } } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/cmd.go b/staging/src/k8s.io/kubectl/pkg/cmd/cmd.go index 6b3a84d50fbf7..7907ffe6c4667 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/cmd.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/cmd.go @@ -92,15 +92,18 @@ type KubectlOptions struct { genericiooptions.IOStreams } -var defaultConfigFlags = genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag().WithDiscoveryBurst(300).WithDiscoveryQPS(50.0) +func defaultConfigFlags() *genericclioptions.ConfigFlags { + return genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag().WithDiscoveryBurst(300).WithDiscoveryQPS(50.0) +} // NewDefaultKubectlCommand creates the `kubectl` command with default arguments func NewDefaultKubectlCommand() *cobra.Command { + ioStreams := genericiooptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr} return NewDefaultKubectlCommandWithArgs(KubectlOptions{ PluginHandler: NewDefaultPluginHandler(plugin.ValidPluginFilenamePrefixes), Arguments: os.Args, - ConfigFlags: defaultConfigFlags, - IOStreams: genericiooptions.IOStreams{In: 
os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}, + ConfigFlags: defaultConfigFlags().WithWarningPrinter(ioStreams), + IOStreams: ioStreams, }) } @@ -139,17 +142,12 @@ func NewDefaultKubectlCommandWithArgs(o KubectlOptions) *cobra.Command { } } } else if err == nil { - if cmdutil.CmdPluginAsSubcommand.IsEnabled() { + if !cmdutil.CmdPluginAsSubcommand.IsDisabled() { // Command exists(e.g. kubectl create), but it is not certain that // subcommand also exists (e.g. kubectl create networkpolicy) - if IsSubcommandPluginAllowed(foundCmd.Name()) { - var subcommand string - for _, arg := range foundArgs { // first "non-flag" argument as subcommand - if !strings.HasPrefix(arg, "-") { - subcommand = arg - break - } - } + // we also have to eliminate kubectl create -f + if IsSubcommandPluginAllowed(foundCmd.Name()) && len(foundArgs) >= 1 && !strings.HasPrefix(foundArgs[0], "-") { + subcommand := foundArgs[0] builtinSubcmdExist := false for _, subcmd := range foundCmd.Commands() { if subcmd.Name() == subcommand { @@ -364,7 +362,7 @@ func NewKubectlCommand(o KubectlOptions) *cobra.Command { kubeConfigFlags := o.ConfigFlags if kubeConfigFlags == nil { - kubeConfigFlags = defaultConfigFlags + kubeConfigFlags = defaultConfigFlags().WithWarningPrinter(o.IOStreams) } kubeConfigFlags.AddFlags(flags) matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/cmd_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/cmd_test.go index c3f4a6d8be389..84653461c9725 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/cmd_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/cmd_test.go @@ -29,8 +29,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericiooptions" "k8s.io/kubectl/pkg/cmd/plugin" - cmdtesting "k8s.io/kubectl/pkg/cmd/testing" - cmdutil "k8s.io/kubectl/pkg/cmd/util" ) func TestNormalizationFuncGlobalExistence(t *testing.T) { @@ -129,47 +127,45 @@ func TestKubectlSubcommandShadowPlugin(t *testing.T) { } for _, test := range tests { - cmdtesting.WithAlphaEnvs([]cmdutil.FeatureGate{cmdutil.CmdPluginAsSubcommand}, t, func(t *testing.T) { - t.Run(test.name, func(t *testing.T) { - pluginsHandler := &testPluginHandler{ - pluginsDirectory: "plugin/testdata", - validPrefixes: plugin.ValidPluginFilenamePrefixes, - } - ioStreams, _, _, _ := genericiooptions.NewTestIOStreams() - root := NewDefaultKubectlCommandWithArgs(KubectlOptions{PluginHandler: pluginsHandler, Arguments: test.args, IOStreams: ioStreams}) - // original plugin handler (DefaultPluginHandler) is implemented by exec call so no additional actions are expected on the cobra command if we activate the plugin flow - if !pluginsHandler.lookedup && !pluginsHandler.executed { - // args must be set, otherwise Execute will use os.Args (args used for starting the test) and test.args would not be passed - // to the command which might invoke only "kubectl" without any additional args and give false positives - root.SetArgs(test.args[1:]) - // Important note! 
Incorrect command or command failing validation might just call os.Exit(1) which would interrupt execution of the test - if err := root.Execute(); err != nil { - t.Fatalf("unexpected error: %v", err) - } + t.Run(test.name, func(t *testing.T) { + pluginsHandler := &testPluginHandler{ + pluginsDirectory: "plugin/testdata", + validPrefixes: plugin.ValidPluginFilenamePrefixes, + } + ioStreams, _, _, _ := genericiooptions.NewTestIOStreams() + root := NewDefaultKubectlCommandWithArgs(KubectlOptions{PluginHandler: pluginsHandler, Arguments: test.args, IOStreams: ioStreams}) + // original plugin handler (DefaultPluginHandler) is implemented by exec call so no additional actions are expected on the cobra command if we activate the plugin flow + if !pluginsHandler.lookedup && !pluginsHandler.executed { + // args must be set, otherwise Execute will use os.Args (args used for starting the test) and test.args would not be passed + // to the command which might invoke only "kubectl" without any additional args and give false positives + root.SetArgs(test.args[1:]) + // Important note! Incorrect command or command failing validation might just call os.Exit(1) which would interrupt execution of the test + if err := root.Execute(); err != nil { + t.Fatalf("unexpected error: %v", err) } + } - if (pluginsHandler.lookupErr != nil && pluginsHandler.lookupErr.Error() != test.expectLookupError) || - (pluginsHandler.lookupErr == nil && len(test.expectLookupError) > 0) { - t.Fatalf("unexpected error: expected %q to occur, but got %q", test.expectLookupError, pluginsHandler.lookupErr) - } + if (pluginsHandler.lookupErr != nil && pluginsHandler.lookupErr.Error() != test.expectLookupError) || + (pluginsHandler.lookupErr == nil && len(test.expectLookupError) > 0) { + t.Fatalf("unexpected error: expected %q to occur, but got %q", test.expectLookupError, pluginsHandler.lookupErr) + } - if pluginsHandler.lookedup && !pluginsHandler.executed && len(test.expectLookupError) == 0 { - // we have to fail here, because we have found the plugin, but not executed the plugin, nor the command (this would normally result in an error: unknown command) - t.Fatalf("expected plugin execution, but did not occur") - } + if pluginsHandler.lookedup && !pluginsHandler.executed && len(test.expectLookupError) == 0 { + // we have to fail here, because we have found the plugin, but not executed the plugin, nor the command (this would normally result in an error: unknown command) + t.Fatalf("expected plugin execution, but did not occur") + } - if pluginsHandler.executedPlugin != test.expectPlugin { - t.Fatalf("unexpected plugin execution: expected %q, got %q", test.expectPlugin, pluginsHandler.executedPlugin) - } + if pluginsHandler.executedPlugin != test.expectPlugin { + t.Fatalf("unexpected plugin execution: expected %q, got %q", test.expectPlugin, pluginsHandler.executedPlugin) + } - if pluginsHandler.executed && len(test.expectPlugin) == 0 { - t.Fatalf("unexpected plugin execution: expected no plugin, got %q", pluginsHandler.executedPlugin) - } + if pluginsHandler.executed && len(test.expectPlugin) == 0 { + t.Fatalf("unexpected plugin execution: expected no plugin, got %q", pluginsHandler.executedPlugin) + } - if !cmp.Equal(pluginsHandler.withArgs, test.expectPluginArgs, cmpopts.EquateEmpty()) { - t.Fatalf("unexpected plugin execution args: expected %q, got %q", test.expectPluginArgs, pluginsHandler.withArgs) - } - }) + if !cmp.Equal(pluginsHandler.withArgs, test.expectPluginArgs, cmpopts.EquateEmpty()) { + t.Fatalf("unexpected plugin 
execution args: expected %q, got %q", test.expectPluginArgs, pluginsHandler.withArgs) + } }) } } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_token.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_token.go index 77a3dfc1290c6..e2c7a7270ed71 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_token.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_token.go @@ -23,6 +23,7 @@ import ( "time" "github.com/spf13/cobra" + "github.com/spf13/pflag" authenticationv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -46,6 +47,9 @@ type TokenOptions struct { PrintFlags *genericclioptions.PrintFlags PrintObj func(obj runtime.Object) error + // Flags hold the parsed CLI flags. + Flags *pflag.FlagSet + // Name and namespace of service account to create a token for Name string Namespace string @@ -137,7 +141,7 @@ func NewCmdCreateToken(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) cmd.Flags().StringArrayVar(&o.Audiences, "audience", o.Audiences, "Audience of the requested token. If unset, defaults to requesting a token for use with the Kubernetes API server. May be repeated to request a token valid for multiple audiences.") - cmd.Flags().DurationVar(&o.Duration, "duration", o.Duration, "Requested lifetime of the issued token. The server may return a token with a longer or shorter lifetime.") + cmd.Flags().DurationVar(&o.Duration, "duration", o.Duration, "Requested lifetime of the issued token. If not set, the lifetime will be determined by the server automatically. The server may return a token with a longer or shorter lifetime.") cmd.Flags().StringVar(&o.BoundObjectKind, "bound-object-kind", o.BoundObjectKind, "Kind of an object to bind the token to. "+ "Supported kinds are "+strings.Join(sets.StringKeySet(boundObjectKindToAPIVersion).List(), ", ")+". "+ @@ -149,6 +153,8 @@ func NewCmdCreateToken(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) "Requires --bound-object-kind and --bound-object-name. 
"+ "If unset, the UID of the existing object is used.") + o.Flags = cmd.Flags() + return cmd } @@ -195,7 +201,7 @@ func (o *TokenOptions) Validate() error { if len(o.Namespace) == 0 { return fmt.Errorf("--namespace is required") } - if o.Duration < 0 { + if o.Duration < 0 || (o.Duration == 0 && o.Flags.Changed("duration")) { return fmt.Errorf("--duration must be positive") } if o.Duration%time.Second != 0 { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go index 17a048032250f..e3e72f2196fc4 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go @@ -310,7 +310,7 @@ func TestGenerateDebugContainer(t *testing.T) { TerminationMessagePolicy: corev1.TerminationMessageReadFile, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, + Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"}, }, }, }, @@ -1323,11 +1323,12 @@ func TestGeneratePodCopyWithDebugContainer(t *testing.T) { ImagePullPolicy: corev1.PullIfNotPresent, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, + Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"}, }, }, }, }, + ShareProcessNamespace: pointer.Bool(true), }, }, }, @@ -1694,9 +1695,8 @@ func TestGenerateNodeDebugPod(t *testing.T) { TerminationMessagePolicy: corev1.TerminationMessageReadFile, VolumeMounts: nil, SecurityContext: &corev1.SecurityContext{ - Privileged: pointer.Bool(true), Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, + Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"}, }, }, }, diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go index 603970fcb87df..ee3383250bb7c 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go @@ -201,9 +201,11 @@ func (p *netadminProfile) Apply(pod *corev1.Pod, containerName string, target ru switch style { case node: useHostNamespaces(pod) - setPrivileged(pod, containerName) - case podCopy, ephemeral: + case podCopy: + shareProcessNamespace(pod) + + case ephemeral: // no additional modifications needed } @@ -269,20 +271,6 @@ func clearSecurityContext(p *corev1.Pod, containerName string) { }) } -// setPrivileged configures the containers as privileged. -func setPrivileged(p *corev1.Pod, containerName string) { - podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool { - if c.Name != containerName { - return true - } - if c.SecurityContext == nil { - c.SecurityContext = &corev1.SecurityContext{} - } - c.SecurityContext.Privileged = pointer.Bool(true) - return false - }) -} - // disallowRoot configures the container to run as a non-root user. func disallowRoot(p *corev1.Pod, containerName string) { podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool { @@ -326,13 +314,14 @@ func allowProcessTracing(p *corev1.Pod, containerName string) { }) } -// allowNetadminCapability grants NET_ADMIN capability to the container. +// allowNetadminCapability grants NET_ADMIN and NET_RAW capability to the container. 
func allowNetadminCapability(p *corev1.Pod, containerName string) { podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool { if c.Name != containerName { return true } addCapability(c, "NET_ADMIN") + addCapability(c, "NET_RAW") return false }) } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go index fa45f5105532b..7b44a9f3a73fe 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go @@ -495,7 +495,7 @@ func TestNetAdminProfile(t *testing.T) { Name: "dbg", Image: "dbgimage", SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, + Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"}, }, }, }, @@ -526,6 +526,7 @@ func TestNetAdminProfile(t *testing.T) { expectPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "podcopy"}, Spec: corev1.PodSpec{ + ShareProcessNamespace: pointer.Bool(true), Containers: []corev1.Container{ {Name: "app", Image: "appimage"}, { @@ -533,7 +534,7 @@ func TestNetAdminProfile(t *testing.T) { Image: "dbgimage", SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, + Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"}, }, }, }, @@ -572,6 +573,7 @@ func TestNetAdminProfile(t *testing.T) { expectPod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "podcopy"}, Spec: corev1.PodSpec{ + ShareProcessNamespace: pointer.Bool(true), Containers: []corev1.Container{ {Name: "app", Image: "appimage"}, { @@ -579,7 +581,7 @@ func TestNetAdminProfile(t *testing.T) { Image: "dbgimage", SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"SYS_PTRACE", "NET_ADMIN"}, + Add: []corev1.Capability{"SYS_PTRACE", "NET_ADMIN", "NET_RAW"}, }, }, }, @@ -610,9 +612,8 @@ func TestNetAdminProfile(t *testing.T) { Name: "dbg", Image: "dbgimage", SecurityContext: &corev1.SecurityContext{ - Privileged: pointer.BoolPtr(true), Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, + Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"}, }, }, }, @@ -630,7 +631,6 @@ func TestNetAdminProfile(t *testing.T) { Name: "dbg", Image: "dbgimage", SecurityContext: &corev1.SecurityContext{ - Privileged: pointer.BoolPtr(true), Capabilities: &corev1.Capabilities{ Add: []corev1.Capability{"SYS_PTRACE"}, }, @@ -652,9 +652,8 @@ func TestNetAdminProfile(t *testing.T) { Name: "dbg", Image: "dbgimage", SecurityContext: &corev1.SecurityContext{ - Privileged: pointer.BoolPtr(true), Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"SYS_PTRACE", "NET_ADMIN"}, + Add: []corev1.Capability{"SYS_PTRACE", "NET_ADMIN", "NET_RAW"}, }, }, }, diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go b/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go index 83998f1af54b6..f79a23b9463be 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_flags.go @@ -160,10 +160,8 @@ func (f *DeleteFlags) AddFlags(cmd *cobra.Command) { if f.Raw != nil { cmd.Flags().StringVar(f.Raw, "raw", *f.Raw, "Raw URI to DELETE to the server. 
Uses the transport specified by the kubeconfig file.") } - if cmdutil.InteractiveDelete.IsEnabled() { - if f.Interactive != nil { - cmd.Flags().BoolVarP(f.Interactive, "interactive", "i", *f.Interactive, "If true, delete resource only when user confirms. This flag is in Alpha.") - } + if f.Interactive != nil { + cmd.Flags().BoolVarP(f.Interactive, "interactive", "i", *f.Interactive, "If true, delete resource only when user confirms. This flag is in Alpha.") } } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_test.go index c47d4fc72e02e..427b2713add9e 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/delete/delete_test.go @@ -54,41 +54,37 @@ func TestDeleteFlagValidation(t *testing.T) { defer f.Cleanup() tests := []struct { - flags DeleteFlags - enableAlphas []cmdutil.FeatureGate - args [][]string - expectedErr string + flags DeleteFlags + args [][]string + expectedErr string }{ { flags: DeleteFlags{ Raw: pointer.String("test"), Interactive: pointer.Bool(true), }, - enableAlphas: []cmdutil.FeatureGate{cmdutil.InteractiveDelete}, - expectedErr: "--interactive can not be used with --raw", + expectedErr: "--interactive can not be used with --raw", }, } for _, test := range tests { cmd := fakecmd() - cmdtesting.WithAlphaEnvs(test.enableAlphas, t, func(t *testing.T) { - deleteOptions, err := test.flags.ToOptions(nil, genericiooptions.NewTestIOStreamsDiscard()) - if err != nil { - t.Fatalf("unexpected error creating delete options: %s", err) - } - deleteOptions.Filenames = []string{"../../../testdata/redis-master-controller.yaml"} - err = deleteOptions.Complete(f, nil, cmd) - if err != nil { - t.Fatalf("unexpected error creating delete options: %s", err) - } - err = deleteOptions.Validate() - if err == nil { - t.Fatalf("missing expected error") - } - if test.expectedErr != err.Error() { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - }) + deleteOptions, err := test.flags.ToOptions(nil, genericiooptions.NewTestIOStreamsDiscard()) + if err != nil { + t.Fatalf("unexpected error creating delete options: %s", err) + } + deleteOptions.Filenames = []string{"../../../testdata/redis-master-controller.yaml"} + err = deleteOptions.Complete(f, nil, cmd) + if err != nil { + t.Fatalf("unexpected error creating delete options: %s", err) + } + err = deleteOptions.Validate() + if err == nil { + t.Fatalf("missing expected error") + } + if test.expectedErr != err.Error() { + t.Errorf("expected error %s, got %s", test.expectedErr, err) + } } } @@ -362,34 +358,50 @@ func TestDeleteObjectWithInteractive(t *testing.T) { }), } - cmdtesting.WithAlphaEnvs([]cmdutil.FeatureGate{cmdutil.InteractiveDelete}, t, func(t *testing.T) { - streams, in, buf, _ := genericiooptions.NewTestIOStreams() - fmt.Fprint(in, "y") - cmd := NewCmdDelete(tf, streams) - cmd.Flags().Set("filename", "../../../testdata/redis-master-controller.yaml") - cmd.Flags().Set("output", "name") - cmd.Flags().Set("interactive", "true") - cmd.Run(cmd, []string{}) + streams, in, buf, _ := genericiooptions.NewTestIOStreams() + fmt.Fprint(in, "y") + cmd := NewCmdDelete(tf, streams) + err := cmd.Flags().Set("filename", "../../../testdata/redis-master-controller.yaml") + if err != nil { + t.Errorf("unexpected error %v", err) + } + err = cmd.Flags().Set("output", "name") + if err != nil { + t.Errorf("unexpected error %v", err) + } + err = cmd.Flags().Set("interactive", "true") + if err != nil { + 
t.Errorf("unexpected error %v", err) + } + cmd.Run(cmd, []string{}) - if buf.String() != "You are about to delete the following 1 resource(s):\nreplicationcontroller/redis-master\nDo you want to continue? (y/n): replicationcontroller/redis-master\n" { - t.Errorf("unexpected output: %s", buf.String()) - } + if buf.String() != "You are about to delete the following 1 resource(s):\nreplicationcontroller/redis-master\nDo you want to continue? (y/n): replicationcontroller/redis-master\n" { + t.Errorf("unexpected output: %s", buf.String()) + } - streams, in, buf, _ = genericiooptions.NewTestIOStreams() - fmt.Fprint(in, "n") - cmd = NewCmdDelete(tf, streams) - cmd.Flags().Set("filename", "../../../testdata/redis-master-controller.yaml") - cmd.Flags().Set("output", "name") - cmd.Flags().Set("interactive", "true") - cmd.Run(cmd, []string{}) + streams, in, buf, _ = genericiooptions.NewTestIOStreams() + fmt.Fprint(in, "n") + cmd = NewCmdDelete(tf, streams) + err = cmd.Flags().Set("filename", "../../../testdata/redis-master-controller.yaml") + if err != nil { + t.Errorf("unexpected error %v", err) + } + err = cmd.Flags().Set("output", "name") + if err != nil { + t.Errorf("unexpected error %v", err) + } + err = cmd.Flags().Set("interactive", "true") + if err != nil { + t.Errorf("unexpected error %v", err) + } + cmd.Run(cmd, []string{}) - if buf.String() != "You are about to delete the following 1 resource(s):\nreplicationcontroller/redis-master\nDo you want to continue? (y/n): deletion is cancelled\n" { - t.Errorf("unexpected output: %s", buf.String()) - } - if buf.String() == ": replicationcontroller/redis-master\n" { - t.Errorf("unexpected output: %s", buf.String()) - } - }) + if buf.String() != "You are about to delete the following 1 resource(s):\nreplicationcontroller/redis-master\nDo you want to continue? (y/n): deletion is cancelled\n" { + t.Errorf("unexpected output: %s", buf.String()) + } + if buf.String() == ": replicationcontroller/redis-master\n" { + t.Errorf("unexpected output: %s", buf.String()) + } } func TestGracePeriodScenarios(t *testing.T) { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/events/events.go b/staging/src/k8s.io/kubectl/pkg/cmd/events/events.go index 1830d28f3b4fd..cb4e0e60cf075 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/events/events.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/events/events.go @@ -121,7 +121,7 @@ func NewCmdEvents(restClientGetter genericclioptions.RESTClientGetter, streams g flags := NewEventsFlags(restClientGetter, streams) cmd := &cobra.Command{ - Use: fmt.Sprintf("events [(-o|--output=)%s] [--for TYPE/NAME] [--watch] [--event=Normal,Warning]", strings.Join(flags.PrintFlags.AllowedFormats(), "|")), + Use: fmt.Sprintf("events [(-o|--output=)%s] [--for TYPE/NAME] [--watch] [--types=Normal,Warning]", strings.Join(flags.PrintFlags.AllowedFormats(), "|")), DisableFlagsInUseLine: true, Short: i18n.T("List events"), Long: eventsLong, diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/events/events_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/events/events_test.go index fdf448b48d529..7607f8aff2b51 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/events/events_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/events/events_test.go @@ -110,7 +110,7 @@ func TestEventIsSorted(t *testing.T) { codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
streams, _, buf, _ := genericiooptions.NewTestIOStreams() clientset, err := kubernetes.NewForConfig(cmdtesting.DefaultClientConfig()) - if err != err { + if err != nil { t.Fatal(err) } @@ -148,7 +148,7 @@ func TestEventNoHeaders(t *testing.T) { codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) streams, _, buf, _ := genericiooptions.NewTestIOStreams() clientset, err := kubernetes.NewForConfig(cmdtesting.DefaultClientConfig()) - if err != err { + if err != nil { t.Fatal(err) } @@ -185,7 +185,7 @@ func TestEventFiltered(t *testing.T) { codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) streams, _, buf, _ := genericiooptions.NewTestIOStreams() clientset, err := kubernetes.NewForConfig(cmdtesting.DefaultClientConfig()) - if err != err { + if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go b/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go index 2a29aecf8b815..36d43beceb9d3 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go @@ -27,6 +27,7 @@ import ( "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericiooptions" "k8s.io/cli-runtime/pkg/resource" @@ -113,17 +114,30 @@ func NewCmdExec(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Co // RemoteExecutor defines the interface accepted by the Exec command - provided for test stubbing type RemoteExecutor interface { - Execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error + Execute(url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error } // DefaultRemoteExecutor is the standard implementation of remote command execution type DefaultRemoteExecutor struct{} -func (*DefaultRemoteExecutor) Execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { - exec, err := remotecommand.NewSPDYExecutor(config, method, url) +func (*DefaultRemoteExecutor) Execute(url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { + // Legacy SPDY executor is default. If feature gate enabled, fallback + // executor attempts websockets first--then SPDY. + exec, err := remotecommand.NewSPDYExecutor(config, "POST", url) if err != nil { return err } + if cmdutil.RemoteCommandWebsockets.IsEnabled() { + // WebSocketExecutor must be "GET" method as described in RFC 6455 Sec. 4.1 (page 17). 
+ websocketExec, err := remotecommand.NewWebSocketExecutor(config, "GET", url.String()) + if err != nil { + return err + } + exec, err = remotecommand.NewFallbackExecutor(websocketExec, exec, httpstream.IsUpgradeFailure) + if err != nil { + return err + } + } return exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{ Stdin: stdin, Stdout: stdout, @@ -371,7 +385,7 @@ func (p *ExecOptions) Run() error { TTY: t.Raw, }, scheme.ParameterCodec) - return p.Executor.Execute("POST", req.URL(), p.Config, p.In, p.Out, p.ErrOut, t.Raw, sizeQueue) + return p.Executor.Execute(req.URL(), p.Config, p.In, p.Out, p.ErrOut, t.Raw, sizeQueue) } if err := t.Safe(fn); err != nil { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec_test.go index 82ffe85e75d12..7305231f129df 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/exec/exec_test.go @@ -40,13 +40,11 @@ import ( ) type fakeRemoteExecutor struct { - method string url *url.URL execErr error } -func (f *fakeRemoteExecutor) Execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { - f.method = method +func (f *fakeRemoteExecutor) Execute(url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { f.url = url return f.execErr } @@ -264,9 +262,6 @@ func TestExec(t *testing.T) { t.Errorf("%s: Did not get expected container query param for exec request", test.name) return } - if ex.method != "POST" { - t.Errorf("%s: Did not get method for exec request: %s", test.name, ex.method) - } }) } } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/testing/fake.go b/staging/src/k8s.io/kubectl/pkg/cmd/testing/fake.go index b10d17393711d..d5e78a72e1428 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/testing/fake.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/testing/fake.go @@ -641,7 +641,7 @@ func testRESTMapper() meta.RESTMapper { } fakeDs := NewFakeCachedDiscoveryClient() - expander := restmapper.NewShortcutExpander(mapper, fakeDs) + expander := restmapper.NewShortcutExpander(mapper, fakeDs, nil) return expander } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go index 43f72f54e3a21..03f3e7f0c7f9b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go @@ -425,13 +425,24 @@ func GetPodRunningTimeoutFlag(cmd *cobra.Command) (time.Duration, error) { type FeatureGate string const ( - ApplySet FeatureGate = "KUBECTL_APPLYSET" - CmdPluginAsSubcommand FeatureGate = "KUBECTL_ENABLE_CMD_SHADOW" - InteractiveDelete FeatureGate = "KUBECTL_INTERACTIVE_DELETE" + ApplySet FeatureGate = "KUBECTL_APPLYSET" + CmdPluginAsSubcommand FeatureGate = "KUBECTL_ENABLE_CMD_SHADOW" + InteractiveDelete FeatureGate = "KUBECTL_INTERACTIVE_DELETE" + RemoteCommandWebsockets FeatureGate = "KUBECTL_REMOTE_COMMAND_WEBSOCKETS" ) +// IsEnabled returns true iff environment variable is set to true. +// All other cases, it returns false. func (f FeatureGate) IsEnabled() bool { - return os.Getenv(string(f)) == "true" + return strings.ToLower(os.Getenv(string(f))) == "true" +} + +// IsDisabled returns true iff environment variable is set to false. +// All other cases, it returns true. 
+// This function is used for the cases where feature is enabled by default, +// but it may be needed to provide a way to ability to disable this feature. +func (f FeatureGate) IsDisabled() bool { + return strings.ToLower(os.Getenv(string(f))) == "false" } func AddValidateFlags(cmd *cobra.Command) { diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/describe.go index 33576e194ec2b..2e0e2ec696ba4 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe.go @@ -215,7 +215,6 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]ResourceDescr {Group: networkingv1beta1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, {Group: networkingv1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, {Group: networkingv1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, - {Group: networkingv1alpha1.GroupName, Kind: "ClusterCIDR"}: &ClusterCIDRDescriber{c}, {Group: networkingv1alpha1.GroupName, Kind: "IPAddress"}: &IPAddressDescriber{c}, {Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c}, {Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, @@ -871,11 +870,7 @@ func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) { } } describeVolumes(pod.Spec.Volumes, w, "") - if pod.Status.QOSClass != "" { - w.Write(LEVEL_0, "QoS Class:\t%s\n", pod.Status.QOSClass) - } else { - w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod)) - } + w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod)) printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector) printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations) describeTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, w, "") @@ -1645,17 +1640,20 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri pc := d.CoreV1().Pods(namespace) - pods, err := getPodsForPVC(pc, pvc.Name, describerSettings) + pods, err := getPodsForPVC(pc, pvc, describerSettings) if err != nil { return "", err } - events, _ := searchEvents(d.CoreV1(), pvc, describerSettings.ChunkSize) + var events *corev1.EventList + if describerSettings.ShowEvents { + events, _ = searchEvents(d.CoreV1(), pvc, describerSettings.ChunkSize) + } return describePersistentVolumeClaim(pvc, events, pods) } -func getPodsForPVC(c corev1client.PodInterface, pvcName string, settings DescriberSettings) ([]corev1.Pod, error) { +func getPodsForPVC(c corev1client.PodInterface, pvc *corev1.PersistentVolumeClaim, settings DescriberSettings) ([]corev1.Pod, error) { nsPods, err := getPodsInChunks(c, metav1.ListOptions{Limit: settings.ChunkSize}) if err != nil { return []corev1.Pod{}, err @@ -1665,12 +1663,40 @@ func getPodsForPVC(c corev1client.PodInterface, pvcName string, settings Describ for _, pod := range nsPods.Items { for _, volume := range pod.Spec.Volumes { - if volume.VolumeSource.PersistentVolumeClaim != nil && volume.VolumeSource.PersistentVolumeClaim.ClaimName == pvcName { + if volume.VolumeSource.PersistentVolumeClaim != nil && volume.VolumeSource.PersistentVolumeClaim.ClaimName == pvc.Name { pods = append(pods, pod) } } } +ownersLoop: + for _, ownerRef := range pvc.ObjectMeta.OwnerReferences { + if ownerRef.Kind != "Pod" { + continue + } + + podIndex := -1 + for i, pod := range nsPods.Items { + if pod.UID == ownerRef.UID { + podIndex = i + break + } + } + if podIndex == -1 { + // Maybe the pod has been deleted + continue + } + + for _, pod := range pods { + if pod.UID == 
nsPods.Items[podIndex].UID { + // This owner pod is already recorded, look for pods between other owners + continue ownersLoop + } + } + + pods = append(pods, nsPods.Items[podIndex]) + } + return pods, nil } @@ -2818,63 +2844,6 @@ func (i *IngressClassDescriber) describeIngressClassV1(ic *networkingv1.IngressC }) } -// ClusterCIDRDescriber generates information about a ClusterCIDR. -type ClusterCIDRDescriber struct { - client clientset.Interface -} - -func (c *ClusterCIDRDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - var events *corev1.EventList - - ccV1alpha1, err := c.client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), name, metav1.GetOptions{}) - if err == nil { - if describerSettings.ShowEvents { - events, _ = searchEvents(c.client.CoreV1(), ccV1alpha1, describerSettings.ChunkSize) - } - return c.describeClusterCIDRV1alpha1(ccV1alpha1, events) - } - return "", err -} - -func (c *ClusterCIDRDescriber) describeClusterCIDRV1alpha1(cc *networkingv1alpha1.ClusterCIDR, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%v\n", cc.Name) - printLabelsMultiline(w, "Labels", cc.Labels) - printAnnotationsMultiline(w, "Annotations", cc.Annotations) - - w.Write(LEVEL_0, "NodeSelector:\n") - if cc.Spec.NodeSelector != nil { - w.Write(LEVEL_1, "NodeSelector Terms:") - if len(cc.Spec.NodeSelector.NodeSelectorTerms) == 0 { - w.WriteLine("") - } else { - w.WriteLine("") - for i, term := range cc.Spec.NodeSelector.NodeSelectorTerms { - printNodeSelectorTermsMultilineWithIndent(w, LEVEL_2, fmt.Sprintf("Term %v", i), "\t", term.MatchExpressions) - } - } - } - - if cc.Spec.PerNodeHostBits != 0 { - w.Write(LEVEL_0, "PerNodeHostBits:\t%s\n", fmt.Sprint(cc.Spec.PerNodeHostBits)) - } - - if cc.Spec.IPv4 != "" { - w.Write(LEVEL_0, "IPv4:\t%s\n", cc.Spec.IPv4) - } - - if cc.Spec.IPv6 != "" { - w.Write(LEVEL_0, "IPv6:\t%s\n", cc.Spec.IPv6) - } - - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - // IPAddressDescriber generates information about an IPAddress. 
type IPAddressDescriber struct { client clientset.Interface diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go index 2db89026ace79..f78d961fd0df9 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go @@ -1765,9 +1765,11 @@ func TestPersistentVolumeClaimDescriber(t *testing.T) { now := time.Now() deletionTimestamp := metav1.Time{Time: time.Now().UTC().AddDate(-10, 0, 0)} snapshotAPIGroup := "snapshot.storage.k8s.io" + defaultDescriberSettings := &DescriberSettings{ShowEvents: true} testCases := []struct { name string pvc *corev1.PersistentVolumeClaim + describerSettings *DescriberSettings expectedElements []string unexpectedElements []string }{ @@ -1783,6 +1785,7 @@ func TestPersistentVolumeClaimDescriber(t *testing.T) { Phase: corev1.ClaimBound, }, }, + expectedElements: []string{"Events"}, unexpectedElements: []string{"VolumeMode", "Filesystem"}, }, { @@ -1967,13 +1970,36 @@ func TestPersistentVolumeClaimDescriber(t *testing.T) { }, expectedElements: []string{"DataSource:\n APIGroup: snapshot.storage.k8s.io\n Kind: VolumeSnapshot\n Name: src-snapshot\n"}, }, + { + name: "no-show-events", + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "volume1", + StorageClassName: &goldClassName, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + }, + unexpectedElements: []string{"Events"}, + describerSettings: &DescriberSettings{ShowEvents: false}, + }, } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { fake := fake.NewSimpleClientset(test.pvc) c := PersistentVolumeClaimDescriber{fake} - str, err := c.Describe("foo", "bar", DescriberSettings{ShowEvents: true}) + + var describerSettings DescriberSettings + if test.describerSettings != nil { + describerSettings = *test.describerSettings + } else { + describerSettings = *defaultDescriberSettings + } + + str, err := c.Describe("foo", "bar", describerSettings) if err != nil { t.Errorf("Unexpected error for test %s: %v", test.name, err) } @@ -1994,6 +2020,123 @@ func TestPersistentVolumeClaimDescriber(t *testing.T) { } } +func TestGetPodsForPVC(t *testing.T) { + goldClassName := "gold" + testCases := []struct { + name string + pvc *corev1.PersistentVolumeClaim + requiredObjects []runtime.Object + expectedPods []string + }{ + { + name: "pvc-unused", + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "pvc-name"}, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "volume1", + StorageClassName: &goldClassName, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + }, + expectedPods: []string{}, + }, + { + name: "pvc-in-pods-volumes-list", + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "pvc-name"}, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "volume1", + StorageClassName: &goldClassName, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + }, + requiredObjects: []runtime.Object{ + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "pod-name"}, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-name", + }, + }, + }, + }, 
+ }, + }, + }, + expectedPods: []string{"pod-name"}, + }, + { + name: "pvc-owned-by-pod", + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "pvc-name", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Pod", + Name: "pod-name", + UID: "pod-uid", + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "volume1", + StorageClassName: &goldClassName, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + }, + requiredObjects: []runtime.Object{ + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "pod-name", UID: "pod-uid"}, + }, + }, + expectedPods: []string{"pod-name"}, + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + var objects []runtime.Object + objects = append(objects, test.requiredObjects...) + objects = append(objects, test.pvc) + fake := fake.NewSimpleClientset(objects...) + + pods, err := getPodsForPVC(fake.CoreV1().Pods(test.pvc.ObjectMeta.Namespace), test.pvc, DescriberSettings{}) + if err != nil { + t.Errorf("Unexpected error for test %s: %v", test.name, err) + } + + for _, expectedPod := range test.expectedPods { + foundPod := false + for _, pod := range pods { + if pod.Name == expectedPod { + foundPod = true + break + } + } + + if !foundPod { + t.Errorf("Expected pod %s, but it was not returned: %v", expectedPod, pods) + } + } + + if len(test.expectedPods) != len(pods) { + t.Errorf("Expected %d pods, but got %d pods", len(test.expectedPods), len(pods)) + } + }) + } +} + func TestDescribeDeployment(t *testing.T) { labels := map[string]string{"k8s-app": "bar"} testCases := []struct { @@ -5789,64 +5932,6 @@ Events: ` + "\n", } } -func TestDescribeClusterCIDR(t *testing.T) { - - testcases := map[string]struct { - input *fake.Clientset - output string - }{ - "ClusterCIDR v1alpha1": { - input: fake.NewSimpleClientset(&networkingv1alpha1.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo.123", - }, - Spec: networkingv1alpha1.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.1.0.0/16", - IPv6: "fd00:1:1::/64", - NodeSelector: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: "In", - Values: []string{"bar"}}, - }, - }, - }, - }, - }, - }), - - output: `Name: foo.123 -Labels: -Annotations: -NodeSelector: - NodeSelector Terms: - Term 0: foo in [bar] -PerNodeHostBits: 8 -IPv4: 10.1.0.0/16 -IPv6: fd00:1:1::/64 -Events: ` + "\n", - }, - } - - for name, tc := range testcases { - t.Run(name, func(t *testing.T) { - c := &describeClient{T: t, Namespace: "foo", Interface: tc.input} - d := ClusterCIDRDescriber{c} - out, err := d.Describe("bar", "foo.123", DescriberSettings{ShowEvents: true}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if out != tc.output { - t.Errorf("expected :\n%s\nbut got output:\n%s diff:\n%s", tc.output, out, cmp.Diff(tc.output, out)) - } - }) - } -} - func TestDescribeIPAddress(t *testing.T) { testcases := map[string]struct { diff --git a/staging/src/k8s.io/kubectl/pkg/generate/generate_test.go b/staging/src/k8s.io/kubectl/pkg/generate/generate_test.go index 3037eb6f5b47b..f4d18149906b1 100644 --- a/staging/src/k8s.io/kubectl/pkg/generate/generate_test.go +++ b/staging/src/k8s.io/kubectl/pkg/generate/generate_test.go @@ -254,10 +254,10 @@ func TestGetBool(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { got, err := GetBool(tt.parameters, tt.key, 
tt.defaultValue) - if err != nil && tt.expectError == false { + if err != nil && !tt.expectError { t.Errorf("%s: unexpected error: %v", tt.name, err) } - if err == nil && tt.expectError == true { + if err == nil && tt.expectError { t.Errorf("%s: expect error, got nil", tt.name) } if got != tt.expected { diff --git a/staging/src/k8s.io/kubectl/pkg/util/qos/qos.go b/staging/src/k8s.io/kubectl/pkg/util/qos/qos.go index 2270691f51ff6..68b1b9072a9d4 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/qos/qos.go +++ b/staging/src/k8s.io/kubectl/pkg/util/qos/qos.go @@ -28,11 +28,21 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool { return supportedQoSComputeResources.Has(string(name)) } -// GetPodQOS returns the QoS class of a pod. +// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field. +// If PodStatus.QOSClass is empty, it returns value of ComputePodQOS() which evaluates pod's QoS class. +func GetPodQOS(pod *core.Pod) core.PodQOSClass { + if pod.Status.QOSClass != "" { + return pod.Status.QOSClass + } + return ComputePodQOS(pod) +} + +// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more +// expensive than GetPodQOS which should be used for pods having a non-empty .Status.QOSClass. // A pod is besteffort if none of its containers have specified any requests or limits. // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. // A pod is burstable if limits and requests do not match across all containers. -func GetPodQOS(pod *core.Pod) core.PodQOSClass { +func ComputePodQOS(pod *core.Pod) core.PodQOSClass { requests := core.ResourceList{} limits := core.ResourceList{} zeroQuantity := resource.MustParse("0") diff --git a/staging/src/k8s.io/kubectl/pkg/util/term/term.go b/staging/src/k8s.io/kubectl/pkg/util/term/term.go index 6bcda59d73d30..93a992fe31efd 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/term/term.go +++ b/staging/src/k8s.io/kubectl/pkg/util/term/term.go @@ -19,7 +19,8 @@ package term import ( "io" "os" - "runtime" + + "k8s.io/cli-runtime/pkg/printers" "github.com/moby/term" @@ -56,46 +57,23 @@ type TTY struct { // IsTerminalIn returns true if t.In is a terminal. Does not check /dev/tty // even if TryDev is set. func (t TTY) IsTerminalIn() bool { - return IsTerminal(t.In) + return printers.IsTerminal(t.In) } // IsTerminalOut returns true if t.Out is a terminal. Does not check /dev/tty // even if TryDev is set. func (t TTY) IsTerminalOut() bool { - return IsTerminal(t.Out) + return printers.IsTerminal(t.Out) } -// IsTerminal returns whether the passed object is a terminal or not -func IsTerminal(i interface{}) bool { - _, terminal := term.GetFdInfo(i) - return terminal -} +// IsTerminal returns whether the passed object is a terminal or not. +// Deprecated: use printers.IsTerminal instead. +var IsTerminal = printers.IsTerminal // AllowsColorOutput returns true if the specified writer is a terminal and // the process environment indicates color output is supported and desired. -func AllowsColorOutput(w io.Writer) bool { - if !IsTerminal(w) { - return false - } - - // https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals - if os.Getenv("TERM") == "dumb" { - return false - } - - // https://no-color.org/ - if _, nocolor := os.LookupEnv("NO_COLOR"); nocolor { - return false - } - - // On Windows WT_SESSION is set by the modern terminal component. - // Older terminals have poor support for UTF-8, VT escape codes, etc. 
- if runtime.GOOS == "windows" && os.Getenv("WT_SESSION") == "" { - return false - } - - return true -} +// Deprecated: use printers.AllowsColorOutput instead. +var AllowsColorOutput = printers.AllowsColorOutput // Safe invokes the provided function and will attempt to ensure that when the // function returns (or a termination signal is sent) that the terminal state diff --git a/staging/src/k8s.io/kubectl/testdata/apply/applyset-cr.yaml b/staging/src/k8s.io/kubectl/testdata/apply/applyset-cr.yaml index 77ed893c7df32..5c1e03c606dbe 100644 --- a/staging/src/k8s.io/kubectl/testdata/apply/applyset-cr.yaml +++ b/staging/src/k8s.io/kubectl/testdata/apply/applyset-cr.yaml @@ -4,6 +4,6 @@ metadata: name: my-set annotations: applyset.kubernetes.io/tooling: kubectl/v0.0.0 - applyset.kubernetes.io/contains-group-resources: "" + applyset.kubernetes.io/contains-group-kinds: "" labels: applyset.kubernetes.io/id: applyset-rhp1a-HVAVT_dFgyEygyA1BEB82HPp2o10UiFTpqtAs-v1 diff --git a/staging/src/k8s.io/kubectl/testdata/openapi/swagger.json b/staging/src/k8s.io/kubectl/testdata/openapi/swagger.json index 2fe7cd1cd0fda..fb8907a540fd3 100644 --- a/staging/src/k8s.io/kubectl/testdata/openapi/swagger.json +++ b/staging/src/k8s.io/kubectl/testdata/openapi/swagger.json @@ -11038,7 +11038,7 @@ "type": "string" }, "name": { - "description": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "description": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "type": "string" }, "port": { @@ -11802,7 +11802,7 @@ "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { - "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", + "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. 
The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", "format": "int32", "type": "integer" }, @@ -12348,7 +12348,7 @@ "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", "properties": { "assuredConcurrencyShares": { - "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", + "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", "format": "int32", "type": "integer" }, @@ -15474,7 +15474,7 @@ "type": "string" }, "podInfoOnMount": { - "description": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. 
Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", + "description": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", "type": "boolean" }, "requiresRepublish": { @@ -16205,7 +16205,7 @@ "type": "string" }, "podInfoOnMount": { - "description": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", + "description": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. 
If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", "type": "boolean" }, "requiresRepublish": { @@ -19239,7 +19239,7 @@ "type": "string" }, "groupPriorityMinimum": { - "description": "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", "format": "int32", "type": "integer" }, @@ -19414,7 +19414,7 @@ "type": "string" }, "groupPriorityMinimum": { - "description": "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. 
Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", "format": "int32", "type": "integer" }, diff --git a/staging/src/k8s.io/kubectl/testdata/openapi/v3/api/v1.json b/staging/src/k8s.io/kubectl/testdata/openapi/v3/api/v1.json index 16ad68446391c..a8e2c9f200465 100644 --- a/staging/src/k8s.io/kubectl/testdata/openapi/v3/api/v1.json +++ b/staging/src/k8s.io/kubectl/testdata/openapi/v3/api/v1.json @@ -25523,7 +25523,7 @@ "format": "int32" }, "grpc": { - "description": "GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.", + "description": "GRPC specifies an action involving a GRPC port.", "allOf": [ { "$ref": "#/components/schemas/io.k8s.api.core.v1.GRPCAction" } ] diff --git a/staging/src/k8s.io/kubelet/config/v1beta1/types.go b/staging/src/k8s.io/kubelet/config/v1beta1/types.go index b1ad1353fca19..fd1439e9ebf0f 100644 --- a/staging/src/k8s.io/kubelet/config/v1beta1/types.go +++ b/staging/src/k8s.io/kubelet/config/v1beta1/types.go @@ -290,6 +290,12 @@ type KubeletConfiguration struct { // Default: "2m" // +optional ImageMinimumGCAge metav1.Duration `json:"imageMinimumGCAge,omitempty"` + // imageMaximumGCAge is the maximum age an image can be unused before it is garbage collected. + // The default of this field is "0s", which disables this field--meaning images won't be garbage + // collected based on being unused for too long. + // Default: "0s" (disabled) + // +optional + ImageMaximumGCAge metav1.Duration `json:"imageMaximumGCAge,omitempty"` // imageGCHighThresholdPercent is the percent of disk usage after which // image garbage collection is always run. 
The percent is calculated by // dividing this field value by 100, so this field must be between 0 and diff --git a/staging/src/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go index 85e564e3ea938..ff653a9923c42 100644 --- a/staging/src/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go @@ -237,6 +237,7 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency out.NodeStatusReportFrequency = in.NodeStatusReportFrequency out.ImageMinimumGCAge = in.ImageMinimumGCAge + out.ImageMaximumGCAge = in.ImageMaximumGCAge if in.ImageGCHighThresholdPercent != nil { in, out := &in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent *out = new(int32) diff --git a/staging/src/k8s.io/kubelet/go.mod b/staging/src/k8s.io/kubelet/go.mod index ebdc72ab0f27b..f04566d2c6daa 100644 --- a/staging/src/k8s.io/kubelet/go.mod +++ b/staging/src/k8s.io/kubelet/go.mod @@ -2,13 +2,13 @@ module k8s.io/kubelet -go 1.20 +go 1.21.3 require ( - github.com/emicklei/go-restful/v3 v3.9.0 + github.com/emicklei/go-restful/v3 v3.11.0 github.com/gogo/protobuf v1.3.2 - github.com/stretchr/testify v1.8.2 - google.golang.org/grpc v1.54.0 + github.com/stretchr/testify v1.8.4 + google.golang.org/grpc v1.58.2 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/apiserver v0.0.0 @@ -35,6 +35,7 @@ require ( github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect @@ -42,14 +43,14 @@ require ( github.com/prometheus/procfs v0.10.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum index 400990aacf798..ee62c44a8efc3 100644 --- a/staging/src/k8s.io/kubelet/go.sum +++ b/staging/src/k8s.io/kubelet/go.sum @@ -1,4 +1,4 @@ -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/NYTimes/gziphandler v1.1.1/go.mod 
h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= @@ -17,7 +17,7 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -26,13 +26,13 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -51,7 +51,7 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -79,7 +79,7 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -111,10 +111,11 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -139,13 +140,10 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -159,24 +157,22 @@ go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQa go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod 
h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -186,47 +182,47 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -241,16 +237,15 @@ gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go b/staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go index f201ce361d440..54355503f6a0a 100644 --- a/staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go +++ b/staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1/types.go @@ -83,6 +83,11 @@ type RuntimeStats struct { // Usage here refers to the total number of bytes occupied by images on the filesystem. // +optional ImageFs *FsStats `json:"imageFs,omitempty"` + // Stats about the underlying filesystem where container's writeable layer is stored. + // This filesystem could be the same as the primary (root) filesystem or the ImageFS. + // Usage here refers to the total number of bytes occupied by the writeable layer on the filesystem. 
+ // +optional + ContainerFs *FsStats `json:"containerFs,omitempty"` } const ( diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 5c82ef14c8e14..46a9608cea8df 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -2091,7 +2091,7 @@ func deduplicate(collection *[]string) *[]string { result := make([]string, 0, len(*collection)) for _, v := range *collection { - if seen[v] == true { + if seen[v] { // skip this element } else { seen[v] = true diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index c9059355f1d3a..c62a867e63261 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -904,7 +904,7 @@ func (ss *scaleSet) getPrimaryNetworkInterfaceConfiguration(networkConfiguration for idx := range networkConfigurations { networkConfig := &networkConfigurations[idx] - if networkConfig.Primary != nil && *networkConfig.Primary == true { + if networkConfig.Primary != nil && *networkConfig.Primary { return networkConfig, nil } } @@ -920,7 +920,7 @@ func (ss *scaleSet) getPrimaryNetworkInterfaceConfigurationForScaleSet(networkCo for idx := range networkConfigurations { networkConfig := &networkConfigurations[idx] - if networkConfig.Primary != nil && *networkConfig.Primary == true { + if networkConfig.Primary != nil && *networkConfig.Primary { return networkConfig, nil } } @@ -936,7 +936,7 @@ func getPrimaryIPConfigFromVMSSNetworkConfig(config *compute.VirtualMachineScale for idx := range ipConfigurations { ipConfig := &ipConfigurations[idx] - if ipConfig.Primary != nil && *ipConfig.Primary == true { + if ipConfig.Primary != nil && *ipConfig.Primary { return ipConfig, nil } } diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances_test.go index 91a1c3ad024ec..4496f2328006c 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances_test.go @@ -21,7 +21,6 @@ package gce import ( "context" - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -55,7 +54,7 @@ func TestInstanceExists(t *testing.T) { name: "node not exist", nodeName: "test-node-2", exist: false, - expectedErr: fmt.Errorf("failed to get instance ID from cloud provider: instance not found"), + expectedErr: nil, }, } diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.mod b/staging/src/k8s.io/legacy-cloud-providers/go.mod index ca668045bf87f..c58b2a6872a77 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.mod +++ b/staging/src/k8s.io/legacy-cloud-providers/go.mod @@ -2,7 +2,7 @@ module k8s.io/legacy-cloud-providers -go 1.20 +go 1.21.3 require ( cloud.google.com/go/compute/metadata v0.2.3 @@ -14,11 +14,11 @@ require ( github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 - github.com/stretchr/testify v1.8.2 - github.com/vmware/govmomi v0.30.0 - golang.org/x/crypto v0.11.0 - golang.org/x/oauth2 v0.8.0 - google.golang.org/api v0.114.0 + github.com/stretchr/testify v1.8.4 + github.com/vmware/govmomi v0.30.6 + golang.org/x/crypto v0.14.0 + golang.org/x/oauth2 v0.10.0 + google.golang.org/api v0.126.0 
gopkg.in/gcfg.v1 v1.2.3 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 @@ -31,7 +31,7 @@ require ( ) require ( - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go/compute v1.23.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect @@ -43,7 +43,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dnaeon/go-vcr v1.2.0 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -56,9 +56,10 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -74,21 +75,21 @@ require ( github.com/prometheus/procfs v0.10.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.54.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/component-helpers v0.0.0 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect ) diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.sum b/staging/src/k8s.io/legacy-cloud-providers/go.sum index f86ca4155c4cc..a570d4b4cdfe6 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.sum +++ b/staging/src/k8s.io/legacy-cloud-providers/go.sum @@ -25,22 +25,19 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.2/go.mod 
h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -106,7 +103,7 @@ github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnht github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -116,8 +113,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -125,13 +122,13 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -160,7 +157,7 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -239,6 +236,8 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod 
h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -248,13 +247,13 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -295,10 +294,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -335,10 +334,11 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/vmware/govmomi v0.30.0 h1:Fm8ugPnnlMSTSceDKY9goGvjmqc6eQLPUSUeNXdpeXA= -github.com/vmware/govmomi v0.30.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/vmware/govmomi v0.30.6 h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U= +github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -359,19 +359,17 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= 
-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -382,8 +380,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -460,8 +458,8 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -478,8 +476,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -492,7 +490,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -543,13 +542,13 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -560,8 +559,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -621,8 +620,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -658,8 +657,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -725,12 +724,13 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 
h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -756,8 +756,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -801,18 +801,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go b/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go index 5bd3eea6e3692..7f63710d7b1f0 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go +++ b/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go @@ -134,7 +134,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { for vc, vsi := range nm.vsphereInstanceMap { found := getVMFound() - if found == true { + if found { break } @@ -175,7 +175,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { for _, datacenterObj := range datacenterObjs { found := getVMFound() - if found == true { + if found { break } @@ -283,7 +283,7 @@ func (nm *NodeManager) GetNode(nodeName k8stypes.NodeName) (v1.Node, error) { } if nm.nodeLister != nil { - klog.V(4).Infof("Node %s missing in vSphere cloud provider cache, trying node informer") + klog.V(4).Infof("Node %s missing in vSphere cloud provider cache, trying node informer", nodeName) node, err := nm.nodeLister.Get(convertToString(nodeName)) if err != nil { if !errors.IsNotFound(err) { @@ -299,7 +299,7 @@ func (nm *NodeManager) GetNode(nodeName k8stypes.NodeName) (v1.Node, error) { } if nm.nodeGetter != nil { - klog.V(4).Infof("Node %s missing in vSphere cloud provider caches, trying the API server") + klog.V(4).Infof("Node %s missing in vSphere cloud provider caches, trying the API server", nodeName) node, err := nm.nodeGetter.Nodes().Get(context.TODO(), convertToString(nodeName), metav1.GetOptions{}) if err != nil { if !errors.IsNotFound(err) { diff --git a/staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go b/staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go index 06c67e4a92654..128a52c6a9de3 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go @@ -165,7 +165,7 @@ func configFromSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) (VSphereC model.Service.ServeMux.Handle(path, handler) // vAPI simulator - paths, handler := vapi.New(s.URL, vpx.Setting) + paths, handler := vapi.New(s.URL, simulator.Map) path = paths[0] model.Service.ServeMux.Handle(path, handler) diff --git a/staging/src/k8s.io/metrics/go.mod b/staging/src/k8s.io/metrics/go.mod index af6e2b0624849..aa7e46f1f84d0 100644 --- a/staging/src/k8s.io/metrics/go.mod +++ b/staging/src/k8s.io/metrics/go.mod @@ -2,11 +2,11 @@ module k8s.io/metrics -go 1.20 +go 1.21.3 require ( github.com/gogo/protobuf v1.3.2 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 @@ -15,7 +15,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 
// indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -35,22 +35,22 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.0 // indirect + golang.org/x/tools v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum index 6a184f428ce67..dde5580d7d47c 100644 --- a/staging/src/k8s.io/metrics/go.sum +++ b/staging/src/k8s.io/metrics/go.sum @@ -1,4 +1,5 @@ -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -6,8 +7,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -72,10 +73,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -92,44 +93,44 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -137,8 +138,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -162,13 +163,13 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/mount-utils/go.mod b/staging/src/k8s.io/mount-utils/go.mod index e0f2eea466859..3d4d02bfb934e 100644 --- a/staging/src/k8s.io/mount-utils/go.mod +++ b/staging/src/k8s.io/mount-utils/go.mod @@ -2,12 +2,12 @@ module k8s.io/mount-utils -go 1.20 +go 1.21.3 require ( github.com/moby/sys/mountinfo v0.6.2 - github.com/stretchr/testify v1.8.2 - golang.org/x/sys v0.10.0 + github.com/stretchr/testify v1.8.4 + golang.org/x/sys v0.13.0 k8s.io/klog/v2 v2.100.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b ) diff --git a/staging/src/k8s.io/mount-utils/go.sum b/staging/src/k8s.io/mount-utils/go.sum index 093ecd91838ee..cf0463fb42695 100644 --- a/staging/src/k8s.io/mount-utils/go.sum +++ b/staging/src/k8s.io/mount-utils/go.sum @@ -1,5 +1,4 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -20,20 +19,15 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= diff --git a/staging/src/k8s.io/mount-utils/mount_helper_common.go b/staging/src/k8s.io/mount-utils/mount_helper_common.go index 440cf64dd079b..fc7bbc143df25 100644 --- a/staging/src/k8s.io/mount-utils/mount_helper_common.go +++ b/staging/src/k8s.io/mount-utils/mount_helper_common.go @@ -139,7 +139,7 @@ func removePathIfNotMountPoint(mountPath string, mounter Interface, extensiveMou } if notMnt { - klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) + klog.V(4).Infof("%q is not a mountpoint, deleting", mountPath) return notMnt, os.Remove(mountPath) } return notMnt, nil @@ -147,7 +147,7 @@ func removePathIfNotMountPoint(mountPath string, mounter Interface, extensiveMou // removePath attempts to remove the directory. Returns nil if the directory was removed or does not exist. 
func removePath(mountPath string) error { - klog.V(4).Infof("Warning: deleting path %q", mountPath) + klog.V(4).Infof("Deleting path %q", mountPath) err := os.Remove(mountPath) if os.IsNotExist(err) { klog.V(4).Infof("%q does not exist", mountPath) diff --git a/staging/src/k8s.io/pod-security-admission/go.mod b/staging/src/k8s.io/pod-security-admission/go.mod index 9f993b7c50c7c..66316b3e61635 100644 --- a/staging/src/k8s.io/pod-security-admission/go.mod +++ b/staging/src/k8s.io/pod-security-admission/go.mod @@ -2,14 +2,14 @@ module k8s.io/pod-security-admission -go 1.20 +go 1.21.3 require ( github.com/blang/semver/v4 v4.0.0 github.com/google/go-cmp v0.5.9 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/apiserver v0.0.0 @@ -30,10 +30,10 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.2.3 // indirect @@ -48,7 +48,7 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -68,41 +68,40 @@ require ( go.etcd.io/etcd/api/v3 v3.5.9 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect go.etcd.io/etcd/client/v3 v3.5.9 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect - go.opentelemetry.io/otel v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.10.0 // indirect - go.opentelemetry.io/otel/trace v1.10.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + 
golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.54.0 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/kms v0.0.0 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect ) diff --git a/staging/src/k8s.io/pod-security-admission/go.sum b/staging/src/k8s.io/pod-security-admission/go.sum index 29e10b370dcd9..28d5470f4d8b3 100644 --- a/staging/src/k8s.io/pod-security-admission/go.sum +++ b/staging/src/k8s.io/pod-security-admission/go.sum @@ -1,163 +1,126 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= 
-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0/go.mod 
h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= 
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod 
h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod 
h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= 
-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1/go.mod 
h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod 
h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -174,25 +137,12 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -205,27 +155,17 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod 
h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -249,78 +189,31 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 
h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= 
github.com/google/cel-go v0.17.6 h1:QDvHTIJunIsbgN8yVukx0HGnsqVLSY6xGqo+17IjIyM= github.com/google/cel-go v0.17.6/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache 
v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -330,11 +223,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -346,8 +236,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -374,10 +262,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod 
h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -387,7 +275,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= @@ -395,7 +282,6 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -403,7 +289,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -420,16 +305,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing 
v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= @@ -447,32 +330,24 @@ go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 
h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -485,298 +360,83 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time 
v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod 
h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -784,25 +444,15 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/pod-security-admission/policy/check_sysctls.go b/staging/src/k8s.io/pod-security-admission/policy/check_sysctls.go index 95c5eff286ac3..78950c3b70bd6 100644 --- a/staging/src/k8s.io/pod-security-admission/policy/check_sysctls.go +++ b/staging/src/k8s.io/pod-security-admission/policy/check_sysctls.go @@ -43,6 +43,10 @@ spec.securityContext.sysctls[*].name 'net.ipv4.ping_group_range' 'net.ipv4.ip_unprivileged_port_start' 'net.ipv4.ip_local_reserved_ports' +'net.ipv4.tcp_keepalive_time' +'net.ipv4.tcp_fin_timeout' +'net.ipv4.tcp_keepalive_intvl' +'net.ipv4.tcp_keepalive_probes' */ @@ -59,25 +63,28 @@ func 
CheckSysctls() Check { Versions: []VersionedCheck{ { MinimumVersion: api.MajorMinorVersion(1, 0), - CheckPod: sysctls_1_0, + CheckPod: sysctlsV1Dot0, }, { MinimumVersion: api.MajorMinorVersion(1, 27), - CheckPod: sysctls_1_27, + CheckPod: sysctlsV1Dot27, + }, { + MinimumVersion: api.MajorMinorVersion(1, 29), + CheckPod: sysctlsV1Dot29, }, }, } } var ( - sysctls_allowed_1_0 = sets.NewString( + sysctlsAllowedV1Dot0 = sets.NewString( "kernel.shm_rmid_forced", "net.ipv4.ip_local_port_range", "net.ipv4.tcp_syncookies", "net.ipv4.ping_group_range", "net.ipv4.ip_unprivileged_port_start", ) - sysctls_allowed_1_27 = sets.NewString( + sysctlsAllowedV1Dot27 = sets.NewString( "kernel.shm_rmid_forced", "net.ipv4.ip_local_port_range", "net.ipv4.tcp_syncookies", @@ -85,14 +92,30 @@ var ( "net.ipv4.ip_unprivileged_port_start", "net.ipv4.ip_local_reserved_ports", ) + sysctlsAllowedV1Dot29 = sets.NewString( + "kernel.shm_rmid_forced", + "net.ipv4.ip_local_port_range", + "net.ipv4.tcp_syncookies", + "net.ipv4.ping_group_range", + "net.ipv4.ip_unprivileged_port_start", + "net.ipv4.ip_local_reserved_ports", + "net.ipv4.tcp_keepalive_time", + "net.ipv4.tcp_fin_timeout", + "net.ipv4.tcp_keepalive_intvl", + "net.ipv4.tcp_keepalive_probes", + ) ) -func sysctls_1_0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { - return sysctls(podMetadata, podSpec, sysctls_allowed_1_0) +func sysctlsV1Dot0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + return sysctls(podMetadata, podSpec, sysctlsAllowedV1Dot0) +} + +func sysctlsV1Dot27(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + return sysctls(podMetadata, podSpec, sysctlsAllowedV1Dot27) } -func sysctls_1_27(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { - return sysctls(podMetadata, podSpec, sysctls_allowed_1_27) +func sysctlsV1Dot29(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec) CheckResult { + return sysctls(podMetadata, podSpec, sysctlsAllowedV1Dot29) } func sysctls(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec, sysctls_allowed_set sets.String) CheckResult { diff --git a/staging/src/k8s.io/pod-security-admission/policy/check_sysctls_test.go b/staging/src/k8s.io/pod-security-admission/policy/check_sysctls_test.go index b1940d7725232..b09af170a38b9 100644 --- a/staging/src/k8s.io/pod-security-admission/policy/check_sysctls_test.go +++ b/staging/src/k8s.io/pod-security-admission/policy/check_sysctls_test.go @@ -42,7 +42,7 @@ func TestSysctls(t *testing.T) { expectDetail: `a, b`, }, { - name: "new supported sysctls not supported", + name: "new supported sysctls not supported: net.ipv4.ip_local_reserved_ports", pod: &corev1.Pod{Spec: corev1.PodSpec{ SecurityContext: &corev1.PodSecurityContext{ Sysctls: []corev1.Sysctl{{Name: "net.ipv4.ip_local_reserved_ports", Value: "1024-4999"}}, @@ -52,11 +52,55 @@ func TestSysctls(t *testing.T) { expectReason: `forbidden sysctls`, expectDetail: `net.ipv4.ip_local_reserved_ports`, }, + { + name: "new supported sysctls not supported: net.ipv4.tcp_keepalive_time", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: []corev1.Sysctl{{Name: "net.ipv4.tcp_keepalive_time", Value: "7200"}}, + }, + }}, + allowed: false, + expectReason: `forbidden sysctls`, + expectDetail: `net.ipv4.tcp_keepalive_time`, + }, + { + name: "new supported sysctls not supported: net.ipv4.tcp_fin_timeout", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: 
[]corev1.Sysctl{{Name: "net.ipv4.tcp_fin_timeout", Value: "60"}}, + }, + }}, + allowed: false, + expectReason: `forbidden sysctls`, + expectDetail: `net.ipv4.tcp_fin_timeout`, + }, + { + name: "new supported sysctls not supported: net.ipv4.tcp_keepalive_intvl", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: []corev1.Sysctl{{Name: "net.ipv4.tcp_keepalive_intvl", Value: "75"}}, + }, + }}, + allowed: false, + expectReason: `forbidden sysctls`, + expectDetail: `net.ipv4.tcp_keepalive_intvl`, + }, + { + name: "new supported sysctls not supported: net.ipv4.tcp_keepalive_probes", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: []corev1.Sysctl{{Name: "net.ipv4.tcp_keepalive_probes", Value: "9"}}, + }, + }}, + allowed: false, + expectReason: `forbidden sysctls`, + expectDetail: `net.ipv4.tcp_keepalive_probes`, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - result := sysctls_1_0(&tc.pod.ObjectMeta, &tc.pod.Spec) + result := sysctlsV1Dot0(&tc.pod.ObjectMeta, &tc.pod.Spec) if !tc.allowed { if result.Allowed { t.Fatal("expected disallowed") @@ -67,10 +111,8 @@ func TestSysctls(t *testing.T) { if e, a := tc.expectDetail, result.ForbiddenDetail; e != a { t.Errorf("expected\n%s\ngot\n%s", e, a) } - } else { - if !result.Allowed { - t.Fatal("expected allowed") - } + } else if !result.Allowed { + t.Fatal("expected allowed") } }) } @@ -108,7 +150,7 @@ func TestSysctls_1_27(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - result := sysctls_1_27(&tc.pod.ObjectMeta, &tc.pod.Spec) + result := sysctlsV1Dot27(&tc.pod.ObjectMeta, &tc.pod.Spec) if !tc.allowed { if result.Allowed { t.Fatal("expected disallowed") @@ -119,10 +161,85 @@ func TestSysctls_1_27(t *testing.T) { if e, a := tc.expectDetail, result.ForbiddenDetail; e != a { t.Errorf("expected\n%s\ngot\n%s", e, a) } - } else { - if !result.Allowed { - t.Fatal("expected allowed") + } else if !result.Allowed { + t.Fatal("expected allowed") + } + }) + } +} + +func TestSysctls_1_29(t *testing.T) { + tests := []struct { + name string + pod *corev1.Pod + allowed bool + expectReason string + expectDetail string + }{ + { + name: "forbidden sysctls", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: []corev1.Sysctl{{Name: "a"}, {Name: "b"}}, + }, + }}, + allowed: false, + expectReason: `forbidden sysctls`, + expectDetail: `a, b`, + }, + { + name: "new supported sysctls: net.ipv4.tcp_keepalive_time", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: []corev1.Sysctl{{Name: "net.ipv4.tcp_keepalive_time", Value: "7200"}}, + }, + }}, + allowed: true, + }, + { + name: "new supported sysctls: net.ipv4.tcp_fin_timeout", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: []corev1.Sysctl{{Name: "net.ipv4.tcp_fin_timeout", Value: "60"}}, + }, + }}, + allowed: true, + }, + { + name: "new supported sysctls: net.ipv4.tcp_keepalive_intvl", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: []corev1.Sysctl{{Name: "net.ipv4.tcp_keepalive_intvl", Value: "75"}}, + }, + }}, + allowed: true, + }, + { + name: "new supported sysctls: net.ipv4.tcp_keepalive_probes", + pod: &corev1.Pod{Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + Sysctls: []corev1.Sysctl{{Name: "net.ipv4.tcp_keepalive_probes", Value: "9"}}, + }, + }}, + 
allowed: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := sysctlsV1Dot29(&tc.pod.ObjectMeta, &tc.pod.Spec) + if !tc.allowed { + if result.Allowed { + t.Fatal("expected disallowed") + } + if e, a := tc.expectReason, result.ForbiddenReason; e != a { + t.Errorf("expected\n%s\ngot\n%s", e, a) + } + if e, a := tc.expectDetail, result.ForbiddenDetail; e != a { + t.Errorf("expected\n%s\ngot\n%s", e, a) } + } else if !result.Allowed { + t.Fatal("expected allowed") } }) } diff --git a/staging/src/k8s.io/pod-security-admission/test/fixtures_sysctls.go b/staging/src/k8s.io/pod-security-admission/test/fixtures_sysctls.go index dd3f0ccdd517d..9ef1a0feb1bc7 100644 --- a/staging/src/k8s.io/pod-security-admission/test/fixtures_sysctls.go +++ b/staging/src/k8s.io/pod-security-admission/test/fixtures_sysctls.go @@ -65,11 +65,11 @@ func init() { } }, } - registerFixtureGenerator( fixtureKey{level: api.LevelBaseline, version: api.MajorMinorVersion(1, 0), check: "sysctls"}, fixtureData_1_0, ) + fixtureData_1_27 := fixtureGenerator{ expectErrorSubstring: "forbidden sysctl", generatePass: func(p *corev1.Pod) []*corev1.Pod { @@ -106,9 +106,54 @@ func init() { } }, } - registerFixtureGenerator( fixtureKey{level: api.LevelBaseline, version: api.MajorMinorVersion(1, 27), check: "sysctls"}, fixtureData_1_27, ) + + fixtureDataV1Dot29 := fixtureGenerator{ + expectErrorSubstring: "forbidden sysctl", + generatePass: func(p *corev1.Pod) []*corev1.Pod { + if p.Spec.SecurityContext == nil { + p.Spec.SecurityContext = &corev1.PodSecurityContext{} + } + return []*corev1.Pod{ + // security context with no sysctls + tweak(p, func(p *corev1.Pod) { p.Spec.SecurityContext.Sysctls = nil }), + // sysctls with name="kernel.shm_rmid_forced", "net.ipv4.ip_local_port_range", + // "net.ipv4.tcp_syncookies", "net.ipv4.ping_group_range", + // "net.ipv4.ip_unprivileged_port_start", "net.ipv4.ip_local_reserved_ports", + // "net.ipv4.tcp_keepalive_time", "net.ipv4.tcp_fin_timeout", + // "net.ipv4.tcp_keepalive_intvl", "net.ipv4.tcp_keepalive_probes" + tweak(p, func(p *corev1.Pod) { + p.Spec.SecurityContext.Sysctls = []corev1.Sysctl{ + {Name: "kernel.shm_rmid_forced", Value: "0"}, + {Name: "net.ipv4.ip_local_port_range", Value: "1024 65535"}, + {Name: "net.ipv4.tcp_syncookies", Value: "0"}, + {Name: "net.ipv4.ping_group_range", Value: "1 0"}, + {Name: "net.ipv4.ip_unprivileged_port_start", Value: "1024"}, + {Name: "net.ipv4.ip_local_reserved_ports", Value: "1024-4999"}, + {Name: "net.ipv4.tcp_keepalive_time", Value: "7200"}, + {Name: "net.ipv4.tcp_fin_timeout", Value: "60"}, + {Name: "net.ipv4.tcp_keepalive_intvl", Value: "75"}, + {Name: "net.ipv4.tcp_keepalive_probes", Value: "9"}, + } + }), + } + }, + generateFail: func(p *corev1.Pod) []*corev1.Pod { + if p.Spec.SecurityContext == nil { + p.Spec.SecurityContext = &corev1.PodSecurityContext{} + } + return []*corev1.Pod{ + // sysctls with a name outside the allowed set + tweak(p, func(p *corev1.Pod) { + p.Spec.SecurityContext.Sysctls = []corev1.Sysctl{{Name: "othersysctl", Value: "other"}} + }), + } + }, + } + registerFixtureGenerator( + fixtureKey{level: api.LevelBaseline, version: api.MajorMinorVersion(1, 29), check: "sysctls"}, + fixtureDataV1Dot29, + ) } diff --git a/staging/src/k8s.io/pod-security-admission/test/run.go b/staging/src/k8s.io/pod-security-admission/test/run.go index a10432b6de887..ac25ae98430ad 100644 --- a/staging/src/k8s.io/pod-security-admission/test/run.go +++ b/staging/src/k8s.io/pod-security-admission/test/run.go @@ -37,8 +37,8 @@ import ( ) const ( - newestMinorVersionToTest = 27 - podOSBasedRestrictionEnabledVersion = 27 + 
newestMinorVersionToTest = 29 + podOSBasedRestrictionEnabledVersion = 29 ) // Options hold configuration for running integration tests against an existing server. diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/apparmorprofile0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/apparmorprofile0.yaml new file mode 100755 index 0000000000000..87475d347ddca --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/apparmorprofile0.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/container1: unconfined + name: apparmorprofile0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/apparmorprofile1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/apparmorprofile1.yaml new file mode 100755 index 0000000000000..5940a639ec474 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/apparmorprofile1.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/initcontainer1: unconfined + name: apparmorprofile1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline0.yaml new file mode 100755 index 0000000000000..e01a9dece8c49 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline0.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + add: + - NET_RAW + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline1.yaml new file mode 100755 index 0000000000000..92239d17896d3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline1.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: + add: + - NET_RAW + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline2.yaml new file mode 100755 index 0000000000000..089d8c184c2e7 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline2.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline2 
+spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + add: + - chown + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline3.yaml new file mode 100755 index 0000000000000..4befa1edbea17 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/capabilities_baseline3.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + add: + - CAP_CHOWN + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces0.yaml new file mode 100755 index 0000000000000..1c4ca9a560a1d --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces0.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + hostIPC: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces1.yaml new file mode 100755 index 0000000000000..7967a6d50a990 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces1.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces2.yaml new file mode 100755 index 0000000000000..00039668cd205 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostnamespaces2.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + hostPID: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostpathvolumes0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostpathvolumes0.yaml new file mode 100755 index 0000000000000..7f026136fae16 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostpathvolumes0.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostpathvolumes0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + volumes: + - emptyDir: {} + name: volume-emptydir + - 
hostPath: + path: /a + name: volume-hostpath diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostpathvolumes1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostpathvolumes1.yaml new file mode 100755 index 0000000000000..382d27f4f4946 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostpathvolumes1.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostpathvolumes1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + volumes: + - hostPath: + path: /a + name: volume-hostpath-a + - hostPath: + path: /b + name: volume-hostpath-b diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports0.yaml new file mode 100755 index 0000000000000..ebfdcd48d0dee --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports0.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + hostPort: 12345 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports1.yaml new file mode 100755 index 0000000000000..d9a2b97af3a3c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports1.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + hostPort: 12346 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports2.yaml new file mode 100755 index 0000000000000..61b3388f0a75d --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/hostports2.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + hostPort: 12345 + - containerPort: 12347 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + hostPort: 12346 + - containerPort: 12348 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/privileged0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/privileged0.yaml new file mode 100755 index 0000000000000..e5cc7b94fdd92 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/privileged0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + privileged: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: {} diff --git 
a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/privileged1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/privileged1.yaml new file mode 100755 index 0000000000000..31935b9955c18 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/privileged1.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + privileged: true + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/procmount0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/procmount0.yaml new file mode 100755 index 0000000000000..5e47a75fde5db --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/procmount0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + procMount: Unmasked + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/procmount1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/procmount1.yaml new file mode 100755 index 0000000000000..accf6c3d7feba --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/procmount1.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + procMount: Unmasked + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline0.yaml new file mode 100755 index 0000000000000..f455958da8288 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline0.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: + seccompProfile: + type: Unconfined diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline1.yaml new file mode 100755 index 0000000000000..8a86112acd10c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline1.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seccompProfile: + type: Unconfined + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: {} diff --git 
a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline2.yaml new file mode 100755 index 0000000000000..21822558178a2 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/seccompprofile_baseline2.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seccompProfile: + type: Unconfined + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions0.yaml new file mode 100755 index 0000000000000..f3307078cd7b5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions0.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: + seLinuxOptions: + type: somevalue diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions1.yaml new file mode 100755 index 0000000000000..6629d05efc43c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions1.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: + type: somevalue + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: + seLinuxOptions: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions2.yaml new file mode 100755 index 0000000000000..65876a92b6145 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions2.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: + type: somevalue + securityContext: + seLinuxOptions: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions3.yaml new file mode 100755 index 0000000000000..71d89fbe572fb --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions3.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: {} + initContainers: 
+ - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: + seLinuxOptions: + user: somevalue diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions4.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions4.yaml new file mode 100755 index 0000000000000..74e05cbb709a8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/selinuxoptions4.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions4 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: + seLinuxOptions: + role: somevalue diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/sysctls0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/sysctls0.yaml new file mode 100755 index 0000000000000..81508d69e60ff --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/sysctls0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + sysctls: + - name: othersysctl + value: other diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/windowshostprocess0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/windowshostprocess0.yaml new file mode 100755 index 0000000000000..1e506b1f8037c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/windowshostprocess0.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: windowshostprocess0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + windowsOptions: {} + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + windowsOptions: {} + securityContext: + windowsOptions: + hostProcess: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/windowshostprocess1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/windowshostprocess1.yaml new file mode 100755 index 0000000000000..1a9d3e94a0ea8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/fail/windowshostprocess1.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: windowshostprocess1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + windowsOptions: + hostProcess: true + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + windowsOptions: + hostProcess: true + securityContext: + windowsOptions: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/apparmorprofile0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/apparmorprofile0.yaml new file mode 100755 index 0000000000000..213a6a6c411c4 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/apparmorprofile0.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod 
+metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/container1: localhost/foo + name: apparmorprofile0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/base.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/base.yaml new file mode 100755 index 0000000000000..387a4be317071 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/base.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: base +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/capabilities_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/capabilities_baseline0.yaml new file mode 100755 index 0000000000000..df93c1cd65200 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/capabilities_baseline0.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + add: + - AUDIT_WRITE + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - MKNOD + - NET_BIND_SERVICE + - SETFCAP + - SETGID + - SETPCAP + - SETUID + - SYS_CHROOT + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: + add: + - AUDIT_WRITE + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - MKNOD + - NET_BIND_SERVICE + - SETFCAP + - SETGID + - SETPCAP + - SETUID + - SYS_CHROOT + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/hostports0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/hostports0.yaml new file mode 100755 index 0000000000000..61fddccdbbe1a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/hostports0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/privileged0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/privileged0.yaml new file mode 100755 index 0000000000000..0b64b687c7aea --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/privileged0.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + privileged: false + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + privileged: false + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/procmount0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/procmount0.yaml new file mode 100755 index 0000000000000..e75080af28a65 --- /dev/null +++ 
b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/procmount0.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + procMount: Default + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + procMount: Default + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/seccompprofile_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/seccompprofile_baseline0.yaml new file mode 100755 index 0000000000000..2e05d163254e9 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/seccompprofile_baseline0.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seccompProfile: + type: RuntimeDefault + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/selinuxoptions0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/selinuxoptions0.yaml new file mode 100755 index 0000000000000..dafa4dbc3dec8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/selinuxoptions0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/selinuxoptions1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/selinuxoptions1.yaml new file mode 100755 index 0000000000000..a2688f5c23efa --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/selinuxoptions1.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: + level: somevalue + type: container_init_t + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: + type: container_kvm_t + securityContext: + seLinuxOptions: + type: container_t diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/sysctls0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/sysctls0.yaml new file mode 100755 index 0000000000000..2148dc0867ebb --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/sysctls0.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/sysctls1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/sysctls1.yaml new file mode 100755 index 0000000000000..0ab1ea65ce41a 
--- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.28/pass/sysctls1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + sysctls: + - name: kernel.shm_rmid_forced + value: "0" + - name: net.ipv4.ip_local_port_range + value: 1024 65535 + - name: net.ipv4.tcp_syncookies + value: "0" + - name: net.ipv4.ping_group_range + value: 1 0 + - name: net.ipv4.ip_unprivileged_port_start + value: "1024" + - name: net.ipv4.ip_local_reserved_ports + value: 1024-4999 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/apparmorprofile0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/apparmorprofile0.yaml new file mode 100755 index 0000000000000..87475d347ddca --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/apparmorprofile0.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/container1: unconfined + name: apparmorprofile0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/apparmorprofile1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/apparmorprofile1.yaml new file mode 100755 index 0000000000000..5940a639ec474 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/apparmorprofile1.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/initcontainer1: unconfined + name: apparmorprofile1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline0.yaml new file mode 100755 index 0000000000000..e01a9dece8c49 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline0.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + add: + - NET_RAW + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline1.yaml new file mode 100755 index 0000000000000..92239d17896d3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline1.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: + add: + 
- NET_RAW + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline2.yaml new file mode 100755 index 0000000000000..089d8c184c2e7 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline2.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + add: + - chown + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline3.yaml new file mode 100755 index 0000000000000..4befa1edbea17 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/capabilities_baseline3.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + add: + - CAP_CHOWN + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces0.yaml new file mode 100755 index 0000000000000..1c4ca9a560a1d --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces0.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + hostIPC: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces1.yaml new file mode 100755 index 0000000000000..7967a6d50a990 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces1.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces2.yaml new file mode 100755 index 0000000000000..00039668cd205 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostnamespaces2.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + hostPID: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostpathvolumes0.yaml 
b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostpathvolumes0.yaml new file mode 100755 index 0000000000000..7f026136fae16 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostpathvolumes0.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostpathvolumes0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + volumes: + - emptyDir: {} + name: volume-emptydir + - hostPath: + path: /a + name: volume-hostpath diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostpathvolumes1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostpathvolumes1.yaml new file mode 100755 index 0000000000000..382d27f4f4946 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostpathvolumes1.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostpathvolumes1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + volumes: + - hostPath: + path: /a + name: volume-hostpath-a + - hostPath: + path: /b + name: volume-hostpath-b diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports0.yaml new file mode 100755 index 0000000000000..ebfdcd48d0dee --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports0.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + hostPort: 12345 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports1.yaml new file mode 100755 index 0000000000000..d9a2b97af3a3c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports1.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + hostPort: 12346 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports2.yaml new file mode 100755 index 0000000000000..61b3388f0a75d --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/hostports2.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + hostPort: 12345 + - containerPort: 12347 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + hostPort: 12346 + - containerPort: 12348 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/privileged0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/privileged0.yaml new 
file mode 100755 index 0000000000000..e5cc7b94fdd92 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/privileged0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + privileged: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/privileged1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/privileged1.yaml new file mode 100755 index 0000000000000..31935b9955c18 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/privileged1.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + privileged: true + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/procmount0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/procmount0.yaml new file mode 100755 index 0000000000000..5e47a75fde5db --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/procmount0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + procMount: Unmasked + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/procmount1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/procmount1.yaml new file mode 100755 index 0000000000000..accf6c3d7feba --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/procmount1.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + procMount: Unmasked + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline0.yaml new file mode 100755 index 0000000000000..f455958da8288 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline0.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: + seccompProfile: + type: Unconfined diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline1.yaml new file mode 100755 index 0000000000000..8a86112acd10c --- /dev/null +++ 
b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline1.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seccompProfile: + type: Unconfined + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline2.yaml new file mode 100755 index 0000000000000..21822558178a2 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/seccompprofile_baseline2.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seccompProfile: + type: Unconfined + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions0.yaml new file mode 100755 index 0000000000000..f3307078cd7b5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions0.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: + seLinuxOptions: + type: somevalue diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions1.yaml new file mode 100755 index 0000000000000..6629d05efc43c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions1.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: + type: somevalue + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: + seLinuxOptions: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions2.yaml new file mode 100755 index 0000000000000..65876a92b6145 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions2.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: + type: somevalue + securityContext: + seLinuxOptions: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions3.yaml 
b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions3.yaml new file mode 100755 index 0000000000000..71d89fbe572fb --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions3.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: + seLinuxOptions: + user: somevalue diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions4.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions4.yaml new file mode 100755 index 0000000000000..74e05cbb709a8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/selinuxoptions4.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions4 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: + seLinuxOptions: + role: somevalue diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/sysctls0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/sysctls0.yaml new file mode 100755 index 0000000000000..81508d69e60ff --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/sysctls0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + sysctls: + - name: othersysctl + value: other diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/windowshostprocess0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/windowshostprocess0.yaml new file mode 100755 index 0000000000000..1e506b1f8037c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/windowshostprocess0.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: windowshostprocess0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + windowsOptions: {} + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + windowsOptions: {} + securityContext: + windowsOptions: + hostProcess: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/windowshostprocess1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/windowshostprocess1.yaml new file mode 100755 index 0000000000000..1a9d3e94a0ea8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/fail/windowshostprocess1.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: windowshostprocess1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + windowsOptions: + hostProcess: true + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + windowsOptions: + 
hostProcess: true + securityContext: + windowsOptions: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/apparmorprofile0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/apparmorprofile0.yaml new file mode 100755 index 0000000000000..213a6a6c411c4 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/apparmorprofile0.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/container1: localhost/foo + name: apparmorprofile0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/base.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/base.yaml new file mode 100755 index 0000000000000..387a4be317071 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/base.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: base +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/capabilities_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/capabilities_baseline0.yaml new file mode 100755 index 0000000000000..df93c1cd65200 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/capabilities_baseline0.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + add: + - AUDIT_WRITE + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - MKNOD + - NET_BIND_SERVICE + - SETFCAP + - SETGID + - SETPCAP + - SETUID + - SYS_CHROOT + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: + add: + - AUDIT_WRITE + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - MKNOD + - NET_BIND_SERVICE + - SETFCAP + - SETGID + - SETPCAP + - SETUID + - SYS_CHROOT + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/hostports0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/hostports0.yaml new file mode 100755 index 0000000000000..61fddccdbbe1a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/hostports0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/privileged0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/privileged0.yaml new file mode 100755 index 0000000000000..0b64b687c7aea --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/privileged0.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged0 +spec: + containers: + - image: registry.k8s.io/pause + 
name: container1 + securityContext: + privileged: false + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + privileged: false + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/procmount0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/procmount0.yaml new file mode 100755 index 0000000000000..e75080af28a65 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/procmount0.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + procMount: Default + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + procMount: Default + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/seccompprofile_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/seccompprofile_baseline0.yaml new file mode 100755 index 0000000000000..2e05d163254e9 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/seccompprofile_baseline0.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seccompProfile: + type: RuntimeDefault + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} + securityContext: + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/selinuxoptions0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/selinuxoptions0.yaml new file mode 100755 index 0000000000000..dafa4dbc3dec8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/selinuxoptions0.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: {} + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/selinuxoptions1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/selinuxoptions1.yaml new file mode 100755 index 0000000000000..a2688f5c23efa --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/selinuxoptions1.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + seLinuxOptions: + level: somevalue + type: container_init_t + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + seLinuxOptions: + type: container_kvm_t + securityContext: + seLinuxOptions: + type: container_t diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/sysctls0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/sysctls0.yaml new file mode 100755 index 0000000000000..2148dc0867ebb --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/sysctls0.yaml @@ -0,0 +1,12 @@ 
+apiVersion: v1 +kind: Pod +metadata: + name: sysctls0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/sysctls1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/sysctls1.yaml new file mode 100755 index 0000000000000..f8e68e6c44a30 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/baseline/v1.29/pass/sysctls1.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + sysctls: + - name: kernel.shm_rmid_forced + value: "0" + - name: net.ipv4.ip_local_port_range + value: 1024 65535 + - name: net.ipv4.tcp_syncookies + value: "0" + - name: net.ipv4.ping_group_range + value: 1 0 + - name: net.ipv4.ip_unprivileged_port_start + value: "1024" + - name: net.ipv4.ip_local_reserved_ports + value: 1024-4999 + - name: net.ipv4.tcp_keepalive_time + value: "7200" + - name: net.ipv4.tcp_fin_timeout + value: "60" + - name: net.ipv4.tcp_keepalive_intvl + value: "75" + - name: net.ipv4.tcp_keepalive_probes + value: "9" diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation0.yaml new file mode 100755 index 0000000000000..837b55acc9513 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: allowprivilegeescalation0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation1.yaml new file mode 100755 index 0000000000000..6189466557900 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: allowprivilegeescalation1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation2.yaml new file mode 100755 index 0000000000000..9302cc63494e1 --- /dev/null +++ 
b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation2.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Pod +metadata: + name: allowprivilegeescalation2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation3.yaml new file mode 100755 index 0000000000000..083ce350f4e73 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/allowprivilegeescalation3.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: allowprivilegeescalation3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/apparmorprofile0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/apparmorprofile0.yaml new file mode 100755 index 0000000000000..14de67ea27c4e --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/apparmorprofile0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/container1: unconfined + name: apparmorprofile0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/apparmorprofile1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/apparmorprofile1.yaml new file mode 100755 index 0000000000000..0e4313b54219f --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/apparmorprofile1.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/initcontainer1: unconfined + name: apparmorprofile1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline0.yaml new file mode 100755 index 
0000000000000..2be0164f3e157 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline1.yaml new file mode 100755 index 0000000000000..f68d6b3883069 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline1.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline2.yaml new file mode 100755 index 0000000000000..702bd87de6e9c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline2.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - chown + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline3.yaml new file mode 100755 index 0000000000000..3e6aa463175b7 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_baseline3.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CAP_CHOWN + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted0.yaml 
b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted0.yaml new file mode 100755 index 0000000000000..857c11b86bbd3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted0.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted1.yaml new file mode 100755 index 0000000000000..9c987673a0a6c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted1.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: {} + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted2.yaml new file mode 100755 index 0000000000000..be25f6aeac1d5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted2.yaml @@ -0,0 +1,97 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - SYS_TIME + - SYS_MODULE + - SYS_RAWIO + - SYS_PACCT + - SYS_ADMIN + - SYS_NICE + - SYS_RESOURCE + - SYS_TIME + - SYS_TTY_CONFIG + - MKNOD + - AUDIT_WRITE + - AUDIT_CONTROL + - MAC_OVERRIDE + - MAC_ADMIN + - NET_ADMIN + - SYSLOG + - CHOWN + - NET_RAW + - DAC_OVERRIDE + - FOWNER + - DAC_READ_SEARCH + - FSETID + - KILL + - SETGID + - SETUID + - LINUX_IMMUTABLE + - NET_BIND_SERVICE + - NET_BROADCAST + - IPC_LOCK + - IPC_OWNER + - SYS_CHROOT + - SYS_PTRACE + - SYS_BOOT + - LEASE + - SETFCAP + - WAKE_ALARM + - BLOCK_SUSPEND + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - SYS_TIME + - SYS_MODULE + - SYS_RAWIO + - SYS_PACCT + - SYS_ADMIN + - SYS_NICE + - SYS_RESOURCE + - SYS_TIME + - SYS_TTY_CONFIG + - MKNOD + - AUDIT_WRITE + - AUDIT_CONTROL + - MAC_OVERRIDE + - MAC_ADMIN + - NET_ADMIN + - SYSLOG + - CHOWN + - NET_RAW + - DAC_OVERRIDE + - FOWNER + - DAC_READ_SEARCH + - FSETID + - KILL + - SETGID + - SETUID + - LINUX_IMMUTABLE + - NET_BIND_SERVICE + - NET_BROADCAST + - IPC_LOCK + - IPC_OWNER + - SYS_CHROOT + - SYS_PTRACE + - SYS_BOOT + - LEASE + - SETFCAP + - WAKE_ALARM + - BLOCK_SUSPEND + securityContext: + runAsNonRoot: true 
+ seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted3.yaml new file mode 100755 index 0000000000000..517cc3cbc2002 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/capabilities_restricted3.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - AUDIT_WRITE + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - MKNOD + - NET_BIND_SERVICE + - SETFCAP + - SETGID + - SETPCAP + - SETUID + - SYS_CHROOT + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - AUDIT_WRITE + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - MKNOD + - NET_BIND_SERVICE + - SETFCAP + - SETGID + - SETPCAP + - SETUID + - SYS_CHROOT + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces0.yaml new file mode 100755 index 0000000000000..c1a7b7a4ba928 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces0.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostIPC: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces1.yaml new file mode 100755 index 0000000000000..caa294e373c4a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces2.yaml new file mode 100755 index 0000000000000..32350899785db --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostnamespaces2.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces2 +spec: + containers: + - image: 
registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostPID: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostpathvolumes0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostpathvolumes0.yaml new file mode 100755 index 0000000000000..86745e64a08e3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostpathvolumes0.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostpathvolumes0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - emptyDir: {} + name: volume-emptydir + - hostPath: + path: /a + name: volume-hostpath diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostpathvolumes1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostpathvolumes1.yaml new file mode 100755 index 0000000000000..bc7759c203659 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostpathvolumes1.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostpathvolumes1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - hostPath: + path: /a + name: volume-hostpath-a + - hostPath: + path: /b + name: volume-hostpath-b diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports0.yaml new file mode 100755 index 0000000000000..9bf9055d9ee10 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports0.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + hostPort: 12345 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports1.yaml new file mode 100755 index 0000000000000..ddecbf4925d86 --- /dev/null +++ 
b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports1.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + hostPort: 12346 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports2.yaml new file mode 100755 index 0000000000000..ed9f6920981d6 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/hostports2.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + hostPort: 12345 + - containerPort: 12347 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + hostPort: 12346 + - containerPort: 12348 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/privileged0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/privileged0.yaml new file mode 100755 index 0000000000000..7ad39f5c045b8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/privileged0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + drop: + - ALL + privileged: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/privileged1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/privileged1.yaml new file mode 100755 index 0000000000000..cb41dcb3aa4dd --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/privileged1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: + drop: + - ALL + privileged: true + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/procmount0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/procmount0.yaml new file mode 100755 index 0000000000000..bd1b35c65bef0 --- 
/dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/procmount0.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + procMount: Unmasked + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/procmount1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/procmount1.yaml new file mode 100755 index 0000000000000..631fae1369e18 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/procmount1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + procMount: Unmasked + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes0.yaml new file mode 100755 index 0000000000000..5a95336d26956 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes0.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - gcePersistentDisk: + pdName: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes1.yaml new file mode 100755 index 0000000000000..153326fea893c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes1.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - awsElasticBlockStore: + volumeID: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes10.yaml 
b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes10.yaml new file mode 100755 index 0000000000000..f34afe69ca897 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes10.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes10 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - flocker: + datasetName: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes11.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes11.yaml new file mode 100755 index 0000000000000..384e06f6b2301 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes11.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes11 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - fc: + wwids: + - test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes12.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes12.yaml new file mode 100755 index 0000000000000..8757fbf7fb4ba --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes12.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes12 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - azureFile: + secretName: test + shareName: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes13.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes13.yaml new file mode 100755 index 0000000000000..9e2086df359b5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes13.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes13 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: 
true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + vsphereVolume: + volumePath: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes14.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes14.yaml new file mode 100755 index 0000000000000..d8b9605e4d152 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes14.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes14 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + quobyte: + registry: localhost:1234 + volume: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes15.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes15.yaml new file mode 100755 index 0000000000000..f3462ab7f43e6 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes15.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes15 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - azureDisk: + diskName: test + diskURI: https://test.blob.core.windows.net/test/test.vhd + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes16.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes16.yaml new file mode 100755 index 0000000000000..d83daa6fcb142 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes16.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes16 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + portworxVolume: + fsType: ext4 + volumeID: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes17.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes17.yaml new file mode 100755 index 0000000000000..23f6b770e4644 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes17.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes17 +spec: + containers: + - image: 
registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + scaleIO: + gateway: localhost + secretRef: null + system: test + volumeName: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes18.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes18.yaml new file mode 100755 index 0000000000000..ca5d93f57fd30 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes18.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes18 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + storageos: + volumeName: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes19.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes19.yaml new file mode 100755 index 0000000000000..4ca4381bec973 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes19.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes19 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - hostPath: + path: /dev/null + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes2.yaml new file mode 100755 index 0000000000000..9154458079c12 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes2.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - gitRepo: + repository: github.com/kubernetes/kubernetes + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes3.yaml new file 
mode 100755 index 0000000000000..f1060bc355198 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes3.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + nfs: + path: /test + server: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes4.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes4.yaml new file mode 100755 index 0000000000000..3a1447417e476 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes4.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes4 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - iscsi: + iqn: iqn.2001-04.com.example:storage.kube.sys1.xyz + lun: 0 + targetPortal: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes5.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes5.yaml new file mode 100755 index 0000000000000..e64cbe9ab50ce --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes5.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes5 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - glusterfs: + endpoints: test + path: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes6.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes6.yaml new file mode 100755 index 0000000000000..4d596c9e4156e --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes6.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes6 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - 
name: volume1 + rbd: + image: test + monitors: + - test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes7.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes7.yaml new file mode 100755 index 0000000000000..c3887a35c1222 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes7.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes7 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - flexVolume: + driver: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes8.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes8.yaml new file mode 100755 index 0000000000000..e11afbbe8ec1d --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes8.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes8 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - cinder: + volumeID: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes9.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes9.yaml new file mode 100755 index 0000000000000..8159a4858b96b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/restrictedvolumes9.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes9 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - cephfs: + monitors: + - test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot0.yaml new file mode 100755 index 0000000000000..f460f659d94d3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot0.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + 
securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot1.yaml new file mode 100755 index 0000000000000..285409793ea15 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: false + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot2.yaml new file mode 100755 index 0000000000000..067c7970fa7e3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot2.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: false + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot3.yaml new file mode 100755 index 0000000000000..5459f294e0b5c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasnonroot3.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: false + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser0.yaml new file mode 100755 index 0000000000000..5f7c9e0f0055a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser0.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasuser0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + 
runAsNonRoot: true + runAsUser: 0 + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser1.yaml new file mode 100755 index 0000000000000..ff62334ead6b5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasuser1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 0 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser2.yaml new file mode 100755 index 0000000000000..26c713497d0d0 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/runasuser2.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasuser2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 0 + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline0.yaml new file mode 100755 index 0000000000000..0b875ce5f0194 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: Unconfined diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline1.yaml new file mode 100755 index 0000000000000..3e63c31668cde --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline1.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: Unconfined + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + 
securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline2.yaml new file mode 100755 index 0000000000000..4cd99407164bf --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_baseline2.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: Unconfined + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted0.yaml new file mode 100755 index 0000000000000..64b5604b5a4e3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted0.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted1.yaml new file mode 100755 index 0000000000000..2ec3d48dfb6af --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: Unconfined diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted2.yaml new file mode 100755 index 0000000000000..c63c622a6add8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted2.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + initContainers: + - image: registry.k8s.io/pause + 
name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted3.yaml new file mode 100755 index 0000000000000..69c969f8a6819 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted3.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted4.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted4.yaml new file mode 100755 index 0000000000000..b17bf7648e41b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/seccompprofile_restricted4.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted4 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: Unconfined + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions0.yaml new file mode 100755 index 0000000000000..7135bb20b8e24 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions0.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seLinuxOptions: + type: somevalue + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions1.yaml new file mode 100755 index 0000000000000..c99b8a5ed4f6b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions1.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + 
drop: + - ALL + seLinuxOptions: + type: somevalue + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seLinuxOptions: {} + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions2.yaml new file mode 100755 index 0000000000000..f2eafc2512bec --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions2.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: + type: somevalue + securityContext: + runAsNonRoot: true + seLinuxOptions: {} + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions3.yaml new file mode 100755 index 0000000000000..1da063ebd1f16 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions3.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seLinuxOptions: + user: somevalue + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions4.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions4.yaml new file mode 100755 index 0000000000000..a4a38fb6034a9 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/selinuxoptions4.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions4 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seLinuxOptions: + role: somevalue + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/sysctls0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/sysctls0.yaml new file mode 100755 index 0000000000000..841f73d238f5c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/sysctls0.yaml @@ -0,0 +1,28 @@ 
+apiVersion: v1 +kind: Pod +metadata: + name: sysctls0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + sysctls: + - name: othersysctl + value: other diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/windowshostprocess0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/windowshostprocess0.yaml new file mode 100755 index 0000000000000..4262e6a5b8269 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/windowshostprocess0.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: windowshostprocess0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + windowsOptions: {} + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + windowsOptions: {} + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + windowsOptions: + hostProcess: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/windowshostprocess1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/windowshostprocess1.yaml new file mode 100755 index 0000000000000..ba1ce4a472f05 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/fail/windowshostprocess1.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: windowshostprocess1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + windowsOptions: + hostProcess: true + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + windowsOptions: + hostProcess: true + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + windowsOptions: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/apparmorprofile0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/apparmorprofile0.yaml new file mode 100755 index 0000000000000..53ebdaa01393e --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/apparmorprofile0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/container1: localhost/foo + name: apparmorprofile0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/base.yaml 
b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/base.yaml new file mode 100755 index 0000000000000..3b4f3077dccd6 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/base.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: base +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/capabilities_restricted0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/capabilities_restricted0.yaml new file mode 100755 index 0000000000000..8a70cb3efdbd4 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/capabilities_restricted0.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/hostports0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/hostports0.yaml new file mode 100755 index 0000000000000..e7f1153589429 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/hostports0.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/privileged0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/privileged0.yaml new file mode 100755 index 0000000000000..8e3aafdd8f17c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/privileged0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git 
a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/procmount0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/procmount0.yaml new file mode 100755 index 0000000000000..aacd7351a8a3b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/procmount0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + procMount: Default + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + procMount: Default + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/restrictedvolumes0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/restrictedvolumes0.yaml new file mode 100755 index 0000000000000..a11722485c5a7 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/restrictedvolumes0.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume0 + - emptyDir: {} + name: volume1 + - name: volume2 + secret: + secretName: test + - name: volume3 + persistentVolumeClaim: + claimName: test + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.labels + path: labels + name: volume4 + - configMap: + name: test + name: volume5 + - name: volume6 + projected: + sources: [] diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasnonroot0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasnonroot0.yaml new file mode 100755 index 0000000000000..414ac79b469e9 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasnonroot0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasnonroot1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasnonroot1.yaml new file mode 100755 index 0000000000000..549b013e53f8f --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasnonroot1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false 
+ capabilities: + drop: + - ALL + runAsNonRoot: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + securityContext: + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasuser0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasuser0.yaml new file mode 100755 index 0000000000000..ed7aff0fa1229 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/runasuser0.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasuser0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 1000 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 1000 + securityContext: + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted0.yaml new file mode 100755 index 0000000000000..f904065ce466a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted1.yaml new file mode 100755 index 0000000000000..5a60fd7c59b62 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + localhostProfile: testing + type: Localhost diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted2.yaml new file mode 100755 index 0000000000000..39d68e386b69b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/seccompprofile_restricted2.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted2 +spec: + 
containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + localhostProfile: testing + type: Localhost + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/selinuxoptions0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/selinuxoptions0.yaml new file mode 100755 index 0000000000000..a45080b742590 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/selinuxoptions0.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/selinuxoptions1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/selinuxoptions1.yaml new file mode 100755 index 0000000000000..0a8365605e96d --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/selinuxoptions1.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: + level: somevalue + type: container_init_t + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: + type: container_kvm_t + securityContext: + runAsNonRoot: true + seLinuxOptions: + type: container_t + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/sysctls0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/sysctls0.yaml new file mode 100755 index 0000000000000..84224ffa94d65 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/sysctls0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/sysctls1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/sysctls1.yaml new file mode 100755 index 0000000000000..29c925b97d5b5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.28/pass/sysctls1.yaml @@ 
-0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + sysctls: + - name: kernel.shm_rmid_forced + value: "0" + - name: net.ipv4.ip_local_port_range + value: 1024 65535 + - name: net.ipv4.tcp_syncookies + value: "0" + - name: net.ipv4.ping_group_range + value: 1 0 + - name: net.ipv4.ip_unprivileged_port_start + value: "1024" + - name: net.ipv4.ip_local_reserved_ports + value: 1024-4999 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation0.yaml new file mode 100755 index 0000000000000..837b55acc9513 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: allowprivilegeescalation0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation1.yaml new file mode 100755 index 0000000000000..6189466557900 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: allowprivilegeescalation1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation2.yaml new file mode 100755 index 0000000000000..9302cc63494e1 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation2.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Pod +metadata: + name: allowprivilegeescalation2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git 
a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation3.yaml new file mode 100755 index 0000000000000..083ce350f4e73 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/allowprivilegeescalation3.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: allowprivilegeescalation3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/apparmorprofile0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/apparmorprofile0.yaml new file mode 100755 index 0000000000000..14de67ea27c4e --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/apparmorprofile0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/container1: unconfined + name: apparmorprofile0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/apparmorprofile1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/apparmorprofile1.yaml new file mode 100755 index 0000000000000..0e4313b54219f --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/apparmorprofile1.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/initcontainer1: unconfined + name: apparmorprofile1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline0.yaml new file mode 100755 index 0000000000000..2be0164f3e157 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - 
ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline1.yaml new file mode 100755 index 0000000000000..f68d6b3883069 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline1.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline2.yaml new file mode 100755 index 0000000000000..702bd87de6e9c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline2.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - chown + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline3.yaml new file mode 100755 index 0000000000000..3e6aa463175b7 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_baseline3.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_baseline3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CAP_CHOWN + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted0.yaml new file mode 100755 index 0000000000000..857c11b86bbd3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted0.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + 
securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted1.yaml new file mode 100755 index 0000000000000..9c987673a0a6c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted1.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: {} + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted2.yaml new file mode 100755 index 0000000000000..be25f6aeac1d5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted2.yaml @@ -0,0 +1,97 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - SYS_TIME + - SYS_MODULE + - SYS_RAWIO + - SYS_PACCT + - SYS_ADMIN + - SYS_NICE + - SYS_RESOURCE + - SYS_TIME + - SYS_TTY_CONFIG + - MKNOD + - AUDIT_WRITE + - AUDIT_CONTROL + - MAC_OVERRIDE + - MAC_ADMIN + - NET_ADMIN + - SYSLOG + - CHOWN + - NET_RAW + - DAC_OVERRIDE + - FOWNER + - DAC_READ_SEARCH + - FSETID + - KILL + - SETGID + - SETUID + - LINUX_IMMUTABLE + - NET_BIND_SERVICE + - NET_BROADCAST + - IPC_LOCK + - IPC_OWNER + - SYS_CHROOT + - SYS_PTRACE + - SYS_BOOT + - LEASE + - SETFCAP + - WAKE_ALARM + - BLOCK_SUSPEND + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - SYS_TIME + - SYS_MODULE + - SYS_RAWIO + - SYS_PACCT + - SYS_ADMIN + - SYS_NICE + - SYS_RESOURCE + - SYS_TIME + - SYS_TTY_CONFIG + - MKNOD + - AUDIT_WRITE + - AUDIT_CONTROL + - MAC_OVERRIDE + - MAC_ADMIN + - NET_ADMIN + - SYSLOG + - CHOWN + - NET_RAW + - DAC_OVERRIDE + - FOWNER + - DAC_READ_SEARCH + - FSETID + - KILL + - SETGID + - SETUID + - LINUX_IMMUTABLE + - NET_BIND_SERVICE + - NET_BROADCAST + - IPC_LOCK + - IPC_OWNER + - SYS_CHROOT + - SYS_PTRACE + - SYS_BOOT + - LEASE + - SETFCAP + - WAKE_ALARM + - BLOCK_SUSPEND + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted3.yaml new file mode 100755 index 0000000000000..517cc3cbc2002 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/capabilities_restricted3.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted3 +spec: + containers: + - image: registry.k8s.io/pause + 
name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - AUDIT_WRITE + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - MKNOD + - NET_BIND_SERVICE + - SETFCAP + - SETGID + - SETPCAP + - SETUID + - SYS_CHROOT + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - AUDIT_WRITE + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - MKNOD + - NET_BIND_SERVICE + - SETFCAP + - SETGID + - SETPCAP + - SETUID + - SYS_CHROOT + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces0.yaml new file mode 100755 index 0000000000000..c1a7b7a4ba928 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces0.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostIPC: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces1.yaml new file mode 100755 index 0000000000000..caa294e373c4a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces2.yaml new file mode 100755 index 0000000000000..32350899785db --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostnamespaces2.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostnamespaces2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + hostPID: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostpathvolumes0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostpathvolumes0.yaml new 
file mode 100755 index 0000000000000..86745e64a08e3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostpathvolumes0.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostpathvolumes0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - emptyDir: {} + name: volume-emptydir + - hostPath: + path: /a + name: volume-hostpath diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostpathvolumes1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostpathvolumes1.yaml new file mode 100755 index 0000000000000..bc7759c203659 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostpathvolumes1.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostpathvolumes1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - hostPath: + path: /a + name: volume-hostpath-a + - hostPath: + path: /b + name: volume-hostpath-b diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports0.yaml new file mode 100755 index 0000000000000..9bf9055d9ee10 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports0.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + hostPort: 12345 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports1.yaml new file mode 100755 index 0000000000000..ddecbf4925d86 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports1.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + hostPort: 12346 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git 
a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports2.yaml new file mode 100755 index 0000000000000..ed9f6920981d6 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/hostports2.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + hostPort: 12345 + - containerPort: 12347 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + hostPort: 12346 + - containerPort: 12348 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/privileged0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/privileged0.yaml new file mode 100755 index 0000000000000..7ad39f5c045b8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/privileged0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + capabilities: + drop: + - ALL + privileged: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/privileged1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/privileged1.yaml new file mode 100755 index 0000000000000..cb41dcb3aa4dd --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/privileged1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + capabilities: + drop: + - ALL + privileged: true + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/procmount0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/procmount0.yaml new file mode 100755 index 0000000000000..bd1b35c65bef0 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/procmount0.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + procMount: Unmasked + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git 
a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/procmount1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/procmount1.yaml new file mode 100755 index 0000000000000..631fae1369e18 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/procmount1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + procMount: Unmasked + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes0.yaml new file mode 100755 index 0000000000000..5a95336d26956 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes0.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - gcePersistentDisk: + pdName: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes1.yaml new file mode 100755 index 0000000000000..153326fea893c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes1.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - awsElasticBlockStore: + volumeID: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes10.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes10.yaml new file mode 100755 index 0000000000000..f34afe69ca897 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes10.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes10 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + 
securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - flocker: + datasetName: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes11.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes11.yaml new file mode 100755 index 0000000000000..384e06f6b2301 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes11.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes11 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - fc: + wwids: + - test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes12.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes12.yaml new file mode 100755 index 0000000000000..8757fbf7fb4ba --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes12.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes12 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - azureFile: + secretName: test + shareName: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes13.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes13.yaml new file mode 100755 index 0000000000000..9e2086df359b5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes13.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes13 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + vsphereVolume: + volumePath: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes14.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes14.yaml new file mode 100755 index 0000000000000..d8b9605e4d152 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes14.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes14 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: 
+ allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + quobyte: + registry: localhost:1234 + volume: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes15.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes15.yaml new file mode 100755 index 0000000000000..f3462ab7f43e6 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes15.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes15 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - azureDisk: + diskName: test + diskURI: https://test.blob.core.windows.net/test/test.vhd + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes16.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes16.yaml new file mode 100755 index 0000000000000..d83daa6fcb142 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes16.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes16 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + portworxVolume: + fsType: ext4 + volumeID: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes17.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes17.yaml new file mode 100755 index 0000000000000..23f6b770e4644 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes17.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes17 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + scaleIO: + gateway: localhost + secretRef: null + system: test + volumeName: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes18.yaml 
b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes18.yaml new file mode 100755 index 0000000000000..ca5d93f57fd30 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes18.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes18 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + storageos: + volumeName: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes19.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes19.yaml new file mode 100755 index 0000000000000..4ca4381bec973 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes19.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes19 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - hostPath: + path: /dev/null + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes2.yaml new file mode 100755 index 0000000000000..9154458079c12 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes2.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - gitRepo: + repository: github.com/kubernetes/kubernetes + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes3.yaml new file mode 100755 index 0000000000000..f1060bc355198 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes3.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + 
runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + nfs: + path: /test + server: test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes4.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes4.yaml new file mode 100755 index 0000000000000..3a1447417e476 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes4.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes4 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - iscsi: + iqn: iqn.2001-04.com.example:storage.kube.sys1.xyz + lun: 0 + targetPortal: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes5.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes5.yaml new file mode 100755 index 0000000000000..e64cbe9ab50ce --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes5.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes5 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - glusterfs: + endpoints: test + path: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes6.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes6.yaml new file mode 100755 index 0000000000000..4d596c9e4156e --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes6.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes6 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume1 + rbd: + image: test + monitors: + - test diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes7.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes7.yaml new file mode 100755 index 0000000000000..c3887a35c1222 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes7.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes7 +spec: + containers: + - image: registry.k8s.io/pause 
+ name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - flexVolume: + driver: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes8.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes8.yaml new file mode 100755 index 0000000000000..e11afbbe8ec1d --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes8.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes8 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - cinder: + volumeID: test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes9.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes9.yaml new file mode 100755 index 0000000000000..8159a4858b96b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/restrictedvolumes9.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes9 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - cephfs: + monitors: + - test + name: volume1 diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot0.yaml new file mode 100755 index 0000000000000..f460f659d94d3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot0.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot1.yaml new file mode 100755 index 0000000000000..285409793ea15 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: 
runasnonroot1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: false + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot2.yaml new file mode 100755 index 0000000000000..067c7970fa7e3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot2.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: false + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot3.yaml new file mode 100755 index 0000000000000..5459f294e0b5c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasnonroot3.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: false + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser0.yaml new file mode 100755 index 0000000000000..5f7c9e0f0055a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser0.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasuser0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + runAsUser: 0 + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser1.yaml new file mode 100755 index 0000000000000..ff62334ead6b5 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasuser1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + 
securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 0 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser2.yaml new file mode 100755 index 0000000000000..26c713497d0d0 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/runasuser2.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasuser2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 0 + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline0.yaml new file mode 100755 index 0000000000000..0b875ce5f0194 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: Unconfined diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline1.yaml new file mode 100755 index 0000000000000..3e63c31668cde --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline1.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: Unconfined + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline2.yaml new file mode 100755 index 0000000000000..4cd99407164bf --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_baseline2.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_baseline2 +spec: + 
containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: Unconfined + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted0.yaml new file mode 100755 index 0000000000000..64b5604b5a4e3 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted0.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted1.yaml new file mode 100755 index 0000000000000..2ec3d48dfb6af --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted1.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: Unconfined diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted2.yaml new file mode 100755 index 0000000000000..c63c622a6add8 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted2.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted3.yaml new file mode 100755 index 0000000000000..69c969f8a6819 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted3.yaml @@ -0,0 +1,25 @@ 
+apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted4.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted4.yaml new file mode 100755 index 0000000000000..b17bf7648e41b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/seccompprofile_restricted4.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted4 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: Unconfined + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions0.yaml new file mode 100755 index 0000000000000..7135bb20b8e24 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions0.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seLinuxOptions: + type: somevalue + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions1.yaml new file mode 100755 index 0000000000000..c99b8a5ed4f6b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions1.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: + type: somevalue + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seLinuxOptions: {} + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions2.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions2.yaml new file mode 100755 index 
0000000000000..f2eafc2512bec --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions2.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: + type: somevalue + securityContext: + runAsNonRoot: true + seLinuxOptions: {} + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions3.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions3.yaml new file mode 100755 index 0000000000000..1da063ebd1f16 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions3.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions3 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seLinuxOptions: + user: somevalue + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions4.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions4.yaml new file mode 100755 index 0000000000000..a4a38fb6034a9 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/selinuxoptions4.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions4 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seLinuxOptions: + role: somevalue + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/sysctls0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/sysctls0.yaml new file mode 100755 index 0000000000000..841f73d238f5c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/sysctls0.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + sysctls: + - name: othersysctl + value: other diff --git 
a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/windowshostprocess0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/windowshostprocess0.yaml new file mode 100755 index 0000000000000..4262e6a5b8269 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/windowshostprocess0.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: windowshostprocess0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + windowsOptions: {} + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + windowsOptions: {} + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + windowsOptions: + hostProcess: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/windowshostprocess1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/windowshostprocess1.yaml new file mode 100755 index 0000000000000..ba1ce4a472f05 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/fail/windowshostprocess1.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + name: windowshostprocess1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + windowsOptions: + hostProcess: true + hostNetwork: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + windowsOptions: + hostProcess: true + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + windowsOptions: {} diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/apparmorprofile0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/apparmorprofile0.yaml new file mode 100755 index 0000000000000..53ebdaa01393e --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/apparmorprofile0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/container1: localhost/foo + name: apparmorprofile0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/base.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/base.yaml new file mode 100755 index 0000000000000..3b4f3077dccd6 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/base.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: base +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: 
registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.27/pass/base_linux.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/base_linux.yaml similarity index 100% rename from staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.27/pass/base_linux.yaml rename to staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/base_linux.yaml diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.27/pass/base_windows.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/base_windows.yaml similarity index 100% rename from staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.27/pass/base_windows.yaml rename to staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/base_windows.yaml diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/capabilities_restricted0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/capabilities_restricted0.yaml new file mode 100755 index 0000000000000..8a70cb3efdbd4 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/capabilities_restricted0.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: capabilities_restricted0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/hostports0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/hostports0.yaml new file mode 100755 index 0000000000000..e7f1153589429 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/hostports0.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hostports0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + ports: + - containerPort: 12345 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + ports: + - containerPort: 12346 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/privileged0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/privileged0.yaml new file mode 100755 index 0000000000000..8e3aafdd8f17c --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/privileged0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: privileged0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + 
drop: + - ALL + privileged: false + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/procmount0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/procmount0.yaml new file mode 100755 index 0000000000000..aacd7351a8a3b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/procmount0.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: procmount0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + procMount: Default + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + procMount: Default + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/restrictedvolumes0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/restrictedvolumes0.yaml new file mode 100755 index 0000000000000..a11722485c5a7 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/restrictedvolumes0.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: Pod +metadata: + name: restrictedvolumes0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumes: + - name: volume0 + - emptyDir: {} + name: volume1 + - name: volume2 + secret: + secretName: test + - name: volume3 + persistentVolumeClaim: + claimName: test + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.labels + path: labels + name: volume4 + - configMap: + name: test + name: volume5 + - name: volume6 + projected: + sources: [] diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasnonroot0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasnonroot0.yaml new file mode 100755 index 0000000000000..414ac79b469e9 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasnonroot0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasnonroot1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasnonroot1.yaml new file mode 100755 index 0000000000000..549b013e53f8f --- /dev/null 
+++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasnonroot1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasnonroot1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + securityContext: + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasuser0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasuser0.yaml new file mode 100755 index 0000000000000..ed7aff0fa1229 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/runasuser0.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: runasuser0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 1000 + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsUser: 1000 + securityContext: + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted0.yaml new file mode 100755 index 0000000000000..f904065ce466a --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted1.yaml new file mode 100755 index 0000000000000..5a60fd7c59b62 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted1.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + localhostProfile: testing + type: Localhost diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted2.yaml 
b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted2.yaml new file mode 100755 index 0000000000000..39d68e386b69b --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/seccompprofile_restricted2.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: seccompprofile_restricted2 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + localhostProfile: testing + type: Localhost + securityContext: + runAsNonRoot: true diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/selinuxoptions0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/selinuxoptions0.yaml new file mode 100755 index 0000000000000..a45080b742590 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/selinuxoptions0.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: {} + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/selinuxoptions1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/selinuxoptions1.yaml new file mode 100755 index 0000000000000..0a8365605e96d --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/selinuxoptions1.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + name: selinuxoptions1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: + level: somevalue + type: container_init_t + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seLinuxOptions: + type: container_kvm_t + securityContext: + runAsNonRoot: true + seLinuxOptions: + type: container_t + seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/sysctls0.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/sysctls0.yaml new file mode 100755 index 0000000000000..84224ffa94d65 --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/sysctls0.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls0 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + 
seccompProfile: + type: RuntimeDefault diff --git a/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/sysctls1.yaml b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/sysctls1.yaml new file mode 100755 index 0000000000000..0fa413ac4b13f --- /dev/null +++ b/staging/src/k8s.io/pod-security-admission/test/testdata/restricted/v1.29/pass/sysctls1.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sysctls1 +spec: + containers: + - image: registry.k8s.io/pause + name: container1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + initContainers: + - image: registry.k8s.io/pause + name: initcontainer1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + sysctls: + - name: kernel.shm_rmid_forced + value: "0" + - name: net.ipv4.ip_local_port_range + value: 1024 65535 + - name: net.ipv4.tcp_syncookies + value: "0" + - name: net.ipv4.ping_group_range + value: 1 0 + - name: net.ipv4.ip_unprivileged_port_start + value: "1024" + - name: net.ipv4.ip_local_reserved_ports + value: 1024-4999 + - name: net.ipv4.tcp_keepalive_time + value: "7200" + - name: net.ipv4.tcp_fin_timeout + value: "60" + - name: net.ipv4.tcp_keepalive_intvl + value: "75" + - name: net.ipv4.tcp_keepalive_probes + value: "9" diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index 8e84b12dbc34b..3b8a8ab6c14b2 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -2,7 +2,7 @@ module k8s.io/sample-apiserver -go 1.20 +go 1.21.3 require ( github.com/google/gofuzz v1.2.0 @@ -12,7 +12,7 @@ require ( k8s.io/client-go v0.0.0 k8s.io/code-generator v0.0.0 k8s.io/component-base v0.0.0 - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/structured-merge-diff/v4 v4.3.0 ) @@ -28,10 +28,10 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -45,7 +45,7 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -65,45 +65,44 @@ require ( go.etcd.io/etcd/api/v3 v3.5.9 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect go.etcd.io/etcd/client/v3 v3.5.9 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect - go.opentelemetry.io/otel 
v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.10.0 // indirect - go.opentelemetry.io/otel/trace v1.10.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.0 // indirect + golang.org/x/tools v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.54.0 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.0.0 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect k8s.io/klog/v2 v2.100.1 // indirect k8s.io/kms v0.0.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index de6e17383b7cc..07e5a9f2cb8e8 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ 
b/staging/src/k8s.io/sample-apiserver/go.sum @@ -1,163 +1,126 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod 
h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= 
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0/go.mod 
h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1/go.mod 
h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.9.0/go.mod 
h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod 
h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1/go.mod 
h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -174,25 +137,12 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -205,27 +155,17 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify 
v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -250,79 +190,33 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.17.6 h1:QDvHTIJunIsbgN8yVukx0HGnsqVLSY6xGqo+17IjIyM= github.com/google/cel-go v0.17.6/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -332,11 +226,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -348,8 +239,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod 
h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -377,10 +266,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -390,7 +279,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= @@ -398,7 +286,6 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -406,7 +293,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux 
v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -423,16 +309,14 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= @@ -451,32 +335,24 @@ go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= -go.opentelemetry.io/otel v1.10.0 
h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= 
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -489,286 +365,73 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
-golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -776,14 +439,12 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -791,27 +452,17 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= 
-k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go index 2be535e7cb03f..c39ef3e7bbfa5 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/cmd/server/start.go @@ -28,7 +28,6 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/endpoints/openapi" - "k8s.io/apiserver/pkg/features" genericapiserver "k8s.io/apiserver/pkg/server" genericoptions "k8s.io/apiserver/pkg/server/options" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -123,8 +122,6 @@ func (o *WardleServerOptions) Config() (*apiserver.Config, error) { return nil, fmt.Errorf("error creating self-signed certificates: %v", err) } - o.RecommendedOptions.Etcd.StorageConfig.Paging = true - o.RecommendedOptions.ExtraAdmissionInitializers = func(c *genericapiserver.RecommendedConfig) ([]admission.PluginInitializer, error) { client, err := clientset.NewForConfig(c.LoopbackClientConfig) if err != nil { @@ -141,11 +138,9 @@ func (o *WardleServerOptions) Config() (*apiserver.Config, error) { serverConfig.OpenAPIConfig.Info.Title = "Wardle" serverConfig.OpenAPIConfig.Info.Version = "0.1" - if utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) { - serverConfig.OpenAPIV3Config = genericapiserver.DefaultOpenAPIV3Config(sampleopenapi.GetOpenAPIDefinitions, openapi.NewDefinitionNamer(apiserver.Scheme)) - serverConfig.OpenAPIV3Config.Info.Title = "Wardle" - serverConfig.OpenAPIV3Config.Info.Version = "0.1" - } + serverConfig.OpenAPIV3Config = genericapiserver.DefaultOpenAPIV3Config(sampleopenapi.GetOpenAPIDefinitions, openapi.NewDefinitionNamer(apiserver.Scheme)) + serverConfig.OpenAPIV3Config.Info.Title = "Wardle" + serverConfig.OpenAPIV3Config.Info.Version = "0.1" if err := o.RecommendedOptions.ApplyTo(serverConfig); err != nil { return nil, err diff --git 
a/staging/src/k8s.io/sample-apiserver/pkg/generated/openapi/zz_generated.openapi.go b/staging/src/k8s.io/sample-apiserver/pkg/generated/openapi/zz_generated.openapi.go index b8e970dc4029c..8cb9e6e2b3d1f 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/generated/openapi/zz_generated.openapi.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/generated/openapi/zz_generated.openapi.go @@ -528,7 +528,6 @@ func schema_pkg_apis_meta_v1_Condition(ref common.ReferenceCallback) common.Open "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -1084,8 +1083,7 @@ func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDe Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -1371,7 +1369,6 @@ func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.Ope "creationTimestamp": { SchemaProps: spec.SchemaProps{ Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2153,7 +2150,6 @@ func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenA "object": { SchemaProps: spec.SchemaProps{ Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. 
The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, @@ -2352,7 +2348,6 @@ func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.Ope "object": { SchemaProps: spec.SchemaProps{ Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod index a6215839ba2e1..f92f82d3fff64 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.mod +++ b/staging/src/k8s.io/sample-cli-plugin/go.mod @@ -2,7 +2,7 @@ module k8s.io/sample-cli-plugin -go 1.20 +go 1.21.3 require ( github.com/spf13/cobra v1.7.0 @@ -12,8 +12,9 @@ require ( ) require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.2.4 // indirect @@ -24,7 +25,6 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect @@ -35,6 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect @@ -43,12 +44,12 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect @@ -58,7 +59,7 @@ require ( k8s.io/api v0.0.0 // indirect k8s.io/apimachinery v0.0.0 // indirect k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // 
indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index 3d135d2476aac..1a35e40e32dab 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -1,5 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -11,11 +14,13 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -97,6 +102,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -107,10 +114,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -136,8 +143,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -147,7 +154,7 @@ go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -164,34 +171,35 @@ golang.org/x/net 
v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -202,8 +210,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -244,11 +252,11 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/staging/src/k8s.io/sample-controller/controller.go b/staging/src/k8s.io/sample-controller/controller.go index 13a5c33a1e4ea..59c0035a53a73 100644 --- a/staging/src/k8s.io/sample-controller/controller.go +++ b/staging/src/k8s.io/sample-controller/controller.go @@ 
-21,6 +21,8 @@ import ( "fmt" "time" + "golang.org/x/time/rate" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -103,6 +105,10 @@ func NewController( eventBroadcaster.StartStructuredLogging(0) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) + ratelimiter := workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(50), 300)}, + ) controller := &Controller{ kubeclientset: kubeclientset, @@ -111,7 +117,7 @@ func NewController( deploymentsSynced: deploymentInformer.Informer().HasSynced, foosLister: fooInformer.Lister(), foosSynced: fooInformer.Informer().HasSynced, - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Foos"), + workqueue: workqueue.NewRateLimitingQueue(ratelimiter), recorder: recorder, } diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod index 84bdbb319b6d6..f805c58ed6b55 100644 --- a/staging/src/k8s.io/sample-controller/go.mod +++ b/staging/src/k8s.io/sample-controller/go.mod @@ -2,9 +2,10 @@ module k8s.io/sample-controller -go 1.20 +go 1.21.3 require ( + golang.org/x/time v0.3.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v0.0.0 @@ -14,7 +15,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -36,21 +37,20 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/tools v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum index 1453f4986da4b..949607004ddf2 100644 --- a/staging/src/k8s.io/sample-controller/go.sum +++ b/staging/src/k8s.io/sample-controller/go.sum @@ 
-1,4 +1,5 @@ -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -6,8 +7,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -74,10 +75,10 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -94,44 +95,44 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= 
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= 
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -139,8 +140,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -164,13 +165,13 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= 
-k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/test/cmd/debug.sh b/test/cmd/debug.sh index afa84b83a58d5..73cde0bfd6b62 100755 --- a/test/cmd/debug.sh +++ b/test/cmd/debug.sh @@ -291,7 +291,6 @@ run_kubectl_debug_restricted_tests() { kube::log::status "Testing kubectl debug profile restricted" ### Pod Troubleshooting by ephemeral containers with restricted profile - # Pre-Condition: Pod "nginx" is created kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}" kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' @@ -304,6 +303,7 @@ run_kubectl_debug_restricted_tests() { # Clean up kubectl delete pod target "${kube_flags[@]:?}" + ### Pod Troubleshooting by pod copy with restricted profile # Pre-Condition: Pod "nginx" is created kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}" kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' @@ -324,6 +324,7 @@ run_kubectl_debug_restricted_tests() { output_message=$(kubectl get namespaces "${ns_name}" --show-labels) kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted' + ### Pod Troubleshooting by ephemeral containers with restricted profile (restricted namespace) # Pre-Condition: Pod "busybox" is created that complies with the restricted policy kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}" kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' @@ -336,6 +337,7 @@ run_kubectl_debug_restricted_tests() { # Clean up kubectl delete pod target -n "${ns_name}" "${kube_flags[@]:?}" + ### Pod Troubleshooting by pod copy with restricted profile (restricted namespace) # Pre-Condition: Pod "nginx" is created kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}" kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' @@ -349,6 +351,7 @@ run_kubectl_debug_restricted_tests() { # Clean up kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}" + ### Pod Troubleshooting by ephemeral containers with restricted profile (restricted namespace) # Pre-Condition: Pod "busybox" is created that complies with the restricted policy kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}" kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' @@ -361,6 +364,7 @@ run_kubectl_debug_restricted_tests() { # Clean up kubectl delete pod target -n ${ns_name} "${kube_flags[@]:?}" + ### Pod Troubleshooting by pod copy with restricted profile (restricted namespace) # Pre-Condition: Pod "nginx" is created kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}" kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' @@ -388,8 +392,7 @@ 
run_kubectl_debug_restricted_node_tests() { create_and_use_new_namespace kube::log::status "Testing kubectl debug profile restricted (node)" - ### Debug node with restrected profile - + ### Debug node with restricted profile # Pre-Condition: node exists kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:' # Restricted profile just works in not restricted namespace @@ -422,6 +425,7 @@ run_kubectl_debug_restricted_node_tests() { output_message=$(kubectl get namespaces "${ns_name}" --show-labels) kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted' + ### Debug node with restricted profile (restricted namespace) # Pre-Condition: node exists kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:' # Restricted profile works in restricted namespace @@ -452,4 +456,72 @@ run_kubectl_debug_restricted_node_tests() { set +o nounset set +o errexit -} \ No newline at end of file +} + +run_kubectl_debug_netadmin_tests() { + set -o nounset + set -o errexit + + create_and_use_new_namespace + kube::log::status "Testing kubectl debug profile netadmin" + + ### Pod Troubleshooting by ephemeral containers with netadmin profile + # Pre-Condition: Pod "nginx" is created + kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}" + kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' + # Command: add a new debug container with netadmin profile + output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=netadmin "${kube_flags[@]:?}") + # Post-Conditions + kube::test::get_object_assert pod/target '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:' + kube::test::get_object_assert pod/target '{{(index (index .spec.ephemeralContainers 0).securityContext.capabilities.add)}}' '\[NET_ADMIN NET_RAW\]' + # Clean up + kubectl delete pod target "${kube_flags[@]:?}" + + ### Pod Troubleshooting by pod copy with netadmin profile + # Pre-Condition: Pod "nginx" is created + kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}" + kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' + # Command: create a copy of target with a new debug container + kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=netadmin "${kube_flags[@]:?}" + # Post-Conditions + kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:' + kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:' + kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.image}}:{{end}}' "${IMAGE_NGINX:?}:busybox:" + kube::test::get_object_assert pod/target-copy '{{.spec.shareProcessNamespace}}' 'true' + kube::test::get_object_assert pod/target-copy '{{(index (index .spec.containers 1).securityContext.capabilities.add)}}' '\[NET_ADMIN NET_RAW\]' + # Clean up + kubectl delete pod target target-copy "${kube_flags[@]:?}" + + set +o nounset + set +o errexit +} + +run_kubectl_debug_netadmin_node_tests() { + set -o nounset + set -o errexit + + create_and_use_new_namespace + kube::log::status "Testing kubectl debug profile netadmin (node)" + + ### Debug node with netadmin profile + # Pre-Condition: node exists + kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:' + # Command: create a new node debugger 
pod + output_message=$(kubectl debug --profile netadmin node/127.0.0.1 --image=busybox --attach=false "${kube_flags[@]:?}" -- true) + # Post-Conditions + kube::test::get_object_assert pod "{{(len .items)}}" '1' + debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}") + kube::test::if_has_string "${output_message:?}" "${debugger:?}" + kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox' + kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1' + kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostNetwork}}' 'true' + kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostPID}}' 'true' + kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "capabilities" "add"}}' '\[NET_ADMIN NET_RAW\]' + # Clean up + # pod.spec.nodeName is set by kubectl debug node which causes the delete to hang, + # presumably waiting for a kubelet that's not present. Force the delete. + kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}" + + set +o nounset + set +o errexit +} diff --git a/test/cmd/delete.sh b/test/cmd/delete.sh index b510a41a28d92..0d47302be0bf4 100755 --- a/test/cmd/delete.sh +++ b/test/cmd/delete.sh @@ -58,9 +58,6 @@ run_kubectl_delete_interactive_tests() { set -o nounset set -o errexit - # enable interactivity flag feature environment variable - export KUBECTL_INTERACTIVE_DELETE=true - ns_one="namespace-$(date +%s)-${RANDOM}" ns_two="namespace-$(date +%s)-${RANDOM}" kubectl create namespace "${ns_one}" @@ -119,8 +116,6 @@ run_kubectl_delete_interactive_tests() { kubectl config set-context "${CONTEXT}" --namespace="${ns_one}" kube::test::get_object_assert 'configmap -l deletetest3' "{{range.items}}{{${id_field:?}}}:{{end}}" '' - unset KUBECTL_INTERACTIVE_DELETE - set +o nounset set +o errexit } diff --git a/test/cmd/discovery.sh b/test/cmd/discovery.sh index 4979f4356d185..b8f473e139fd5 100755 --- a/test/cmd/discovery.sh +++ b/test/cmd/discovery.sh @@ -303,3 +303,107 @@ run_swagger_tests() { set +o nounset set +o errexit } + +run_ambiguous_shortname_tests() { + set -o nounset + set -o errexit + + create_and_use_new_namespace + kube::log::status "Testing ambiguous short name" + + kubectl create -f - << __EOF__ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: foos.bar.com +spec: + group: bar.com + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + test: + type: string + names: + plural: foos + singular: foo + shortNames: + - exmp + kind: Foo + categories: + - all +__EOF__ + + # Test that we can list this new custom resource + kube::test::wait_object_assert customresourcedefinitions "{{range.items}}{{if eq ${id_field:?} \"foos.bar.com\"}}{{$id_field}}:{{end}}{{end}}" 'foos.bar.com:' + + kubectl create -f - << __EOF__ +apiVersion: bar.com/v1 +kind: Foo +metadata: + name: test-crd-foo +spec: + test: test +__EOF__ + + # Test that we can list this new custom resource + kube::test::wait_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-crd-foo:' + + output_message=$(kubectl get exmp) + kube::test::if_has_string "${output_message}" "test-crd-foo" + + kubectl create -f - << __EOF__ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: examples.test.com +spec: + group: test.com + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + 
schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + test: + type: string + names: + plural: examples + singular: example + shortNames: + - exmp + kind: Example +__EOF__ + + # Test that we can list this new custom resource + kube::test::wait_object_assert customresourcedefinitions "{{range.items}}{{if eq ${id_field:?} \"examples.test.com\"}}{{$id_field}}:{{end}}{{end}}" 'examples.test.com:' + + output_message=$(kubectl get examples 2>&1 "${kube_flags[@]}") + kube::test::if_has_string "${output_message}" 'No resources found' + + output_message=$(kubectl get exmp 2>&1) + kube::test::if_has_string "${output_message}" "test-crd-foo" + kube::test::if_has_string "${output_message}" "short name \"exmp\" could also match lower priority resource examples.test.com" + + # Cleanup + kubectl delete foos/test-crd-foo + kubectl delete customresourcedefinition foos.bar.com + kubectl delete customresourcedefinition examples.test.com + + set +o nounset + set +o errexit +} diff --git a/test/cmd/legacy-script.sh b/test/cmd/legacy-script.sh index d3e0220be2525..7da61e7983999 100755 --- a/test/cmd/legacy-script.sh +++ b/test/cmd/legacy-script.sh @@ -518,6 +518,14 @@ runTests() { record_command run_assert_singular_name_tests fi + ######################### + # Ambiguous short name # + ######################### + + if kube::test::if_supports_resource "${customresourcedefinitions}" ; then + record_command run_ambiguous_shortname_tests + fi + ######################### # Assert categories # ######################### @@ -1029,12 +1037,14 @@ runTests() { record_command run_kubectl_debug_general_tests record_command run_kubectl_debug_baseline_tests record_command run_kubectl_debug_restricted_tests + record_command run_kubectl_debug_netadmin_tests fi if kube::test::if_supports_resource "${nodes}" ; then record_command run_kubectl_debug_node_tests record_command run_kubectl_debug_general_node_tests record_command run_kubectl_debug_baseline_node_tests record_command run_kubectl_debug_restricted_node_tests + record_command run_kubectl_debug_netadmin_node_tests fi cleanup_tests diff --git a/test/cmd/plugins.sh b/test/cmd/plugins.sh index 33a238ecf0973..ed876bc7ed548 100755 --- a/test/cmd/plugins.sh +++ b/test/cmd/plugins.sh @@ -57,6 +57,14 @@ run_plugins_tests() { kube::test::if_has_string "${output_message}" 'Client Version' kube::test::if_has_not_string "${output_message}" 'overshadows an existing plugin' + # attempt to run a plugin as a subcommand of kubectl create in the user's PATH + output_message=$(PATH=${TEMP_PATH}:"test/fixtures/pkg/kubectl/plugins/create" kubectl create foo) + kube::test::if_has_string "${output_message}" 'plugin foo as a subcommand of kubectl create command' + + # ensure that a kubectl create cronjob builtin command supersedes a plugin that overshadows it + output_message=$(PATH=${TEMP_PATH}:"test/fixtures/pkg/kubectl/plugins/create" kubectl create cronjob --help) + kube::test::if_has_not_string "${output_message}" 'plugin cronjob as a subcommand of kubectl create command' + rm -fr "${TEMP_PATH}" set +o nounset diff --git a/test/conformance/testdata/conformance.yaml b/test/conformance/testdata/conformance.yaml index cea0edfdbe347..283b513f8df56 100755 --- a/test/conformance/testdata/conformance.yaml +++ b/test/conformance/testdata/conformance.yaml @@ -186,7 +186,7 @@ - testname: Custom Resource Definition, create codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting 
custom resource definition objects - works [Conformance]' + works [Conformance]' description: Create a API extension client and define a random custom resource definition. Create the custom resource definition and then delete it. The creation and deletion MUST be successful. @@ -195,7 +195,7 @@ - testname: Custom Resource Definition, status sub-resource codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition - status sub-resource works [Conformance]' + status sub-resource works [Conformance]' description: Create a custom resource definition. Attempt to read, update and patch its status sub-resource; all mutating sub-resource operations MUST be visible to subsequent reads. @@ -203,7 +203,8 @@ file: test/e2e/apimachinery/custom_resource_definition.go - testname: Custom Resource Definition, list codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]' + Simple CustomResourceDefinition listing custom resource definition objects works + [Conformance]' description: Create a API extension client, define 10 labeled custom resource definitions and list them using a label selector; the list result MUST contain only the labeled custom resource definitions. Delete the labeled custom resource definitions via @@ -213,7 +214,7 @@ file: test/e2e/apimachinery/custom_resource_definition.go - testname: Custom Resource Definition, defaulting codename: '[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - custom resource defaulting for requests and from storage works [Conformance]' + custom resource defaulting for requests and from storage works [Conformance]' description: Create a custom resource definition without default. Create CR. Add default and read CR until the default is applied. Create another CR. Remove default, add default for another field and read CR until new field is defaulted, but old @@ -637,6 +638,38 @@ of 'Terminating' scoped ResourceQuota. release: v1.16 file: test/e2e/apimachinery/resource_quota.go +- testname: API Chunking, server should return chunks of results for list calls + codename: '[sig-api-machinery] Servers with support for API chunking should return + chunks of results for list calls [Conformance]' + description: Create a large number of PodTemplates. Attempt to retrieve the first + chunk with limit set; the server MUST return the chunk of the size not exceeding + the limit with RemainingItems set in the response. Attempt to retrieve the remaining + items by providing the received continuation token and limit; the server MUST + return the remaining items in chunks of the size not exceeding the limit, with + appropriately set RemainingItems field in the response and with the ResourceVersion + returned in the first response. Attempt to list all objects at once without setting + the limit; the server MUST return all items in a single response. 
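The chunked-list protocol that the entry above describes maps directly onto client-go list options. A minimal sketch (assuming a configured `kubernetes.Interface` named `cs` and a namespace string `ns`; this is an illustration, not the conformance test itself) of paging through PodTemplates with `Limit`/`Continue` and observing `RemainingItemCount`:

```golang
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listPodTemplatesInChunks retrieves PodTemplates one chunk at a time, the same
// flow the "API Chunking" conformance entries describe: set a limit, then keep
// passing the returned continue token until the server reports no more items.
func listPodTemplatesInChunks(ctx context.Context, cs kubernetes.Interface, ns string) error {
	opts := metav1.ListOptions{Limit: 25} // each response holds at most 25 items
	for {
		list, err := cs.CoreV1().PodTemplates(ns).List(ctx, opts)
		if err != nil {
			return err
		}
		if list.RemainingItemCount != nil {
			fmt.Printf("got %d items, roughly %d remaining\n", len(list.Items), *list.RemainingItemCount)
		}
		if list.Continue == "" {
			return nil // final chunk
		}
		opts.Continue = list.Continue // fetch the next chunk from where this one stopped
	}
}
```

Omitting `Limit` entirely, as in the last step of the test, returns the whole collection in a single response.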
+ release: v1.29 + file: test/e2e/apimachinery/chunking.go +- testname: API Chunking, server should support continue listing from the last key + even if the original version has been compacted away + codename: '[sig-api-machinery] Servers with support for API chunking should support + continue listing from the last key if the original version has been compacted + away, though the list is inconsistent [Slow] [Conformance]' + description: Create a large number of PodTemplates. Attempt to retrieve the first + chunk with limit set; the server MUST return the chunk of the size not exceeding + the limit with RemainingItems set in the response. Attempt to retrieve the second + page until the continuation token expires; the server MUST return a continuation + token for inconsistent list continuation. Attempt to retrieve the second page + with the received inconsistent list continuation token; the server MUST return + the number of items not exceeding the limit, a new continuation token and appropriately + set RemainingItems field in the response. Attempt to retrieve the remaining pages + by passing the received continuation token; the server MUST return the remaining + items in chunks of the size not exceeding the limit, with appropriately set RemainingItems + field in the response and with the ResourceVersion returned as part of the inconsistent + list. + release: v1.29 + file: test/e2e/apimachinery/chunking.go - testname: API metadata HTTP return codename: '[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance]' @@ -961,7 +994,7 @@ file: test/e2e/apps/replica_set.go - testname: Replica Set, run basic image codename: '[sig-apps] ReplicaSet should serve a basic image on each replica with - a public image [Conformance]' + a public image [Conformance]' description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried. release: v1.9 @@ -999,7 +1032,7 @@ file: test/e2e/apps/rc.go - testname: Replication Controller, run basic image codename: '[sig-apps] ReplicationController should serve a basic image on each replica - with a public image [Conformance]' + with a public image [Conformance]' description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP. @@ -1118,7 +1151,8 @@ release: v1.21 file: test/e2e/auth/service_accounts.go - testname: Service account tokens auto mount optionally - codename: '[sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance]' + codename: '[sig-auth] ServiceAccounts should allow opting out of API token automount + [Conformance]' description: Ensure that Service Account keys are mounted into the Pod only when AutoMountServiceToken is not set to false. We test the following scenarios here. 1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil a) Service @@ -1145,7 +1179,7 @@ release: v1.21 file: test/e2e/auth/service_accounts.go - testname: Service Account Tokens Must AutoMount - codename: '[sig-auth] ServiceAccounts should mount an API token into pods [Conformance]' + codename: '[sig-auth] ServiceAccounts should mount an API token into pods [Conformance]' description: Ensure that Service Account keys are mounted into the Container. 
Pod contains three containers each will read Service Account token, root CA and default namespace respectively from the default API Token Mount path. All these three @@ -1189,7 +1223,7 @@ file: test/e2e/auth/subjectreviews.go - testname: Kubectl, guestbook application codename: '[sig-cli] Kubectl client Guestbook application should create and stop - a working application [Conformance]' + a working application [Conformance]' description: Create Guestbook application that contains an agnhost primary server, 2 agnhost replicas, frontend application, frontend service and agnhost primary service and agnhost replica service. Using frontend service, the test will write @@ -1200,21 +1234,21 @@ file: test/e2e/kubectl/kubectl.go - testname: Kubectl, check version v1 codename: '[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in - available api versions [Conformance]' + available api versions [Conformance]' description: Run kubectl to get api versions, output MUST contain returned versions with 'v1' listed. release: v1.9 file: test/e2e/kubectl/kubectl.go - testname: Kubectl, cluster info codename: '[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes - control plane services is included in cluster-info [Conformance]' + control plane services is included in cluster-info [Conformance]' description: Call kubectl to get cluster-info, output MUST contain cluster-info returned and Kubernetes control plane SHOULD be running. release: v1.9 file: test/e2e/kubectl/kubectl.go - testname: Kubectl, describe pod or rc codename: '[sig-cli] Kubectl client Kubectl describe should check if kubectl describe - prints relevant information for rc and pods [Conformance]' + prints relevant information for rc and pods [Conformance]' description: Deploy an agnhost controller and an agnhost service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return @@ -1230,7 +1264,8 @@ release: v1.19 file: test/e2e/kubectl/kubectl.go - testname: Kubectl, create service, replication controller - codename: '[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]' + codename: '[sig-cli] Kubectl client Kubectl expose should create services for rc + [Conformance]' description: Create a Pod running agnhost listening to port 6379. Using kubectl expose the agnhost primary replication controllers at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set @@ -1240,7 +1275,8 @@ release: v1.9 file: test/e2e/kubectl/kubectl.go - testname: Kubectl, label update - codename: '[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]' + codename: '[sig-cli] Kubectl client Kubectl label should update the label on a resource + [Conformance]' description: When a Pod is running, update a Label using 'kubectl label' command. The label MUST be created in the Pod. A 'kubectl get pod' with -l option on the container MUST verify that the label can be read back. Use 'kubectl label label-' @@ -1250,7 +1286,7 @@ file: test/e2e/kubectl/kubectl.go - testname: Kubectl, patch to annotate codename: '[sig-cli] Kubectl client Kubectl patch should add annotations for pods - in rc [Conformance]' + in rc [Conformance]' description: Start running agnhost and a replication controller. When the pod is running, using 'kubectl patch' command add annotations. 
The annotation MUST be added to running pods and SHOULD be able to read added annotations from each of @@ -1259,7 +1295,7 @@ file: test/e2e/kubectl/kubectl.go - testname: Kubectl, replace codename: '[sig-cli] Kubectl client Kubectl replace should update a single-container - pod''s image [Conformance]' + pod''s image [Conformance]' description: Command 'kubectl replace' on a existing Pod with a new spec MUST update the image of the container running in the Pod. A -f option to 'kubectl replace' SHOULD force to re-create the resource. The new Pod SHOULD have the container @@ -1268,7 +1304,7 @@ file: test/e2e/kubectl/kubectl.go - testname: Kubectl, run pod codename: '[sig-cli] Kubectl client Kubectl run pod should create a pod from an - image when restart is Never [Conformance]' + image when restart is Never [Conformance]' description: Command 'kubectl run' MUST create a pod, when a image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image. @@ -1285,13 +1321,14 @@ file: test/e2e/kubectl/kubectl.go - testname: Kubectl, version codename: '[sig-cli] Kubectl client Kubectl version should check is all data is - printed [Conformance]' + printed [Conformance]' description: The command 'kubectl version' MUST return the major, minor versions, GitCommit, etc of the Client and the Server that the kubectl is configured to connect to. release: v1.9 file: test/e2e/kubectl/kubectl.go - testname: Kubectl, proxy socket - codename: '[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]' + codename: '[sig-cli] Kubectl client Proxy server should support --unix-socket=/path + [Conformance]' description: Start a proxy server on by running 'kubectl proxy' with --unix-socket=. Call the proxy server by requesting api versions from http://locahost:0/api. The proxy server MUST provide at least one version string @@ -1299,7 +1336,7 @@ file: test/e2e/kubectl/kubectl.go - testname: Kubectl, proxy port zero codename: '[sig-cli] Kubectl client Proxy server should support proxy with --port - 0 [Conformance]' + 0 [Conformance]' description: Start a proxy server on port zero by running 'kubectl proxy' with --port=0. Call the proxy server by requesting api versions from unix socket. The proxy server MUST provide at least one version string. @@ -1307,14 +1344,15 @@ file: test/e2e/kubectl/kubectl.go - testname: Kubectl, replication controller codename: '[sig-cli] Kubectl client Update Demo should create and stop a replication - controller [Conformance]' + controller [Conformance]' description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. release: v1.9 file: test/e2e/kubectl/kubectl.go - testname: Kubectl, scale replication controller - codename: '[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]' + codename: '[sig-cli] Kubectl client Update Demo should scale a replication controller + [Conformance]' description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. 
Update @@ -1323,7 +1361,8 @@ release: v1.9 file: test/e2e/kubectl/kubectl.go - testname: Kubectl, logs - codename: '[sig-cli] Kubectl logs logs should be able to retrieve and filter logs [Conformance]' + codename: '[sig-cli] Kubectl logs logs should be able to retrieve and filter logs + [Conformance]' description: When a Pod is running then it MUST generate logs. Starting a Pod should have a expected log line. Also log command options MUST work as expected and described below. 'kubectl logs -tail=1' should generate a output of one line, the last line @@ -1397,7 +1436,7 @@ release: v1.15 file: test/e2e/network/dns.go - testname: DNS, services - codename: '[sig-network] DNS should provide DNS for services [Conformance]' + codename: '[sig-network] DNS should provide DNS for services [Conformance]' description: When a headless service is created, the service MUST be able to resolve all the required service endpoints. When the service is created, any pod in the same namespace must be able to resolve the service by all of the expected DNS @@ -1405,7 +1444,7 @@ release: v1.9 file: test/e2e/network/dns.go - testname: DNS, cluster - codename: '[sig-network] DNS should provide DNS for the cluster [Conformance]' + codename: '[sig-network] DNS should provide DNS for the cluster [Conformance]' description: When a Pod is created, the pod MUST be able to resolve cluster dns entries such as kubernetes.default via DNS. release: v1.9 @@ -1497,8 +1536,8 @@ release: v1.19 file: test/e2e/network/ingress.go - testname: IngressClass API - codename: '[sig-network] IngressClass API should support creating IngressClass - API operations [Conformance]' + codename: '[sig-network] IngressClass API should support creating IngressClass API + operations [Conformance]' description: ' - The networking.k8s.io API group MUST exist in the /apis discovery document. - The networking.k8s.io/v1 API group/version MUST exist in the /apis/networking.k8s.io discovery document. - The ingressclasses resource MUST exist in the /apis/networking.k8s.io/v1 @@ -1575,13 +1614,14 @@ release: v1.21 file: test/e2e/network/proxy.go - testname: Proxy, logs service endpoint - codename: '[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance]' + codename: '[sig-network] Proxy version v1 should proxy through a service and a pod + [Conformance]' description: Select any node in the cluster to invoke /logs endpoint using the /nodes/proxy subresource from the kubelet port. This endpoint MUST be reachable. release: v1.9 file: test/e2e/network/proxy.go - testname: Service endpoint latency, thresholds - codename: '[sig-network] Service endpoints latency should not be very high [Conformance]' + codename: '[sig-network] Service endpoints latency should not be very high [Conformance]' description: Run 100 iterations of create service with the Pod running the pause image, measure the time it takes for creating the service and the endpoint with the service name is available. These durations are captured for 100 iterations, @@ -1729,13 +1769,13 @@ release: v1.19 file: test/e2e/network/service.go - testname: Kubernetes Service - codename: '[sig-network] Services should provide secure master service [Conformance]' + codename: '[sig-network] Services should provide secure master service [Conformance]' description: By default when a kubernetes cluster is running there MUST be a 'kubernetes' service running in the cluster. 
release: v1.9 file: test/e2e/network/service.go - testname: Service, endpoints - codename: '[sig-network] Services should serve a basic endpoint from pods [Conformance]' + codename: '[sig-network] Services should serve a basic endpoint from pods [Conformance]' description: Create a service with a endpoint without any Pods, the service MUST run and show empty endpoints. Add a pod to the service and the service MUST validate to show all the endpoints for the ports exposed by the Pod. Add another Pod then @@ -1756,7 +1796,7 @@ release: v1.29 file: test/e2e/network/service.go - testname: Service, endpoints with multiple ports - codename: '[sig-network] Services should serve multiport endpoints from pods [Conformance]' + codename: '[sig-network] Services should serve multiport endpoints from pods [Conformance]' description: Create a service with two ports but no Pods are added to the service yet. The service MUST run and show empty set of endpoints. Add a Pod to the first port, service MUST list one endpoint for the Pod on that port. Add another Pod @@ -2194,7 +2234,7 @@ release: v1.13 file: test/e2e/common/node/pods.go - testname: Pods, prestop hook - codename: '[sig-node] PreStop should call prestop when killing a pod [Conformance]' + codename: '[sig-node] PreStop should call prestop when killing a pod [Conformance]' description: Create a server pod with a rest endpoint '/write' that changes state.Received field. Create a Pod with a pre-stop handle that posts to the /write endpoint on the server Pod. Verify that the Pod with pre-stop hook is running. Delete the @@ -2290,16 +2330,6 @@ count MUST be zero. release: v1.9 file: test/e2e/common/node/container_probe.go -- testname: RuntimeClass API - codename: '[sig-node] RuntimeClass should support RuntimeClasses API operations - [Conformance]' - description: ' The node.k8s.io API group MUST exist in the /apis discovery document. - The node.k8s.io/v1 API group/version MUST exist in the /apis/mode.k8s.io discovery - document. The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery - document. The runtimeclasses resource must support create, get, list, watch, update, - patch, delete, and deletecollection.' - release: v1.20 - file: test/e2e/common/node/runtimeclass.go - testname: Pod with the deleted RuntimeClass is rejected. codename: '[sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance]' @@ -2330,6 +2360,16 @@ is not being tested here. release: v1.20 file: test/e2e/common/node/runtimeclass.go +- testname: RuntimeClass API + codename: '[sig-node] RuntimeClass should support RuntimeClasses API operations + [Conformance]' + description: ' The node.k8s.io API group MUST exist in the /apis discovery document. + The node.k8s.io/v1 API group/version MUST exist in the /apis/mode.k8s.io discovery + document. The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery + document. The runtimeclasses resource must support create, get, list, watch, update, + patch, delete, and deletecollection.' 
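The verbs this description enumerates correspond to the `NodeV1().RuntimeClasses()` client in client-go. A minimal sketch (assuming a configured `kubernetes.Interface` named `cs`; the object name and handler are made up, and watch/update are omitted for brevity):

```golang
package example

import (
	"context"

	nodev1 "k8s.io/api/node/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// exerciseRuntimeClassVerbs runs a RuntimeClass through create, get, list,
// patch and deletecollection against the node.k8s.io/v1 API.
func exerciseRuntimeClassVerbs(ctx context.Context, cs kubernetes.Interface) error {
	rcs := cs.NodeV1().RuntimeClasses()

	rc := &nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example-rc", Labels: map[string]string{"e2e": "true"}},
		Handler:    "example-handler", // made-up handler name
	}
	if _, err := rcs.Create(ctx, rc, metav1.CreateOptions{}); err != nil {
		return err
	}
	if _, err := rcs.Get(ctx, rc.Name, metav1.GetOptions{}); err != nil {
		return err
	}
	if _, err := rcs.List(ctx, metav1.ListOptions{LabelSelector: "e2e=true"}); err != nil {
		return err
	}
	patch := []byte(`{"metadata":{"labels":{"patched":"true"}}}`)
	if _, err := rcs.Patch(ctx, rc.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
		return err
	}
	return rcs.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "e2e=true"})
}
```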
+ release: v1.20 + file: test/e2e/common/node/runtimeclass.go - testname: Secrets, pod environment field codename: '[sig-node] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance]' @@ -2426,10 +2466,11 @@ file: test/e2e/common/node/sysctl.go - testname: Sysctl, test sysctls codename: '[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls - [MinimumKubeletVersion:1.21] [Conformance]' + [MinimumKubeletVersion:1.21] [Environment:NotInUserNS] [Conformance]' description: 'Pod is created with kernel.shm_rmid_forced sysctl. Kernel.shm_rmid_forced must be set to 1 [LinuxOnly]: This test is marked as LinuxOnly since Windows does - not support sysctls' + not support sysctls [Environment:NotInUserNS]: The test fails in UserNS (as expected): + `open /proc/sys/kernel/shm_rmid_forced: permission denied`' release: v1.21 file: test/e2e/common/node/sysctl.go - testname: Environment variables, expansion @@ -2511,13 +2552,13 @@ file: test/e2e/scheduling/limit_range.go - testname: Scheduler, resource limits codename: '[sig-scheduling] SchedulerPredicates [Serial] validates resource limits - of pods that are allowed to run [Conformance]' + of pods that are allowed to run [Conformance]' description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity. release: v1.9 file: test/e2e/scheduling/predicates.go - testname: Scheduler, node selector matching codename: '[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector - is respected if matching [Conformance]' + is respected if matching [Conformance]' description: 'Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node.' @@ -2525,7 +2566,7 @@ file: test/e2e/scheduling/predicates.go - testname: Scheduler, node selector not matching codename: '[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector - is respected if not matching [Conformance]' + is respected if not matching [Conformance]' description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled. @@ -2591,7 +2632,7 @@ release: v1.26 file: test/e2e/storage/csi_inline.go - testname: CSIStorageCapacity API - codename: '[sig-storage] CSIStorageCapacity should support CSIStorageCapacities + codename: '[sig-storage] CSIStorageCapacity should support CSIStorageCapacities API operations [Conformance]' description: ' The storage.k8s.io API group MUST exist in the /apis discovery document. The storage.k8s.io/v1 API group/version MUST exist in the /apis/mode.k8s.io discovery @@ -2967,6 +3008,31 @@ able to start with Secret and ConfigMap volumes mounted into the container. release: v1.13 file: test/e2e/storage/empty_dir_wrapper.go +- testname: PersistentVolumes(Claims), apply changes to a pv/pvc status + codename: '[sig-storage] PersistentVolumes CSI Conformance should apply changes + to a pv/pvc status [Conformance]' + description: Creating PV and PVC MUST succeed. Listing PVs with a labelSelector + MUST succeed. Listing PVCs in a namespace MUST succeed. Reading PVC status MUST + succeed with a valid phase found. Reading PV status MUST succeed with a valid + phase found. Patching the PVC status MUST succeed with its new condition found. + Patching the PV status MUST succeed with the new reason/message found. 
Updating + the PVC status MUST succeed with its new condition found. Updating the PV status + MUST succeed with the new reason/message found. + release: v1.29 + file: test/e2e/storage/persistent_volumes.go +- testname: PersistentVolumes(Claims), lifecycle + codename: '[sig-storage] PersistentVolumes CSI Conformance should run through the + lifecycle of a PV and a PVC [Conformance]' + description: Creating PV and PVC MUST succeed. Listing PVs with a labelSelector + MUST succeed. Listing PVCs in a namespace MUST succeed. Patching a PV MUST succeed + with its new label found. Patching a PVC MUST succeed with its new label found. + Reading a PV and PVC MUST succeed with required UID retrieved. Deleting a PVC + and PV MUST succeed and it MUST be confirmed. Replacement PV and PVC MUST be created. + Updating a PV MUST succeed with its new label found. Updating a PVC MUST succeed + with its new label found. Deleting the PVC and PV via deleteCollection MUST succeed + and it MUST be confirmed. + release: v1.29 + file: test/e2e/storage/persistent_volumes.go - testname: Projected Volume, multiple projections codename: '[sig-storage] Projected combined should project all components that make up the projection API [Projection][NodeConformance] [Conformance]' @@ -3320,6 +3386,16 @@ to delete the secret, the deletion must succeed. release: v1.21 file: test/e2e/common/storage/secrets_volume.go +- testname: StorageClass, lifecycle + codename: '[sig-storage] StorageClasses CSI Conformance should run through the lifecycle + of a StorageClass [Conformance]' + description: Creating a StorageClass MUST succeed. Reading the StorageClass MUST + succeed. Patching the StorageClass MUST succeed with its new label found. Deleting + the StorageClass MUST succeed and it MUST be confirmed. Replacement StorageClass + MUST be created. Updating the StorageClass MUST succeed with its new label found. + Deleting the StorageClass via deleteCollection MUST succeed and it MUST be confirmed. + release: v1.29 + file: test/e2e/storage/storageclass.go - testname: 'SubPath: Reading content from a configmap volume.' 
codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [Conformance]' diff --git a/test/conformance/testdata/pending_eligible_endpoints.yaml b/test/conformance/testdata/pending_eligible_endpoints.yaml index 4c821b38d7ee2..bd8072ab20732 100644 --- a/test/conformance/testdata/pending_eligible_endpoints.yaml +++ b/test/conformance/testdata/pending_eligible_endpoints.yaml @@ -1,49 +1,23 @@ --- -- createCoreV1NamespacedPersistentVolumeClaim - createCoreV1NamespacedServiceAccountToken - createCoreV1Node -- createCoreV1PersistentVolume - createStorageV1CSINode -- createStorageV1StorageClass - createStorageV1VolumeAttachment -- deleteCoreV1CollectionNamespacedPersistentVolumeClaim -- deleteCoreV1CollectionPersistentVolume -- deleteCoreV1NamespacedPersistentVolumeClaim - deleteCoreV1Node -- deleteCoreV1PersistentVolume - deleteStorageV1CollectionCSINode -- deleteStorageV1CollectionStorageClass - deleteStorageV1CollectionVolumeAttachment - deleteStorageV1CSINode -- deleteStorageV1StorageClass - deleteStorageV1VolumeAttachment - getInternalApiserverAPIGroup - getResourceAPIGroup -- listCoreV1PersistentVolumeClaimForAllNamespaces - listStorageV1CSINode -- listStorageV1StorageClass - listStorageV1VolumeAttachment -- patchCoreV1NamespacedPersistentVolumeClaim -- patchCoreV1NamespacedPersistentVolumeClaimStatus -- patchCoreV1PersistentVolume -- patchCoreV1PersistentVolumeStatus - patchStorageV1CSINode -- patchStorageV1StorageClass - patchStorageV1VolumeAttachment - patchStorageV1VolumeAttachmentStatus -- readCoreV1NamespacedPersistentVolumeClaim -- readCoreV1NamespacedPersistentVolumeClaimStatus -- readCoreV1PersistentVolume -- readCoreV1PersistentVolumeStatus - readStorageV1CSINode -- readStorageV1StorageClass - readStorageV1VolumeAttachment - readStorageV1VolumeAttachmentStatus -- replaceCoreV1NamespacedPersistentVolumeClaim -- replaceCoreV1NamespacedPersistentVolumeClaimStatus -- replaceCoreV1PersistentVolume -- replaceCoreV1PersistentVolumeStatus - replaceStorageV1CSINode -- replaceStorageV1StorageClass - replaceStorageV1VolumeAttachment - replaceStorageV1VolumeAttachmentStatus diff --git a/test/e2e/README.md b/test/e2e/README.md index bef1f2cbff275..656c86eceec53 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -45,12 +45,10 @@ import ( // test/e2e/lifecycle/framework.go package lifecycle -import "github.com/onsi/ginkgo" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. 
-func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-cluster-lifecycle] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("cluster-lifecycle") ``` ```golang // test/e2e/lifecycle/bootstrap/bootstrap_signer.go diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index d01d529344fb3..b7f12162dc673 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -52,6 +52,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" "k8s.io/utils/pointer" + "k8s.io/utils/strings/slices" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -560,7 +561,6 @@ func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient ginkgo.By("Adding a label to the APIService") apiServiceClient := aggrclient.ApiregistrationV1().APIServices() apiServiceLabel := map[string]string{"e2e-apiservice": "patched"} - apiServiceLabelSelector := labels.SelectorFromSet(apiServiceLabel).String() apiServicePatch, err := json.Marshal(map[string]interface{}{ "metadata": map[string]interface{}{ "labels": apiServiceLabel, @@ -641,7 +641,7 @@ func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient framework.Logf("Found updated apiService label for %q", apiServiceName) // kubectl delete flunder test-flunder - ginkgo.By(fmt.Sprintf("Delete APIService %q", flunderName)) + ginkgo.By(fmt.Sprintf("Delete flunders resource %q", flunderName)) err = dynamicClient.Delete(ctx, flunderName, metav1.DeleteOptions{}) validateErrorWithDebugInfo(ctx, f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items) @@ -724,6 +724,7 @@ func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient } framework.Logf("Found patched status condition for %s", wardle.ObjectMeta.Name) + apiServiceLabelSelector := labels.SelectorFromSet(updatedApiService.Labels).String() ginkgo.By(fmt.Sprintf("APIService deleteCollection with labelSelector: %q", apiServiceLabelSelector)) err = aggrclient.ApiregistrationV1().APIServices().DeleteCollection(ctx, @@ -736,6 +737,24 @@ func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient framework.ExpectNoError(err, "failed to count the required APIServices") framework.Logf("APIService %s has been deleted.", apiServiceName) + ginkgo.By("Confirm that the group path of " + apiServiceName + " was removed from root paths") + groupPath := "/apis/" + apiServiceGroupName + err = wait.PollUntilContextTimeout(ctx, apiServiceRetryPeriod, apiServiceRetryTimeout, true, func(ctx context.Context) (done bool, err error) { + rootPaths := metav1.RootPaths{} + statusContent, err = restClient.Get(). + AbsPath("/"). 
+ SetHeader("Accept", "application/json").DoRaw(ctx) + if err != nil { + return false, err + } + err = json.Unmarshal(statusContent, &rootPaths) + if err != nil { + return false, err + } + return !slices.Contains(rootPaths.Paths, groupPath), nil + }) + framework.ExpectNoError(err, "Expected to not find %s from root paths", groupPath) + cleanupSampleAPIServer(ctx, client, aggrclient, n, apiServiceName) } diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index e075159a45d16..c9d091498c257 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -69,6 +69,17 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { }) }) + /* + Release: v1.29 + Testname: API Chunking, server should return chunks of results for list calls + Description: Create a large number of PodTemplates. Attempt to retrieve the first chunk with limit set; + the server MUST return the chunk of the size not exceeding the limit with RemainingItems set in the response. + Attempt to retrieve the remaining items by providing the received continuation token and limit; + the server MUST return the remaining items in chunks of the size not exceeding the limit, with appropriately + set RemainingItems field in the response and with the ResourceVersion returned in the first response. + Attempt to list all objects at once without setting the limit; the server MUST return all items in a single + response. + */ framework.ConformanceIt("should return chunks of results for list calls", func(ctx context.Context) { ns := f.Namespace.Name c := f.ClientSet @@ -116,6 +127,20 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources)) }) + /* + Release: v1.29 + Testname: API Chunking, server should support continue listing from the last key even if the original version has been compacted away + Description: Create a large number of PodTemplates. Attempt to retrieve the first chunk with limit set; + the server MUST return the chunk of the size not exceeding the limit with RemainingItems set in the response. + Attempt to retrieve the second page until the continuation token expires; the server MUST return a + continuation token for inconsistent list continuation. + Attempt to retrieve the second page with the received inconsistent list continuation token; the server + MUST return the number of items not exceeding the limit, a new continuation token and appropriately set + RemainingItems field in the response. + Attempt to retrieve the remaining pages by passing the received continuation token; the server + MUST return the remaining items in chunks of the size not exceeding the limit, with appropriately + set RemainingItems field in the response and with the ResourceVersion returned as part of the inconsistent list. 
+ */ framework.ConformanceIt("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]", func(ctx context.Context) { ns := f.Namespace.Name c := f.ClientSet diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go index 541b2145220db..9e6d8956d849b 100644 --- a/test/e2e/apimachinery/crd_conversion_webhook.go +++ b/test/e2e/apimachinery/crd_conversion_webhook.go @@ -483,7 +483,7 @@ func testCRListConversion(ctx context.Context, f *framework.Framework, testCrd * // waitWebhookConversionReady sends stub custom resource creation requests requiring conversion until one succeeds. func waitWebhookConversionReady(ctx context.Context, f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface, version string) { - framework.ExpectNoError(wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) { + framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, true, func(ctx context.Context) (bool, error) { crInstance := &unstructured.Unstructured{ Object: map[string]interface{}{ "kind": crd.Spec.Names.Kind, diff --git a/test/e2e/apimachinery/custom_resource_definition.go b/test/e2e/apimachinery/custom_resource_definition.go index 258009c16536d..884e2e8961252 100644 --- a/test/e2e/apimachinery/custom_resource_definition.go +++ b/test/e2e/apimachinery/custom_resource_definition.go @@ -56,7 +56,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin Create the custom resource definition and then delete it. The creation and deletion MUST be successful. */ - framework.ConformanceIt("creating/deleting custom resource definition objects works ", func(ctx context.Context) { + framework.ConformanceIt("creating/deleting custom resource definition objects works", func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "loading config") @@ -83,7 +83,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin custom resource definitions via delete collection; the delete MUST be successful and MUST delete only the labeled custom resource definitions. */ - framework.ConformanceIt("listing custom resource definition objects works ", func(ctx context.Context) { + framework.ConformanceIt("listing custom resource definition objects works", func(ctx context.Context) { testListSize := 10 config, err := framework.LoadConfig() framework.ExpectNoError(err, "loading config") @@ -143,7 +143,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin Description: Create a custom resource definition. Attempt to read, update and patch its status sub-resource; all mutating sub-resource operations MUST be visible to subsequent reads. */ - framework.ConformanceIt("getting/updating/patching custom resource definition status sub-resource works ", func(ctx context.Context) { + framework.ConformanceIt("getting/updating/patching custom resource definition status sub-resource works", func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "loading config") apiExtensionClient, err := clientset.NewForConfig(config) @@ -267,7 +267,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin the default is applied. Create another CR. 
Remove default, add default for another field and read CR until new field is defaulted, but old default stays. */ - framework.ConformanceIt("custom resource defaulting for requests and from storage works ", func(ctx context.Context) { + framework.ConformanceIt("custom resource defaulting for requests and from storage works", func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "loading config") apiExtensionClient, err := clientset.NewForConfig(config) diff --git a/test/e2e/apimachinery/framework.go b/test/e2e/apimachinery/framework.go index 6b7ee59a919eb..4edc3411949f6 100644 --- a/test/e2e/apimachinery/framework.go +++ b/test/e2e/apimachinery/framework.go @@ -16,9 +16,7 @@ limitations under the License. package apimachinery -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-api-machinery] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("api-machinery") diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 8eb838680b0a3..bab23e44ef2d8 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -491,7 +491,7 @@ var _ = SIGDescribe("Garbage collector", func() { } // wait for deployment to create some rs ginkgo.By("Wait for the Deployment to create new ReplicaSet") - err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) { rsList, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rs: %w", err) @@ -510,7 +510,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to delete the deployment: %v", err) } ginkgo.By("wait for all rs to be garbage collected") - err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, true, func(ctx context.Context) (bool, error) { objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0} return verifyRemainingObjects(ctx, f, objects) }) @@ -551,7 +551,7 @@ var _ = SIGDescribe("Garbage collector", func() { // wait for deployment to create some rs ginkgo.By("Wait for the Deployment to create new ReplicaSet") var replicaset appsv1.ReplicaSet - err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) { rsList, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list rs: %w", err) @@ -568,7 +568,7 @@ var _ = SIGDescribe("Garbage collector", func() { } desiredGeneration := replicaset.Generation - if err := wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 60*time.Second, func(ctx context.Context) (bool, error) { + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 60*time.Second, true, func(ctx context.Context) (bool, error) { newRS, err := clientSet.AppsV1().ReplicaSets(replicaset.Namespace).Get(ctx, replicaset.Name, 
metav1.GetOptions{}) if err != nil { return false, err @@ -585,7 +585,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to delete the deployment: %v", err) } ginkgo.By("wait for deployment deletion to see if the garbage collector mistakenly deletes the rs") - err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, true, func(ctx context.Context) (bool, error) { dList, err := deployClient.List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list deployments: %w", err) @@ -974,7 +974,7 @@ var _ = SIGDescribe("Garbage collector", func() { } // Wait for the canary foreground finalization to complete, which means GC is aware of our new custom resource type var lastCanary *unstructured.Unstructured - if err := wait.PollImmediateWithContext(ctx, 5*time.Second, 3*time.Minute, func(ctx context.Context) (bool, error) { + if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, true, func(ctx context.Context) (bool, error) { lastCanary, err = resourceClient.Get(ctx, dependentName, metav1.GetOptions{}) return apierrors.IsNotFound(err), nil }); err != nil { @@ -1119,7 +1119,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name) ginkgo.By("Wait for the CronJob to create new Job") - err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 2*time.Minute, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 2*time.Minute, true, func(ctx context.Context) (bool, error) { jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list jobs: %w", err) @@ -1135,7 +1135,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("Failed to delete the CronJob: %v", err) } ginkgo.By("Verify if cronjob does not leave jobs nor pods behind") - err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) { objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0} return verifyRemainingObjects(ctx, f, objects) }) diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index fcfb1ff189085..0293117d8cc11 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -1185,7 +1185,7 @@ var _ = SIGDescribe("ResourceQuota", func() { }) framework.ExpectNoError(err, "failed to locate ResourceQuota %q in namespace %q", patchedResourceQuota.Name, ns) - err = wait.PollImmediateWithContext(ctx, 5*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) { resourceQuotaResult, err := rqClient.Get(ctx, rqName, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/apimachinery/validatingadmissionpolicy.go b/test/e2e/apimachinery/validatingadmissionpolicy.go index 4f06cac7986b3..836e2cfb1a556 100644 --- a/test/e2e/apimachinery/validatingadmissionpolicy.go +++ 
b/test/e2e/apimachinery/validatingadmissionpolicy.go @@ -31,12 +31,13 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/features" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" admissionapi "k8s.io/pod-security-admission/api" ) -var _ = SIGDescribe("ValidatingAdmissionPolicy [Privileged:ClusterAdmin][Alpha][Feature:ValidatingAdmissionPolicy]", func() { +var _ = SIGDescribe("ValidatingAdmissionPolicy [Privileged:ClusterAdmin]", framework.WithFeatureGate(features.ValidatingAdmissionPolicy), func() { f := framework.NewDefaultFramework("validating-admission-policy") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline @@ -205,11 +206,14 @@ var _ = SIGDescribe("ValidatingAdmissionPolicy [Privileged:ClusterAdmin][Alpha][ Expression: "object.spec.replicas", }). WithVariable(admissionregistrationv1beta1.Variable{ - Name: "replicasReminder", // a bit artificial but good for testing purpose - Expression: "variables.replicas % 2", + Name: "oddReplicas", + Expression: "variables.replicas % 2 == 1", }). WithValidation(admissionregistrationv1beta1.Validation{ - Expression: "variables.replicas > 1 && variables.replicasReminder == 1", + Expression: "variables.replicas > 1", + }). + WithValidation(admissionregistrationv1beta1.Validation{ + Expression: "variables.oddReplicas", }). Build() policy, err := client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Create(ctx, policy, metav1.CreateOptions{}) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 5dd38c279153e..4c76641b43833 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -729,7 +729,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("verifying the validating webhook match conditions") validatingWebhookConfiguration, err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.ExpectEqual(validatingWebhookConfiguration.Webhooks[0].MatchConditions, initalMatchConditions, "verifying that match conditions are created") + gomega.Expect(validatingWebhookConfiguration.Webhooks[0].MatchConditions).To(gomega.Equal(initalMatchConditions), "verifying that match conditions are created") defer func() { err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, validatingWebhookConfiguration.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting mutating webhook configuration") @@ -753,7 +753,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("verifying the validating webhook match conditions") validatingWebhookConfiguration, err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.ExpectEqual(validatingWebhookConfiguration.Webhooks[0].MatchConditions, updatedMatchConditions, "verifying that match conditions are updated") + gomega.Expect(validatingWebhookConfiguration.Webhooks[0].MatchConditions).To(gomega.Equal(updatedMatchConditions), "verifying that match conditions are updated") }) /* @@ -780,7 +780,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("verifying the mutating webhook match conditions") mutatingWebhookConfiguration, err = 
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.ExpectEqual(mutatingWebhookConfiguration.Webhooks[0].MatchConditions, initalMatchConditions, "verifying that match conditions are created") + gomega.Expect(mutatingWebhookConfiguration.Webhooks[0].MatchConditions).To(gomega.Equal(initalMatchConditions), "verifying that match conditions are created") defer func() { err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(ctx, mutatingWebhookConfiguration.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting mutating webhook configuration") @@ -804,7 +804,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() { ginkgo.By("verifying the mutating webhook match conditions") mutatingWebhookConfiguration, err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, f.UniqueName, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.ExpectEqual(mutatingWebhookConfiguration.Webhooks[0].MatchConditions, updatedMatchConditions, "verifying that match conditions are updated") + gomega.Expect(mutatingWebhookConfiguration.Webhooks[0].MatchConditions).To(gomega.Equal(updatedMatchConditions), "verifying that match conditions are updated") }) /* diff --git a/test/e2e/apps/controller_revision.go b/test/e2e/apps/controller_revision.go index 05b8cfaa5db17..5630b9d57982e 100644 --- a/test/e2e/apps/controller_revision.go +++ b/test/e2e/apps/controller_revision.go @@ -65,7 +65,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { for _, ds := range daemonsets.Items { ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, &ds)) framework.ExpectNoError(err, "error waiting for daemon pod to be reaped") } } @@ -134,7 +134,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset)) framework.ExpectNoError(err, "error waiting for daemon pod to start") err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) @@ -142,7 +142,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { ginkgo.By(fmt.Sprintf("Confirm DaemonSet %q successfully created with %q label", dsName, dsLabelSelector)) dsList, err := csAppsV1.DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector}) framework.ExpectNoError(err, "failed to list Daemon Sets") - framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found") + gomega.Expect(dsList.Items).To(gomega.HaveLen(1), "filtered list wasn't found") ds, err := c.AppsV1().DaemonSets(ns).Get(ctx, dsName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -151,7 +151,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { ginkgo.By(fmt.Sprintf("Listing all ControllerRevisions with label %q", dsLabelSelector)) revs, err := 
csAppsV1.ControllerRevisions("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector}) framework.ExpectNoError(err, "Failed to list ControllerRevision: %v", err) - framework.ExpectEqual(len(revs.Items), 1, "Failed to find any controllerRevisions") + gomega.Expect(revs.Items).To(gomega.HaveLen(1), "Failed to find any controllerRevisions") // Locate the current ControllerRevision from the list var initialRevision *appsv1.ControllerRevision @@ -169,7 +169,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { payload := "{\"metadata\":{\"labels\":{\"" + initialRevision.Name + "\":\"patched\"}}}" patchedControllerRevision, err := csAppsV1.ControllerRevisions(ns).Patch(ctx, initialRevision.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{}) framework.ExpectNoError(err, "failed to patch ControllerRevision %s in namespace %s", initialRevision.Name, ns) - framework.ExpectEqual(patchedControllerRevision.Labels[initialRevision.Name], "patched", "Did not find 'patched' label for this ControllerRevision. Current labels: %v", patchedControllerRevision.Labels) + gomega.Expect(patchedControllerRevision.Labels).To(gomega.HaveKeyWithValue(initialRevision.Name, "patched"), "Did not find 'patched' label for this ControllerRevision. Current labels: %v", patchedControllerRevision.Labels) framework.Logf("%s has been patched", patchedControllerRevision.Name) ginkgo.By("Create a new ControllerRevision") @@ -191,7 +191,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { framework.Logf("Created ControllerRevision: %s", newControllerRevision.Name) ginkgo.By("Confirm that there are two ControllerRevisions") - err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) + err = wait.PollUntilContextTimeout(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, true, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) framework.ExpectNoError(err, "failed to count required ControllerRevisions") ginkgo.By(fmt.Sprintf("Deleting ControllerRevision %q", initialRevision.Name)) @@ -199,7 +199,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err) ginkgo.By("Confirm that there is only one ControllerRevision") - err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) + err = wait.PollUntilContextTimeout(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, true, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) framework.ExpectNoError(err, "failed to count required ControllerRevisions") listControllerRevisions, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{}) @@ -216,7 +216,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { return err }) framework.ExpectNoError(err, "failed to update ControllerRevision in namespace: %s", ns) - framework.ExpectEqual(updatedControllerRevision.Labels[currentControllerRevision.Name], "updated", "Did not find 'updated' label for this ControllerRevision. Current labels: %v", currentControllerRevision.Labels) + gomega.Expect(updatedControllerRevision.Labels).To(gomega.HaveKeyWithValue(currentControllerRevision.Name, "updated"), "Did not find 'updated' label for this ControllerRevision. 
Current labels: %v", updatedControllerRevision.Labels) framework.Logf("%s has been updated", updatedControllerRevision.Name) ginkgo.By("Generate another ControllerRevision by patching the Daemonset") @@ -226,7 +226,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { framework.ExpectNoError(err, "error patching daemon set") ginkgo.By("Confirm that there are two ControllerRevisions") - err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) + err = wait.PollUntilContextTimeout(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, true, checkControllerRevisionListQuantity(f, dsLabelSelector, 2)) framework.ExpectNoError(err, "failed to count required ControllerRevisions") updatedLabel := map[string]string{updatedControllerRevision.Name: "updated"} @@ -237,12 +237,12 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() { framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err) ginkgo.By("Confirm that there is only one ControllerRevision") - err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) + err = wait.PollUntilContextTimeout(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, true, checkControllerRevisionListQuantity(f, dsLabelSelector, 1)) framework.ExpectNoError(err, "failed to count required ControllerRevisions") list, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "failed to list ControllerRevision") - framework.ExpectEqual(list.Items[0].Revision, int64(3), "failed to find the expected revision for the Controller") + gomega.Expect(list.Items[0].Revision).To(gomega.Equal(int64(3)), "failed to find the expected revision for the Controller") framework.Logf("ControllerRevision %q has revision %d", list.Items[0].Name, list.Items[0].Revision) }) }) diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 977ba3ab55830..a363feafc4501 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -342,12 +342,12 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("getting") gottenCronJob, err := cjClient.Get(ctx, createdCronJob.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.ExpectEqual(gottenCronJob.UID, createdCronJob.UID) + gomega.Expect(gottenCronJob.UID).To(gomega.Equal(createdCronJob.UID)) ginkgo.By("listing") cjs, err := cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) - framework.ExpectEqual(len(cjs.Items), 1, "filtered list should have 1 item") + gomega.Expect(cjs.Items).To(gomega.HaveLen(1), "filtered list should have 1 item") ginkgo.By("watching") framework.Logf("starting watch") @@ -359,7 +359,7 @@ var _ = SIGDescribe("CronJob", func() { ginkgo.By("cluster-wide listing") clusterCJs, err := clusterCJClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName}) framework.ExpectNoError(err) - framework.ExpectEqual(len(clusterCJs.Items), 1, "filtered list should have 1 items") + gomega.Expect(clusterCJs.Items).To(gomega.HaveLen(1), "filtered list should have 1 item") ginkgo.By("cluster-wide watching") framework.Logf("starting watch") @@ -370,7 +370,7 @@ var _ = SIGDescribe("CronJob", func() { patchedCronJob, err := cjClient.Patch(ctx, createdCronJob.Name, types.MergePatchType, 
[]byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) framework.ExpectNoError(err) - framework.ExpectEqual(patchedCronJob.Annotations["patched"], "true", "patched object should have the applied annotation") + gomega.Expect(patchedCronJob.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation") ginkgo.By("updating") var cjToUpdate, updatedCronJob *batchv1.CronJob @@ -384,7 +384,7 @@ var _ = SIGDescribe("CronJob", func() { return err }) framework.ExpectNoError(err) - framework.ExpectEqual(updatedCronJob.Annotations["updated"], "true", "updated object should have the applied annotation") + gomega.Expect(updatedCronJob.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation") framework.Logf("waiting for watch events with expected annotations") for sawAnnotations := false; !sawAnnotations; { @@ -394,7 +394,7 @@ var _ = SIGDescribe("CronJob", func() { if !ok { framework.Fail("Watch channel is closed.") } - framework.ExpectEqual(evt.Type, watch.Modified) + gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified)) watchedCronJob, isCronJob := evt.Object.(*batchv1.CronJob) if !isCronJob { framework.Failf("expected CronJob, got %T", evt.Object) @@ -427,7 +427,7 @@ var _ = SIGDescribe("CronJob", func() { if !patchedStatus.Status.LastScheduleTime.Equal(&now1) { framework.Failf("patched object should have the applied lastScheduleTime %#v, got %#v instead", cjStatus.LastScheduleTime, patchedStatus.Status.LastScheduleTime) } - framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation") + gomega.Expect(patchedStatus.Annotations).To(gomega.HaveKeyWithValue("patchedstatus", "true"), "patched object should have the applied annotation") ginkgo.By("updating /status") // we need to use RFC3339 version since conversion over the wire cuts nanoseconds @@ -454,7 +454,7 @@ var _ = SIGDescribe("CronJob", func() { framework.ExpectNoError(err) statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid") framework.ExpectNoError(err) - framework.ExpectEqual(string(createdCronJob.UID), statusUID, fmt.Sprintf("createdCronJob.UID: %v expected to match statusUID: %v ", createdCronJob.UID, statusUID)) + gomega.Expect(string(createdCronJob.UID)).To(gomega.Equal(statusUID), "createdCronJob.UID: %v expected to match statusUID: %v ", createdCronJob.UID, statusUID) // CronJob resource delete operations expectFinalizer := func(cj *batchv1.CronJob, msg string) { @@ -508,7 +508,7 @@ func ensureHistoryLimits(ctx context.Context, c clientset.Interface, ns string, activeJobs, finishedJobs := filterActiveJobs(jobs) if len(finishedJobs) != 1 { framework.Logf("Expected one finished job in namespace %s; activeJobs=%v; finishedJobs=%v", ns, activeJobs, finishedJobs) - framework.ExpectEqual(len(finishedJobs), 1) + gomega.Expect(finishedJobs).To(gomega.HaveLen(1)) } // Job should get deleted when the next job finishes the next minute @@ -524,7 +524,7 @@ func ensureHistoryLimits(ctx context.Context, c clientset.Interface, ns string, activeJobs, finishedJobs = filterActiveJobs(jobs) if len(finishedJobs) != 1 { framework.Logf("Expected one finished job in namespace %s; activeJobs=%v; finishedJobs=%v", ns, activeJobs, finishedJobs) - framework.ExpectEqual(len(finishedJobs), 1) + gomega.Expect(finishedJobs).To(gomega.HaveLen(1)) } ginkgo.By("Removing cronjob") diff --git a/test/e2e/apps/daemon_set.go 
b/test/e2e/apps/daemon_set.go index a50b83e75f256..fc8079d72f113 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -92,7 +92,7 @@ type updateDSFunc func(*appsv1.DaemonSet) func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) { daemonsets := c.AppsV1().DaemonSets(namespace) var updateErr error - pollErr := wait.PollImmediateWithContext(ctx, 10*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { + pollErr := wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) { if ds, err = daemonsets.Get(ctx, name, metav1.GetOptions{}); err != nil { return false, err } @@ -105,7 +105,7 @@ func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, name updateErr = err return false, nil }) - if pollErr == wait.ErrWaitTimeout { + if wait.Interrupted(pollErr) { pollErr = fmt.Errorf("couldn't apply the provided updated to DaemonSet %q: %v", name, updateErr) } return ds, pollErr @@ -127,7 +127,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { for _, ds := range daemonsets.Items { ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, &ds)) framework.ExpectNoError(err, "error waiting for daemon pod to be reaped") } } @@ -182,7 +182,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) @@ -192,7 +192,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { pod := podList.Items[0] err = c.CoreV1().Pods(ns).Delete(ctx, pod.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") }) @@ -212,7 +212,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes") ginkgo.By("Change node label to blue, check that daemon pod is launched.") @@ -221,8 +221,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector) framework.ExpectNoError(err, "error setting labels on node") daemonSetLabels, _ := 
separateDaemonSetNodeLabels(newNode.Labels) - framework.ExpectEqual(len(daemonSetLabels), 1) - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) + gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) @@ -231,7 +231,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { nodeSelector[daemonsetColorLabel] = "green" greenNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector) framework.ExpectNoError(err, "error removing labels on node") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes") ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") @@ -240,8 +240,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds, err = c.AppsV1().DaemonSets(ns).Patch(ctx, dsName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}) framework.ExpectNoError(err, "error patching daemon set") daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) - framework.ExpectEqual(len(daemonSetLabels), 1) - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name})) + gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) @@ -275,7 +275,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Initially, daemon pods should not be running on any nodes.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes") ginkgo.By("Change node label to blue, check that daemon pod is launched.") @@ -284,8 +284,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { newNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector) framework.ExpectNoError(err, "error setting labels on node") daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels) - framework.ExpectEqual(len(daemonSetLabels), 1) - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) + gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name})) framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes") err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) @@ -293,7 +293,7 @@ var 
_ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Remove the node label and wait for daemons to be unscheduled") _, err = setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{}) framework.ExpectNoError(err, "error removing labels on node") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes") }) @@ -310,7 +310,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) @@ -322,11 +322,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { pod.Status.Phase = v1.PodFailed _, err = c.CoreV1().Pods(ns).UpdateStatus(ctx, &pod, metav1.UpdateOptions{}) framework.ExpectNoError(err, "error failing a daemon pod") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to revive") ginkgo.By("Wait for the failed daemon pod to be completely deleted.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, waitFailedDaemonPodDeleted(c, &pod)) framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted") }) @@ -342,7 +342,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels @@ -351,7 +351,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { waitForHistoryCreated(ctx, c, ns, label, 1) first := curHistory(listDaemonHistories(ctx, c, ns, label), ds) firstHash := first.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] - framework.ExpectEqual(first.Revision, int64(1)) + gomega.Expect(first.Revision).To(gomega.Equal(int64(1))) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash) ginkgo.By("Update daemon pods image.") @@ -360,11 +360,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods images aren't updated.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods are still running on every node of the cluster.") - err = 
wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels @@ -372,7 +372,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) waitForHistoryCreated(ctx, c, ns, label, 2) cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds) - framework.ExpectEqual(cur.Revision, int64(2)) + gomega.Expect(cur.Revision).To(gomega.Equal(int64(2))) gomega.Expect(cur.Labels).NotTo(gomega.HaveKeyWithValue(appsv1.DefaultDaemonSetUniqueLabelKey, firstHash)) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), firstHash) }) @@ -392,7 +392,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels @@ -401,7 +401,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { waitForHistoryCreated(ctx, c, ns, label, 1) cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds) hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] - framework.ExpectEqual(cur.Revision, int64(1)) + gomega.Expect(cur.Revision).To(gomega.Equal(int64(1))) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash) ginkgo.By("Update daemon pods image.") @@ -417,11 +417,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second ginkgo.By("Check that daemon pods images are updated.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, retryTimeout, true, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1)) framework.ExpectNoError(err) ginkgo.By("Check that daemon pods are still running on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels @@ -430,7 +430,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { waitForHistoryCreated(ctx, c, ns, label, 2) cur = curHistory(listDaemonHistories(ctx, c, ns, label), ds) hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] - framework.ExpectEqual(cur.Revision, int64(2)) + gomega.Expect(cur.Revision).To(gomega.Equal(int64(2))) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash) }) @@ -452,7 +452,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) framework.Logf("Check that daemon pods launch on every node of the cluster") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") framework.Logf("Update the DaemonSet to trigger a 
rollout") @@ -464,7 +464,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) // Make sure we're in the middle of a rollout - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkAtLeastOneNewPod(c, ns, label, newImage)) framework.ExpectNoError(err) pods := listDaemonPods(ctx, c, ns, label) @@ -484,7 +484,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { schedulableNodes, err = e2enode.GetReadySchedulableNodes(ctx, c) framework.ExpectNoError(err) if len(schedulableNodes.Items) < 2 { - framework.ExpectEqual(len(existingPods), 0) + gomega.Expect(existingPods).To(gomega.BeEmpty()) } else { gomega.Expect(existingPods).NotTo(gomega.BeEmpty()) } @@ -497,7 +497,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) framework.Logf("Make sure DaemonSet rollback is complete") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1)) framework.ExpectNoError(err) // After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted @@ -562,7 +562,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels @@ -571,7 +571,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { waitForHistoryCreated(ctx, c, ns, label, 1) cur := curHistory(listDaemonHistories(ctx, c, ns, label), ds) hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] - framework.ExpectEqual(cur.Revision, int64(1)) + gomega.Expect(cur.Revision).To(gomega.Equal(int64(1))) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash) newVersion := "2" @@ -590,7 +590,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Check that daemon pods surge and invariants are preserved during that rollout") ageOfOldPod := make(map[string]time.Time) deliberatelyDeletedPods := sets.NewString() - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, retryTimeout, true, func(ctx context.Context) (bool, error) { podList, err := c.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return false, err @@ -813,7 +813,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods are still running on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds)) framework.ExpectNoError(err, "error waiting for daemon pod to start") // Check history and labels @@ -822,7 +822,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { waitForHistoryCreated(ctx, c, ns, label, 2) cur = 
curHistory(listDaemonHistories(ctx, c, ns, label), ds) hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] - framework.ExpectEqual(cur.Revision, int64(2)) + gomega.Expect(cur.Revision).To(gomega.Equal(int64(2))) checkDaemonSetPodsLabels(listDaemonPods(ctx, c, ns, label), hash) }) @@ -846,7 +846,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset)) framework.ExpectNoError(err, "error waiting for daemon pod to start") err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) @@ -854,7 +854,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("listing all DaemonSets") dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list Daemon Sets") - framework.ExpectEqual(len(dsList.Items), 1, "filtered list wasn't found") + gomega.Expect(dsList.Items).To(gomega.HaveLen(1), "filtered list wasn't found") ginkgo.By("DeleteCollection of the DaemonSets") err = dsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: labelSelector}) @@ -863,7 +863,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ginkgo.By("Verify that ReplicaSets have been deleted") dsList, err = c.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "failed to list DaemonSets") - framework.ExpectEqual(len(dsList.Items), 0, "filtered list should have no daemonset") + gomega.Expect(dsList.Items).To(gomega.BeEmpty(), "filtered list should have no daemonset") }) /* Release: v1.22 @@ -894,7 +894,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset)) + err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset)) framework.ExpectNoError(err, "error waiting for daemon pod to start") err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName) framework.ExpectNoError(err) @@ -1097,7 +1097,7 @@ func setDaemonSetNodeLabels(ctx context.Context, c clientset.Interface, nodeName nodeClient := c.CoreV1().Nodes() var newNode *v1.Node var newLabels map[string]string - err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, func(ctx context.Context) (bool, error) { node, err := nodeClient.Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return false, err @@ -1207,7 +1207,7 @@ func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) { podHash := pod.Labels[appsv1.DefaultDaemonSetUniqueLabelKey] gomega.Expect(podHash).ToNot(gomega.BeEmpty()) if len(hash) > 0 { - framework.ExpectEqual(podHash, hash, "unexpected hash for pod %s", pod.Name) + gomega.Expect(podHash).To(gomega.Equal(hash), "unexpected hash for pod %s", pod.Name) } } } @@ -1226,7 +1226,7 @@ func waitForHistoryCreated(ctx context.Context, c clientset.Interface, ns string framework.Logf("%d/%d 
controllerrevisions created.", len(historyList.Items), numHistory) return false, nil } - err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, listHistoryFn) + err := wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, listHistoryFn) framework.ExpectNoError(err, "error waiting for controllerrevisions to be created") } @@ -1253,7 +1253,7 @@ func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet foundCurHistories++ } } - framework.ExpectEqual(foundCurHistories, 1) + gomega.Expect(foundCurHistories).To(gomega.Equal(1)) gomega.Expect(curHistory).NotTo(gomega.BeNil()) return curHistory } diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 4d5fb39d7d6d5..616d2b43c7c31 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -373,8 +373,8 @@ var _ = SIGDescribe("Deployment", func() { deploymentGet := appsv1.Deployment{} err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet) framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment") - framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image") - framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels") + gomega.Expect(deploymentGet.Spec.Template.Spec.Containers[0].Image).To(gomega.Equal(testDeploymentUpdateImage), "failed to update image") + gomega.Expect(deploymentGet.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-deployment", "updated"), "failed to update labels") ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() @@ -432,8 +432,9 @@ var _ = SIGDescribe("Deployment", func() { deploymentGet = appsv1.Deployment{} err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet) framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment") - framework.ExpectEqual(deploymentGet.Spec.Template.Spec.Containers[0].Image, testDeploymentUpdateImage, "failed to update image") - framework.ExpectEqual(deploymentGet.ObjectMeta.Labels["test-deployment"], "updated", "failed to update labels") + gomega.Expect(deploymentGet.Spec.Template.Spec.Containers[0].Image).To(gomega.Equal(testDeploymentUpdateImage), "failed to update image") + gomega.Expect(deploymentGet.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-deployment", "updated"), "failed to update labels") + ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart) defer cancel() _, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) { @@ -782,7 +783,7 @@ func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) { framework.ExpectNoError(err) _, allOldRSs, err := testutil.GetOldReplicaSets(deployment, c) framework.ExpectNoError(err) - framework.ExpectEqual(len(allOldRSs), 1) + gomega.Expect(allOldRSs).To(gomega.HaveLen(1)) } func testRecreateDeployment(ctx context.Context, f *framework.Framework) { @@ -984,8 +985,8 @@ func testRolloverDeployment(ctx context.Context, f *framework.Framework) { } func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) { - framework.ExpectEqual(*rs.Spec.Replicas, replicas) - framework.ExpectEqual(rs.Status.Replicas, replicas) + gomega.Expect(*rs.Spec.Replicas).To(gomega.Equal(replicas)) + gomega.Expect(rs.Status.Replicas).To(gomega.Equal(replicas)) } func randomScale(d 
*appsv1.Deployment, i int) { @@ -1141,7 +1142,7 @@ func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) { framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName) rsList := listDeploymentReplicaSets(ctx, c, ns, podLabels) - framework.ExpectEqual(len(rsList.Items), 1) + gomega.Expect(rsList.Items).To(gomega.HaveLen(1)) framework.Logf("Obtaining the ReplicaSet's UID") orphanedRSUID := rsList.Items[0].UID @@ -1172,10 +1173,10 @@ func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) { framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName) rsList = listDeploymentReplicaSets(ctx, c, ns, podLabels) - framework.ExpectEqual(len(rsList.Items), 1) + gomega.Expect(rsList.Items).To(gomega.HaveLen(1)) framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet") - framework.ExpectEqual(rsList.Items[0].UID, orphanedRSUID) + gomega.Expect(rsList.Items[0].UID).To(gomega.Equal(orphanedRSUID)) } // testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle @@ -1258,7 +1259,7 @@ func testProportionalScalingDeployment(ctx context.Context, f *framework.Framewo // Second rollout's replicaset should have 0 available replicas. framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0") - framework.ExpectEqual(secondRS.Status.AvailableReplicas, int32(0)) + gomega.Expect(secondRS.Status.AvailableReplicas).To(gomega.Equal(int32(0))) // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas. newReplicas := replicas + int32(maxSurge) - minAvailableReplicas @@ -1549,7 +1550,7 @@ func watchRecreateDeployment(ctx context.Context, c clientset.Interface, d *apps ctxUntil, cancel := context.WithTimeout(ctx, 2*time.Minute) defer cancel() _, err := watchtools.Until(ctxUntil, d.ResourceVersion, w, condition) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status) } return err @@ -1573,7 +1574,7 @@ func waitForDeploymentOldRSsNum(ctx context.Context, c clientset.Interface, ns, } return len(oldRSs) == desiredRSNum, nil }) - if pollErr == wait.ErrWaitTimeout { + if wait.Interrupted(pollErr) { pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName) testutil.LogReplicaSetsOfDeployment(d, oldRSs, nil, framework.Logf) } @@ -1583,14 +1584,14 @@ func waitForDeploymentOldRSsNum(ctx context.Context, c clientset.Interface, ns, // waitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas. 
func waitForReplicaSetDesiredReplicas(ctx context.Context, rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error { desiredGeneration := replicaSet.Generation - err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, true, func(ctx context.Context) (bool, error) { rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(replicaSet.Spec.Replicas) && rs.Status.Replicas == *(rs.Spec.Replicas), nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("replicaset %q never had desired number of replicas", replicaSet.Name) } return err @@ -1599,14 +1600,14 @@ func waitForReplicaSetDesiredReplicas(ctx context.Context, rsClient appsclient.R // waitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum func waitForReplicaSetTargetSpecReplicas(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error { desiredGeneration := replicaSet.Generation - err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, true, func(ctx context.Context) (bool, error) { rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } return rs.Status.ObservedGeneration >= desiredGeneration && *rs.Spec.Replicas == targetReplicaNum, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("replicaset %q never had desired number of .spec.replicas", replicaSet.Name) } return err @@ -1664,8 +1665,8 @@ func testDeploymentSubresources(ctx context.Context, f *framework.Framework) { if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } - framework.ExpectEqual(scale.Spec.Replicas, int32(1)) - framework.ExpectEqual(scale.Status.Replicas, int32(1)) + gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1))) + gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1))) ginkgo.By("updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional @@ -1674,14 +1675,14 @@ func testDeploymentSubresources(ctx context.Context, f *framework.Framework) { if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } - framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) + gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2))) ginkgo.By("verifying the deployment Spec.Replicas was modified") deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get deployment resource: %v", err) } - framework.ExpectEqual(*(deployment.Spec.Replicas), int32(2)) + gomega.Expect(*(deployment.Spec.Replicas)).To(gomega.Equal(int32(2))) ginkgo.By("Patch a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional @@ -1698,5 +1699,5 @@ func testDeploymentSubresources(ctx context.Context, f *framework.Framework) { deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err, 
"Failed to get deployment resource: %v", err) - framework.ExpectEqual(*(deployment.Spec.Replicas), int32(4), "deployment should have 4 replicas") + gomega.Expect(*(deployment.Spec.Replicas)).To(gomega.Equal(int32(4)), "deployment should have 4 replicas") } diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index e45b362fc8d81..a23f0d2f57478 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -115,7 +115,7 @@ var _ = SIGDescribe("DisruptionController", func() { pdb.Spec.MinAvailable = &newMinAvailable return pdb }, cs.PolicyV1().PodDisruptionBudgets(ns).Update) - framework.ExpectEqual(updatedPDB.Spec.MinAvailable.String(), "2%") + gomega.Expect(updatedPDB.Spec.MinAvailable.String()).To(gomega.Equal("2%")) ginkgo.By("patching the pdb") patchedPDB := patchPDBOrDie(ctx, cs, dc, ns, defaultName, func(old *policyv1.PodDisruptionBudget) (bytes []byte, err error) { @@ -127,7 +127,7 @@ var _ = SIGDescribe("DisruptionController", func() { framework.ExpectNoError(err, "failed to marshal JSON for new data") return newBytes, nil }) - framework.ExpectEqual(patchedPDB.Spec.MinAvailable.String(), "3%") + gomega.Expect(patchedPDB.Spec.MinAvailable.String()).To(gomega.Equal("3%")) deletePDBOrDie(ctx, cs, ns, defaultName) }) @@ -146,7 +146,7 @@ var _ = SIGDescribe("DisruptionController", func() { // Since disruptionAllowed starts out 0, if we see it ever become positive, // that means the controller is working. - err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) { pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, defaultName, metav1.GetOptions{}) if err != nil { return false, err @@ -329,7 +329,7 @@ var _ = SIGDescribe("DisruptionController", func() { // Since disruptionAllowed starts out false, if an eviction is ever allowed, // that means the controller is working. 
- err = wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) { err = cs.CoreV1().Pods(ns).EvictV1(ctx, e) if err != nil { return false, nil @@ -500,7 +500,7 @@ func deletePDBOrDie(ctx context.Context, cs kubernetes.Interface, ns string, nam func listPDBs(ctx context.Context, cs kubernetes.Interface, ns string, labelSelector string, count int, expectedPDBNames []string) { pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "Listing PDB set in namespace %s", ns) - framework.ExpectEqual(len(pdbList.Items), count, "Expecting %d PDBs returned in namespace %s", count, ns) + gomega.Expect(pdbList.Items).To(gomega.HaveLen(count), "Expecting %d PDBs returned in namespace %s", count, ns) pdbNames := make([]string, 0) for _, item := range pdbList.Items { @@ -519,7 +519,7 @@ func deletePDBCollection(ctx context.Context, cs kubernetes.Interface, ns string func waitForPDBCollectionToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string) { ginkgo.By("Waiting for the PDB collection to be deleted") - err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) { pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err @@ -558,7 +558,7 @@ func createPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n func waitForPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n int) { ginkgo.By("Waiting for all pods to be running") - err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) { pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: "foo=bar"}) if err != nil { return false, err @@ -624,7 +624,7 @@ func createReplicaSetOrDie(ctx context.Context, cs kubernetes.Interface, ns stri func locateRunningPod(ctx context.Context, cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) { ginkgo.By("locating a running pod") - err = wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) { podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{}) if err != nil { return false, err @@ -645,7 +645,7 @@ func locateRunningPod(ctx context.Context, cs kubernetes.Interface, ns string) ( func waitForPdbToBeProcessed(ctx context.Context, cs kubernetes.Interface, ns string, name string) { ginkgo.By("Waiting for the pdb to be processed") - err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) { pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, err @@ -660,7 +660,7 @@ func waitForPdbToBeProcessed(ctx context.Context, cs kubernetes.Interface, ns st func 
waitForPdbToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string, name string) { ginkgo.By("Waiting for the pdb to be deleted") - err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) { _, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil // done @@ -675,7 +675,7 @@ func waitForPdbToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns stri func waitForPdbToObserveHealthyPods(ctx context.Context, cs kubernetes.Interface, ns string, healthyCount int32) { ginkgo.By("Waiting for the pdb to observed all healthy pods") - err := wait.PollImmediateWithContext(ctx, framework.Poll, wait.ForeverTestTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, framework.Poll, wait.ForeverTestTimeout, true, func(ctx context.Context) (bool, error) { pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, "foo", metav1.GetOptions{}) if err != nil { return false, err diff --git a/test/e2e/apps/framework.go b/test/e2e/apps/framework.go index dde7fa0326b4b..d940e5f1f2f62 100644 --- a/test/e2e/apps/framework.go +++ b/test/e2e/apps/framework.go @@ -16,9 +16,7 @@ limitations under the License. package apps -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-apps] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("apps") diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index a35d10e0900ff..12f7dd6ed2b6f 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -50,6 +50,7 @@ import ( "k8s.io/kubernetes/test/e2e/scheduling" admissionapi "k8s.io/pod-security-admission/api" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -98,7 +99,7 @@ var _ = SIGDescribe("Job", func() { successes++ } } - framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes) + gomega.Expect(successes).To(gomega.Equal(completions), "expected %d successful job pods, but got %d", completions, successes) }) ginkgo.It("should allow to use the pod failure policy on exit code to fail the job early", func(ctx context.Context) { @@ -215,7 +216,7 @@ var _ = SIGDescribe("Job", func() { pods, err := e2ejob.GetAllRunningJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) framework.ExpectNoError(err, "failed to get running pods for the job: %s/%s", job.Name, job.Namespace) - framework.ExpectEqual(len(pods), 1, "Exactly one running pod is expected") + gomega.Expect(pods).To(gomega.HaveLen(1), "Exactly one running pod is expected") pod := pods[0] ginkgo.By(fmt.Sprintf("Evicting the running pod: %s/%s", pod.Name, pod.Namespace)) evictTarget := &policyv1.Eviction{ @@ -279,30 +280,18 @@ var _ = SIGDescribe("Job", func() { job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) - ginkgo.By("Ensuring pods aren't created for job") - framework.ExpectEqual(wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) { - pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) - if err != nil { - return false, err - } - 
return len(pods.Items) > 0, nil - }), wait.ErrWaitTimeout) - ginkgo.By("Checking Job status to observe Suspended state") - job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) - framework.ExpectNoError(err, "failed to retrieve latest job object") - exists := false - for _, c := range job.Status.Conditions { - if c.Type == batchv1.JobSuspended { - exists = true - break - } - } - if !exists { - framework.Failf("Job was expected to be completed or failed") - } + err = e2ejob.WaitForJobSuspend(ctx, f.ClientSet, f.Namespace.Name, job.Name) + framework.ExpectNoError(err, "failed to observe suspend state: %s", f.Namespace.Name) + + ginkgo.By("Ensuring pods aren't created for job") + pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name) + framework.ExpectNoError(err, "failed to list pod for a given job %s in namespace %s", job.Name, f.Namespace.Name) + gomega.Expect(pods.Items).To(gomega.BeEmpty()) ginkgo.By("Updating the job with suspend=false") + job, err = f.ClientSet.BatchV1().Jobs(f.Namespace.Name).Get(ctx, job.Name, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get job in namespace: %s", f.Namespace.Name) job.Spec.Suspend = pointer.BoolPtr(false) job, err = e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name) @@ -324,21 +313,15 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name) ginkgo.By("Updating the job with suspend=true") - err = wait.PollImmediate(framework.Poll, framework.SingleCallTimeout, func() (bool, error) { + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) - if err != nil { - return false, err - } + framework.ExpectNoError(err, "unable to get job %s in namespace %s", job.Name, f.Namespace.Name) job.Spec.Suspend = pointer.Bool(true) updatedJob, err := e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job) if err == nil { job = updatedJob - return true, nil - } - if apierrors.IsConflict(err) { - return false, nil } - return false, err + return err }) framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name) @@ -389,12 +372,42 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed obtaining completion index from pod in namespace: %s", f.Namespace.Name) succeededIndexes.Insert(ix) expectedName := fmt.Sprintf("%s-%d", job.Name, ix) - framework.ExpectEqual(pod.Spec.Hostname, expectedName, "expected completed pod with hostname %s, but got %s", expectedName, pod.Spec.Hostname) + gomega.Expect(pod.Spec.Hostname).To(gomega.Equal(expectedName), "expected completed pod with hostname %s, but got %s", expectedName, pod.Spec.Hostname) } } gotIndexes := succeededIndexes.List() wantIndexes := []int{0, 1, 2, 3} - framework.ExpectEqual(gotIndexes, wantIndexes, "expected completed indexes %s, but got %s", wantIndexes, gotIndexes) + gomega.Expect(gotIndexes).To(gomega.Equal(wantIndexes), "expected completed indexes %s, but got %s", wantIndexes, gotIndexes) + }) + + /* + Testcase: Ensure that all indexes are executed for an indexed job with backoffLimitPerIndex despite some failing + Description: Create an indexed job and ensure that all indexes are either failed or succeeded, depending + on the end state of the corresponding pods. 
Pods with odd indexes fail, while the pods with even indexes + succeeded. Also, verify that the number of failed pods doubles the number of failing indexes, as the + backoffLimitPerIndex=1, allowing for one pod recreation before marking that indexed failed. + */ + ginkgo.It("should execute all indexes despite some failing when using backoffLimitPerIndex", func(ctx context.Context) { + ginkgo.By("Creating an indexed job with backoffLimit per index and failing pods") + job := e2ejob.NewTestJob("failOddSucceedEven", "with-backoff-limit-per-index", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) + job.Spec.BackoffLimit = nil + job.Spec.BackoffLimitPerIndex = ptr.To[int32](1) + mode := batchv1.IndexedCompletion + job.Spec.CompletionMode = &mode + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) + framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) + + ginkgo.By("Awaiting for the job to fail as there are failed indexes") + err = e2ejob.WaitForJobFailed(f.ClientSet, f.Namespace.Name, job.Name) + framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) + + ginkgo.By("Verifying the Job status fields to ensure all indexes were executed") + job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) + framework.ExpectNoError(err, "failed to retrieve latest job object") + gomega.Expect(job.Status.FailedIndexes).Should(gomega.HaveValue(gomega.Equal("1,3"))) + gomega.Expect(job.Status.CompletedIndexes).Should(gomega.Equal("0,2")) + gomega.Expect(job.Status.Failed).Should(gomega.Equal(int32(4))) + gomega.Expect(job.Status.Succeeded).Should(gomega.Equal(int32(2))) }) /* @@ -580,7 +593,7 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name) gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1)) for _, pod := range pods.Items { - framework.ExpectEqual(pod.Status.Phase, v1.PodFailed) + gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodFailed)) } }) @@ -627,7 +640,7 @@ var _ = SIGDescribe("Job", func() { successes++ } } - framework.ExpectEqual(successes, largeCompletions, "expected %d successful job pods, but got %d", largeCompletions, successes) + gomega.Expect(successes).To(gomega.Equal(largeCompletions), "expected %d successful job pods, but got %d", largeCompletions, successes) }) /* @@ -670,7 +683,7 @@ var _ = SIGDescribe("Job", func() { if !patchedStatus.Status.StartTime.Equal(&now1) { framework.Failf("patched object should have the applied StartTime %#v, got %#v instead", jStatus.StartTime, patchedStatus.Status.StartTime) } - framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation") + gomega.Expect(patchedStatus.Annotations).To(gomega.HaveKeyWithValue("patchedstatus", "true"), "patched object should have the applied annotation") ginkgo.By("updating /status") // we need to use RFC3339 version since conversion over the wire cuts nanoseconds @@ -696,7 +709,7 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err) statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid") framework.ExpectNoError(err) - framework.ExpectEqual(string(job.UID), statusUID, fmt.Sprintf("job.UID: %v expected to match statusUID: %v ", job.UID, statusUID)) + gomega.Expect(string(job.UID)).To(gomega.Equal(statusUID), fmt.Sprintf("job.UID: %v expected to match statusUID: %v ", job.UID, 
statusUID)) }) /* @@ -752,7 +765,7 @@ var _ = SIGDescribe("Job", func() { updatedValue: "patched", } waitForJobEvent(ctx, c) - framework.ExpectEqual(patchedJob.Labels[jobName], "patched", "Did not find job label for this job. Current labels: %v", patchedJob.Labels) + gomega.Expect(patchedJob.Labels).To(gomega.HaveKeyWithValue(jobName, "patched"), "Did not find job label for this job. Current labels: %v", patchedJob.Labels) ginkgo.By("Updating the job") var updatedJob *batchv1.Job @@ -783,13 +796,13 @@ var _ = SIGDescribe("Job", func() { updatedValue: "true", } waitForJobEvent(ctx, c) - framework.ExpectEqual(updatedJob.Annotations["updated"], "true", "updated Job should have the applied annotation") + gomega.Expect(updatedJob.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated Job should have the applied annotation") framework.Logf("Found Job annotations: %#v", patchedJob.Annotations) ginkgo.By("Listing all Jobs with LabelSelector") jobs, err := f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "Failed to list job. %v", err) - framework.ExpectEqual(len(jobs.Items), 1, "Failed to find job %v", jobName) + gomega.Expect(jobs.Items).To(gomega.HaveLen(1), "Failed to find job %v", jobName) testJob := jobs.Items[0] framework.Logf("Job: %v as labels: %v", testJob.Name, testJob.Labels) @@ -819,11 +832,48 @@ var _ = SIGDescribe("Job", func() { ginkgo.By("Relist jobs to confirm deletion") jobs, err = f.ClientSet.BatchV1().Jobs("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) framework.ExpectNoError(err, "Failed to list job. %v", err) - framework.ExpectEqual(len(jobs.Items), 0, "Found job %v", jobName) + gomega.Expect(jobs.Items).To(gomega.BeEmpty(), "Found job %v", jobName) }) + ginkgo.It("should update the status ready field", func(ctx context.Context) { + ginkgo.By("Creating a job with suspend=true") + job := e2ejob.NewTestJob("notTerminate", "all-ready", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) + job.Spec.Suspend = ptr.To[bool](true) + job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job) + framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) + + ginkgo.By("Ensure the job controller updates the status.ready field") + err = e2ejob.WaitForJobReady(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To[int32](0)) + framework.ExpectNoError(err, "failed to ensure job status ready field in namespace: %s", f.Namespace.Name) + + ginkgo.By("Updating the job with suspend=false") + err = updateJobSuspendWithRetries(ctx, f, job, ptr.To[bool](false)) + framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name) + + ginkgo.By("Ensure the job controller updates the status.ready field") + err = e2ejob.WaitForJobReady(ctx, f.ClientSet, f.Namespace.Name, job.Name, ¶llelism) + framework.ExpectNoError(err, "failed to ensure job status ready field in namespace: %s", f.Namespace.Name) + + ginkgo.By("Updating the job with suspend=true") + err = updateJobSuspendWithRetries(ctx, f, job, ptr.To[bool](true)) + framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name) + + ginkgo.By("Ensure the job controller updates the status.ready field") + err = e2ejob.WaitForJobReady(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To[int32](0)) + framework.ExpectNoError(err, "failed to ensure job status ready field in namespace: %s", f.Namespace.Name) + }) }) +func updateJobSuspendWithRetries(ctx 
context.Context, f *framework.Framework, job *batchv1.Job, suspend *bool) error { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + job, err := e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name) + framework.ExpectNoError(err, "unable to get job %s in namespace %s", job.Name, f.Namespace.Name) + job.Spec.Suspend = suspend + _, err = e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job) + return err + }) +} + // waitForJobEvent is used to track and log Job events. // As delivery of events is not actually guaranteed we // will not return an error if we miss the required event. diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index d20e6d709c75e..ac7af7a6c41f0 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -19,6 +19,7 @@ package apps import ( "context" "encoding/json" + "errors" "fmt" "time" @@ -66,7 +67,7 @@ var _ = SIGDescribe("ReplicationController", func() { Testname: Replication Controller, run basic image Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP. */ - framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) { + framework.ConformanceIt("should serve a basic image on each replica with a public image", func(ctx context.Context) { TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage) }) @@ -205,7 +206,7 @@ var _ = SIGDescribe("ReplicationController", func() { ginkgo.By("patching ReplicationController") testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcLabelPatchPayload), metav1.PatchOptions{}) framework.ExpectNoError(err, "Failed to patch ReplicationController") - framework.ExpectEqual(testRcPatched.ObjectMeta.Labels["test-rc"], "patched", "failed to patch RC") + gomega.Expect(testRcPatched.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-rc", "patched"), "failed to patch RC") ginkgo.By("waiting for RC to be modified") eventFound = false ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second) @@ -235,7 +236,7 @@ var _ = SIGDescribe("ReplicationController", func() { ginkgo.By("patching ReplicationController status") rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, []byte(rcStatusPatchPayload), metav1.PatchOptions{}, "status") framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus") - framework.ExpectEqual(rcStatus.Status.ReadyReplicas, int32(0), "ReplicationControllerStatus's readyReplicas does not equal 0") + gomega.Expect(rcStatus.Status.ReadyReplicas).To(gomega.Equal(int32(0)), "ReplicationControllerStatus's readyReplicas does not equal 0") ginkgo.By("waiting for RC to be modified") eventFound = false ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second) @@ -281,7 +282,7 @@ var _ = SIGDescribe("ReplicationController", func() { rcStatusUjson, err := json.Marshal(rcStatusUnstructured) framework.ExpectNoError(err, "Failed to marshal json of replicationcontroller label patch") json.Unmarshal(rcStatusUjson, &rcStatus) - framework.ExpectEqual(rcStatus.Status.Replicas, testRcInitialReplicaCount, "ReplicationController ReplicaSet cound does not match initial Replica count") + gomega.Expect(rcStatus.Status.Replicas).To(gomega.Equal(testRcInitialReplicaCount), "ReplicationController 
ReplicaSet cound does not match initial Replica count") rcScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{ Spec: autoscalingv1.ScaleSpec{ @@ -338,7 +339,7 @@ var _ = SIGDescribe("ReplicationController", func() { ginkgo.By("fetching ReplicationController; ensuring that it's patched") rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch ReplicationController") - framework.ExpectEqual(rc.ObjectMeta.Labels["test-rc"], "patched", "ReplicationController is missing a label from earlier patch") + gomega.Expect(rc.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-rc", "patched"), "ReplicationController is missing a label from earlier patch") rcStatusUpdatePayload := rc rcStatusUpdatePayload.Status.AvailableReplicas = 1 @@ -432,13 +433,13 @@ var _ = SIGDescribe("ReplicationController", func() { _, err := rcClient.Create(ctx, rc, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create ReplicationController: %v", err) - err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount)) + err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, true, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount)) framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas") ginkgo.By(fmt.Sprintf("Getting scale subresource for ReplicationController %q", rcName)) scale, err := rcClient.GetScale(ctx, rcName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get scale subresource: %v", err) - framework.ExpectEqual(scale.Status.Replicas, initialRCReplicaCount, "Failed to get the current replica count") + gomega.Expect(scale.Status.Replicas).To(gomega.Equal(initialRCReplicaCount), "Failed to get the current replica count") ginkgo.By("Updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional @@ -447,7 +448,7 @@ var _ = SIGDescribe("ReplicationController", func() { framework.ExpectNoError(err, "Failed to update scale subresource: %v", err) ginkgo.By(fmt.Sprintf("Verifying replicas where modified for replication controller %q", rcName)) - err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount)) + err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, true, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount)) framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas") }) }) @@ -524,7 +525,7 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework } // Sanity check - framework.ExpectEqual(running, replicas, "unexpected number of running and ready pods: %+v", pods.Items) + gomega.Expect(running).To(gomega.Equal(replicas), "unexpected number of running and ready pods: %+v", pods.Items) // Verify that something is listening. 
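Note on the polling migration applied throughout these hunks: `wait.PollImmediateWithContext(ctx, interval, timeout, cond)` becomes `wait.PollUntilContextTimeout(ctx, interval, timeout, true, cond)`, with the extra boolean making the old "immediate" behaviour explicit. A minimal sketch of the new call shape, assuming a caller-supplied condition function; the package and helper names here are invented for illustration and are not part of the patch:

package rcexample

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForReplicaCount polls cond every second for up to a minute.
// The fourth argument (immediate=true) preserves the former
// PollImmediateWithContext semantics of checking once right away.
func waitForReplicaCount(ctx context.Context, cond func(context.Context) (bool, error)) error {
	return wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, true, cond)
}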
framework.Logf("Trying to dial the pod") @@ -554,7 +555,7 @@ func testReplicationControllerConditionCheck(ctx context.Context, f *framework.F quantity := resource.MustParse("2") return (&podQuota).Cmp(quantity) == 0, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("resource quota %q never synced", name) } framework.ExpectNoError(err) @@ -581,7 +582,7 @@ func testReplicationControllerConditionCheck(ctx context.Context, f *framework.F cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure) return cond != nil, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("rc manager never added the failure condition for rc %q: %#v", name, conditions) } framework.ExpectNoError(err) @@ -610,7 +611,7 @@ func testReplicationControllerConditionCheck(ctx context.Context, f *framework.F cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure) return cond == nil, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("rc manager never removed the failure condition for rc %q: %#v", name, conditions) } framework.ExpectNoError(err) @@ -732,7 +733,7 @@ func updateReplicationControllerWithRetries(ctx context.Context, c clientset.Int updateErr = err return false, nil }) - if pollErr == wait.ErrWaitTimeout { + if wait.Interrupted(pollErr) { pollErr = fmt.Errorf("couldn't apply the provided updated to rc %q: %v", name, updateErr) } return rc, pollErr @@ -778,7 +779,7 @@ func watchUntilWithoutRetry(ctx context.Context, watcher watch.Interface, condit } case <-ctx.Done(): - return lastEvent, wait.ErrWaitTimeout + return lastEvent, wait.ErrorInterrupted(errors.New("timed out waiting for the condition")) } } } diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 007f048681bbc..82b78e9eb196a 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -45,10 +45,11 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" - imageutils "k8s.io/kubernetes/test/utils/image" + "github.com/onsi/gomega" ) const ( @@ -108,7 +109,7 @@ var _ = SIGDescribe("ReplicaSet", func() { Testname: Replica Set, run basic image Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried. */ - framework.ConformanceIt("should serve a basic image on each replica with a public image ", func(ctx context.Context) { + framework.ConformanceIt("should serve a basic image on each replica with a public image", func(ctx context.Context) { testReplicaSetServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage) }) @@ -221,7 +222,7 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, } // Sanity check - framework.ExpectEqual(running, replicas, "unexpected number of running pods: %+v", pods.Items) + gomega.Expect(running).To(gomega.Equal(replicas), "unexpected number of running pods: %+v", pods.Items) // Verify that something is listening. 
framework.Logf("Trying to dial the pod") @@ -251,7 +252,7 @@ func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) { podQuota := quota.Status.Hard[v1.ResourcePods] return (&podQuota).Cmp(quantity) == 0, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("resource quota %q never synced", name) } framework.ExpectNoError(err) @@ -279,7 +280,7 @@ func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) { return cond != nil, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("rs controller never added the failure condition for replica set %q: %#v", name, conditions) } framework.ExpectNoError(err) @@ -308,7 +309,7 @@ func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) { cond := replicaset.GetCondition(rs.Status, appsv1.ReplicaSetReplicaFailure) return cond == nil, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = fmt.Errorf("rs controller never removed the failure condition for rs %q: %#v", name, conditions) } framework.ExpectNoError(err) @@ -423,8 +424,8 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) { if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } - framework.ExpectEqual(scale.Spec.Replicas, int32(1)) - framework.ExpectEqual(scale.Status.Replicas, int32(1)) + gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1))) + gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1))) ginkgo.By("updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional @@ -433,14 +434,14 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) { if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } - framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) + gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2))) ginkgo.By("verifying the replicaset Spec.Replicas was modified") rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get statefulset resource: %v", err) } - framework.ExpectEqual(*(rs.Spec.Replicas), int32(2)) + gomega.Expect(*(rs.Spec.Replicas)).To(gomega.Equal(int32(2))) ginkgo.By("Patch a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional @@ -457,8 +458,7 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) { rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err) - framework.ExpectEqual(*(rs.Spec.Replicas), int32(4), "replicaset should have 4 replicas") - + gomega.Expect(*(rs.Spec.Replicas)).To(gomega.Equal(int32(4)), "replicaset should have 4 replicas") } // ReplicaSet Replace and Patch tests @@ -585,7 +585,7 @@ func listRSDeleteCollection(ctx context.Context, f *framework.Framework) { ginkgo.By("Listing all ReplicaSets") rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) framework.ExpectNoError(err, "failed to list ReplicaSets") - framework.ExpectEqual(len(rsList.Items), 1, "filtered list wasn't found") + gomega.Expect(rsList.Items).To(gomega.HaveLen(1), "filtered list wasn't found") ginkgo.By("DeleteCollection of the ReplicaSets") err = rsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) @@ 
-594,7 +594,7 @@ func listRSDeleteCollection(ctx context.Context, f *framework.Framework) { ginkgo.By("After DeleteCollection verify that ReplicaSets have been deleted") rsList, err = c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) framework.ExpectNoError(err, "failed to list ReplicaSets") - framework.ExpectEqual(len(rsList.Items), 0, "filtered list should have no replicas") + gomega.Expect(rsList.Items).To(gomega.BeEmpty(), "filtered list should have no replicas") } func testRSStatus(ctx context.Context, f *framework.Framework) { diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index baa36de7638ee..a59e4789be11a 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -77,6 +77,8 @@ const ( statefulSetTimeout = 10 * time.Minute // statefulPodTimeout is a timeout for stateful pods to change state statefulPodTimeout = 5 * time.Minute + + testFinalizer = "example.com/test-finalizer" ) var httpProbe = &v1.Probe{ @@ -204,9 +206,9 @@ var _ = SIGDescribe("StatefulSet", func() { pod := pods.Items[0] controllerRef := metav1.GetControllerOf(&pod) gomega.Expect(controllerRef).ToNot(gomega.BeNil()) - framework.ExpectEqual(controllerRef.Kind, ss.Kind) - framework.ExpectEqual(controllerRef.Name, ss.Name) - framework.ExpectEqual(controllerRef.UID, ss.UID) + gomega.Expect(controllerRef.Kind).To(gomega.Equal(ss.Kind)) + gomega.Expect(controllerRef.Name).To(gomega.Equal(ss.Name)) + gomega.Expect(controllerRef.UID).To(gomega.Equal(ss.UID)) ginkgo.By("Orphaning one of the stateful set's pods") e2epod.NewPodClient(f).Update(ctx, pod.Name, func(pod *v1.Pod) { @@ -343,15 +345,15 @@ var _ = SIGDescribe("StatefulSet", func() { e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision - framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", - ss.Namespace, ss.Name, updateRevision, currentRevision)) + gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s created with update revision %s not equal to current revision %s", + ss.Namespace, ss.Name, updateRevision, currentRevision) pods := e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s", + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s revision %s is not equal to currentRevision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - currentRevision)) + currentRevision) } newImage := NewWebserverImage oldImage := ss.Spec.Template.Spec.Containers[0].Image @@ -370,16 +372,16 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Not applying an update when the partition is greater than the number of replicas") for i := range pods.Items { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to current image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - oldImage)) - 
framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s", + oldImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s has revision %s not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - currentRevision)) + currentRevision) } ginkgo.By("Performing a canary update") @@ -405,27 +407,27 @@ var _ = SIGDescribe("StatefulSet", func() { ss, pods = waitForPartitionedRollingUpdate(ctx, c, ss) for i := range pods.Items { if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to current image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - oldImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s", + oldImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s has revision %s not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - currentRevision)) + currentRevision) } else { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not equal to new image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - newImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s", + newImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s has revision %s not equal to new revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - updateRevision)) + updateRevision) } } @@ -437,27 +439,27 @@ var _ = SIGDescribe("StatefulSet", func() { pods = e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to current image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - oldImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s", + oldImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s has revision %s not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - 
currentRevision)) + currentRevision) } else { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not equal to new image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - newImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s", + newImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s has revision %s not equal to new revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - updateRevision)) + updateRevision) } } @@ -478,38 +480,83 @@ var _ = SIGDescribe("StatefulSet", func() { ss, pods = waitForPartitionedRollingUpdate(ctx, c, ss) for i := range pods.Items { if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to current image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - oldImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s", + oldImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s has revision %s not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - currentRevision)) + currentRevision) } else { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not equal to new image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - newImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s", + newImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s has revision %s not equal to new revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - updateRevision)) + updateRevision) } } } - framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", + gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", ss.Namespace, ss.Name, ss.Status.CurrentRevision, - updateRevision)) + updateRevision) }) + ginkgo.It("should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 without failing container", func(ctx context.Context) { + ginkgo.By("Creating a new StatefulSet without failing 
container") + ss := e2estatefulset.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) + deletingPodForRollingUpdatePartitionTest(ctx, f, c, ns, ss) + }) + + ginkgo.It("should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 with failing container", func(ctx context.Context) { + ginkgo.By("Creating a new StatefulSet with failing container") + ss := e2estatefulset.NewStatefulSet("ss3", ns, headlessSvcName, 3, nil, nil, labels) + ss.Spec.Template.Spec.Containers = append(ss.Spec.Template.Spec.Containers, v1.Container{ + Name: "sleep-exit-with-1", + Image: imageutils.GetE2EImage(imageutils.BusyBox), + Command: []string{"sh", "-c"}, + Args: []string{` + echo "Running in pod $POD_NAME" + _term(){ + echo "Received SIGTERM signal" + if [ "${POD_NAME}" = "ss3-0" ]; then + exit 1 + else + exit 0 + fi + } + trap _term SIGTERM + while true; do + echo "Running in infinite loop in $POD_NAME" + sleep 1 + done + `, + }, + Env: []v1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + }, + }) + deletingPodForRollingUpdatePartitionTest(ctx, f, c, ns, ss) + }) + // Do not mark this as Conformance. // The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs. ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func(ctx context.Context) { @@ -524,15 +571,15 @@ var _ = SIGDescribe("StatefulSet", func() { e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision - framework.ExpectEqual(currentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", - ss.Namespace, ss.Name, updateRevision, currentRevision)) + gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s created with update revision %s not equal to current revision %s", + ss.Namespace, ss.Name, updateRevision, currentRevision) pods := e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - currentRevision)) + currentRevision) } ginkgo.By("Restoring Pods to the current revision") @@ -543,11 +590,11 @@ var _ = SIGDescribe("StatefulSet", func() { ss = getStatefulSet(ctx, c, ss.Namespace, ss.Name) pods = e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - currentRevision)) + currentRevision) } newImage := NewWebserverImage oldImage := ss.Spec.Template.Spec.Containers[0].Image @@ -572,16 +619,16 
@@ var _ = SIGDescribe("StatefulSet", func() { ss = getStatefulSet(ctx, c, ss.Namespace, ss.Name) pods = e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not equal to new image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - newImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s", + newImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s has revision %s not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - updateRevision)) + updateRevision) } }) @@ -865,8 +912,8 @@ var _ = SIGDescribe("StatefulSet", func() { if err != nil { framework.Failf("Failed to get scale subresource: %v", err) } - framework.ExpectEqual(scale.Spec.Replicas, int32(1)) - framework.ExpectEqual(scale.Status.Replicas, int32(1)) + gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1))) + gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1))) ginkgo.By("updating a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional @@ -875,14 +922,14 @@ var _ = SIGDescribe("StatefulSet", func() { if err != nil { framework.Failf("Failed to put scale subresource: %v", err) } - framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) + gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2))) ginkgo.By("verifying the statefulset Spec.Replicas was modified") ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get statefulset resource: %v", err) } - framework.ExpectEqual(*(ss.Spec.Replicas), int32(2)) + gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(2))) ginkgo.By("Patch a scale subresource") scale.ResourceVersion = "" // indicate the scale update should be unconditional @@ -900,7 +947,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("verifying the statefulset Spec.Replicas was modified") ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get statefulset resource: %v", err) - framework.ExpectEqual(*(ss.Spec.Replicas), int32(4), "statefulset should have 4 replicas") + gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(4)), "statefulset should have 4 replicas") }) /* @@ -953,15 +1000,15 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err, "failed to patch Set") ss, err = c.AppsV1().StatefulSets(ns).Get(ctx, ssName, metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get statefulset resource: %v", err) - framework.ExpectEqual(*(ss.Spec.Replicas), ssPatchReplicas, "statefulset should have 2 replicas") - framework.ExpectEqual(ss.Spec.Template.Spec.Containers[0].Image, ssPatchImage, "statefulset not using ssPatchImage. Is using %v", ss.Spec.Template.Spec.Containers[0].Image) + gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(ssPatchReplicas), "statefulset should have 2 replicas") + gomega.Expect(ss.Spec.Template.Spec.Containers[0].Image).To(gomega.Equal(ssPatchImage), "statefulset not using ssPatchImage. 
Is using %v", ss.Spec.Template.Spec.Containers[0].Image) e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) waitForStatus(ctx, c, ss) ginkgo.By("Listing all StatefulSets") ssList, err := c.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{LabelSelector: "test-ss=patched"}) framework.ExpectNoError(err, "failed to list StatefulSets") - framework.ExpectEqual(len(ssList.Items), 1, "filtered list wasn't found") + gomega.Expect(ssList.Items).To(gomega.HaveLen(1), "filtered list wasn't found") ginkgo.By("Delete all of the StatefulSets") err = c.AppsV1().StatefulSets(ns).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "test-ss=patched"}) @@ -970,7 +1017,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Verify that StatefulSets have been deleted") ssList, err = c.AppsV1().StatefulSets("").List(ctx, metav1.ListOptions{LabelSelector: "test-ss=patched"}) framework.ExpectNoError(err, "failed to list StatefulSets") - framework.ExpectEqual(len(ssList.Items), 0, "filtered list should have no Statefulsets") + gomega.Expect(ssList.Items).To(gomega.BeEmpty(), "filtered list should have no Statefulsets") }) /* @@ -1401,7 +1448,7 @@ var _ = SIGDescribe("StatefulSet", func() { framework.ExpectNoError(err) nodeName := pod.Spec.NodeName - framework.ExpectEqual(nodeName, readyNode.Name) + gomega.Expect(nodeName).To(gomega.Equal(readyNode.Name)) node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -1441,7 +1488,7 @@ var _ = SIGDescribe("StatefulSet", func() { pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{LabelSelector: klabels.Everything().String()}) framework.ExpectNoError(err) - framework.ExpectEqual(len(pvcList.Items), 1) + gomega.Expect(pvcList.Items).To(gomega.HaveLen(1)) pvcName := pvcList.Items[0].Name ginkgo.By("Deleting PVC") @@ -1459,7 +1506,7 @@ var _ = SIGDescribe("StatefulSet", func() { e2estatefulset.WaitForStatusReadyReplicas(ctx, c, ss, 1) pod, err = c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.ExpectEqual(pod.Spec.NodeName, readyNode.Name) // confirm the pod was scheduled back to the original node + gomega.Expect(pod.Spec.NodeName).To(gomega.Equal(readyNode.Name)) // confirm the pod was scheduled back to the original node }) }) @@ -1826,7 +1873,7 @@ func lastLine(out string) string { } func pollReadWithTimeout(ctx context.Context, statefulPod statefulPodTester, statefulPodNumber int, key, expectedVal string) error { - err := wait.PollImmediateWithContext(ctx, time.Second, readTimeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, time.Second, readTimeout, true, func(ctx context.Context) (bool, error) { val := statefulPod.read(statefulPodNumber, key) if val == "" { return false, nil @@ -1836,7 +1883,7 @@ func pollReadWithTimeout(ctx context.Context, statefulPod statefulPodTester, sta return true, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { return fmt.Errorf("timed out when trying to read value for key %v from stateful pod %d", key, statefulPodNumber) } return err @@ -1851,15 +1898,15 @@ func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *app e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision - framework.ExpectEqual(currentRevision, updateRevision, 
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", - ss.Namespace, ss.Name, updateRevision, currentRevision)) + gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s created with update revision %s not equal to current revision %s", + ss.Namespace, ss.Name, updateRevision, currentRevision) pods := e2estatefulset.GetPodList(ctx, c, ss) for i := range pods.Items { - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], currentRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s", + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, currentRevision), "Pod %s/%s revision %s is not equal to current revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - currentRevision)) + currentRevision) } e2estatefulset.SortStatefulPods(pods) err = breakPodHTTPProbe(ss, &pods.Items[1]) @@ -1887,22 +1934,22 @@ func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *app framework.ExpectNoError(err) ss, _ = e2estatefulset.WaitForPodReady(ctx, c, ss, pods.Items[1].Name) ss, pods = waitForRollingUpdate(ctx, c, ss) - framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", + gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision), "StatefulSet %s/%s current revision %s does not equal update revision %s on update completion", ss.Namespace, ss.Name, ss.Status.CurrentRevision, - updateRevision)) + updateRevision) for i := range pods.Items { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, newImage, fmt.Sprintf(" Pod %s/%s has image %s not have new image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), "Pod %s/%s has image %s not have new image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - newImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], updateRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s", + newImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, updateRevision), "Pod %s/%s revision %s is not equal to update revision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - updateRevision)) + updateRevision) } ginkgo.By("Rolling back to a previous revision") @@ -1916,7 +1963,7 @@ func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *app framework.ExpectNoError(err) ss = waitForStatus(ctx, c, ss) currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision - framework.ExpectEqual(priorRevision, updateRevision, "Prior revision should equal update revision during roll back") + gomega.Expect(priorRevision).To(gomega.Equal(updateRevision), "Prior revision should equal update revision during roll back") gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision), "Current revision should not equal update revision during roll back") ginkgo.By("Rolling back update in reverse ordinal order") @@ -1925,23 +1972,161 @@ func rollbackTest(ctx context.Context, c clientset.Interface, ns string, ss *app restorePodHTTPProbe(ss, &pods.Items[1]) ss, _ = e2estatefulset.WaitForPodReady(ctx, c, ss, pods.Items[1].Name) ss, pods = 
waitForRollingUpdate(ctx, c, ss) - framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion", + gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(priorRevision), "StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion", ss.Namespace, ss.Name, ss.Status.CurrentRevision, - updateRevision)) + updateRevision) for i := range pods.Items { - framework.ExpectEqual(pods.Items[i].Spec.Containers[0].Image, oldImage, fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s", + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), "Pod %s/%s has image %s not equal to previous image %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, - oldImage)) - framework.ExpectEqual(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], priorRevision, fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s", + oldImage) + gomega.Expect(pods.Items[i].Labels).To(gomega.HaveKeyWithValue(appsv1.StatefulSetRevisionLabel, priorRevision), "Pod %s/%s revision %s is not equal to prior revision %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], + priorRevision) + } +} + +// This function is used canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 +func deletingPodForRollingUpdatePartitionTest(ctx context.Context, f *framework.Framework, c clientset.Interface, ns string, ss *appsv1.StatefulSet) { + setHTTPProbe(ss) + ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy { + return &appsv1.RollingUpdateStatefulSetStrategy{ + Partition: pointer.Int32(1), + } + }(), + } + ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{}) + framework.ExpectNoError(err) + e2estatefulset.WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss) + ss = waitForStatus(ctx, c, ss) + currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision + gomega.Expect(currentRevision).To(gomega.Equal(updateRevision), fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s", + ss.Namespace, ss.Name, updateRevision, currentRevision)) + pods := e2estatefulset.GetPodList(ctx, c, ss) + for i := range pods.Items { + gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision), fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s", pods.Items[i].Namespace, pods.Items[i].Name, pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], - priorRevision)) + currentRevision)) + } + + ginkgo.By("Adding finalizer for pod-0") + pod0name := getStatefulSetPodNameAtIndex(0, ss) + pod0, err := c.CoreV1().Pods(ns).Get(ctx, pod0name, metav1.GetOptions{}) + framework.ExpectNoError(err) + pod0.Finalizers = append(pod0.Finalizers, testFinalizer) + pod0, err = c.CoreV1().Pods(ss.Namespace).Update(ctx, pod0, metav1.UpdateOptions{}) + framework.ExpectNoError(err) + pods.Items[0] = *pod0 + defer e2epod.NewPodClient(f).RemoveFinalizer(ctx, pod0.Name, testFinalizer) + + ginkgo.By("Updating image on StatefulSet") + newImage := NewWebserverImage + oldImage := ss.Spec.Template.Spec.Containers[0].Image + ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to 
%s", oldImage, newImage)) + gomega.Expect(oldImage).ToNot(gomega.Equal(newImage), "Incorrect test setup: should update to a different image") + ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) { + update.Spec.Template.Spec.Containers[0].Image = newImage + }) + framework.ExpectNoError(err) + + ginkgo.By("Creating a new revision") + ss = waitForStatus(ctx, c, ss) + currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision + gomega.Expect(currentRevision).ToNot(gomega.Equal(updateRevision), "Current revision should not equal update revision during rolling update") + + ginkgo.By("Await for all replicas running, all are updated but pod-0") + e2estatefulset.WaitForState(ctx, c, ss, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + ss = set2 + pods = pods2 + if ss.Status.UpdatedReplicas == *ss.Spec.Replicas-1 && ss.Status.Replicas == *ss.Spec.Replicas && ss.Status.ReadyReplicas == *ss.Spec.Replicas { + // rolling updated is not completed, because replica 0 isn't ready + return true, nil + } + return false, nil + }) + + ginkgo.By("Verify pod images before pod-0 deletion and recreation") + for i := range pods.Items { + if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), fmt.Sprintf("Pod %s/%s has image %s not equal to oldimage image %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Spec.Containers[0].Image, + oldImage)) + gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision), fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], + currentRevision)) + } else { + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Spec.Containers[0].Image, + newImage)) + gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision), fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], + updateRevision)) + } + } + + ginkgo.By("Deleting the pod-0 so that kubelet terminates it and StatefulSet controller recreates it") + deleteStatefulPodAtIndex(ctx, c, 0, ss) + ginkgo.By("Await for two replicas to be updated, while the pod-0 is not running") + e2estatefulset.WaitForState(ctx, c, ss, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + ss = set2 + pods = pods2 + return ss.Status.ReadyReplicas == *ss.Spec.Replicas-1, nil + }) + + ginkgo.By(fmt.Sprintf("Removing finalizer from pod-0 (%v/%v) to allow recreation", pod0.Namespace, pod0.Name)) + e2epod.NewPodClient(f).RemoveFinalizer(ctx, pod0.Name, testFinalizer) + + ginkgo.By("Await for recreation of pod-0, so that all replicas are running") + e2estatefulset.WaitForState(ctx, c, ss, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { + ss = set2 + pods = pods2 + return ss.Status.ReadyReplicas == *ss.Spec.Replicas, nil + }) + + ginkgo.By("Verify pod images after pod-0 deletion and recreation") + pods = e2estatefulset.GetPodList(ctx, c, ss) + for i := range pods.Items { + if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) { + 
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage), fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Spec.Containers[0].Image, + oldImage)) + gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision), fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], + currentRevision)) + } else { + gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage), fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Spec.Containers[0].Image, + newImage)) + gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision), fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s", + pods.Items[i].Namespace, + pods.Items[i].Name, + pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel], + updateRevision)) + } } } @@ -2058,7 +2243,7 @@ func updateStatefulSetWithRetries(ctx context.Context, c clientset.Interface, na updateErr = err return false, nil }) - if pollErr == wait.ErrWaitTimeout { + if wait.Interrupted(pollErr) { pollErr = fmt.Errorf("couldn't apply the provided updated to stateful set %q: %v", name, updateErr) } return statefulSet, pollErr diff --git a/test/e2e/apps/ttl_after_finished.go b/test/e2e/apps/ttl_after_finished.go index 67fabeab76a3a..7c8a06963bd57 100644 --- a/test/e2e/apps/ttl_after_finished.go +++ b/test/e2e/apps/ttl_after_finished.go @@ -126,7 +126,7 @@ func finishTime(finishedJob *batchv1.Job) metav1.Time { func updateJobWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) { jobs := c.BatchV1().Jobs(namespace) var updateErr error - pollErr := wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) { + pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) { if job, err = jobs.Get(ctx, name, metav1.GetOptions{}); err != nil { return false, err } @@ -139,7 +139,7 @@ func updateJobWithRetries(ctx context.Context, c clientset.Interface, namespace, updateErr = err return false, nil }) - if pollErr == wait.ErrWaitTimeout { + if wait.Interrupted(pollErr) { pollErr = fmt.Errorf("couldn't apply the provided updated to job %q: %v", name, updateErr) } return job, pollErr @@ -148,7 +148,7 @@ func updateJobWithRetries(ctx context.Context, c clientset.Interface, namespace, // waitForJobDeleting uses c to wait for the Job jobName in namespace ns to have // a non-nil deletionTimestamp (i.e. being deleted). func waitForJobDeleting(ctx context.Context, c clientset.Interface, ns, jobName string) error { - return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) { + return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) { curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{}) if err != nil { return false, err diff --git a/test/e2e/architecture/framework.go b/test/e2e/architecture/framework.go index 4d7d819c006b4..b8b12b950a6d8 100644 --- a/test/e2e/architecture/framework.go +++ b/test/e2e/architecture/framework.go @@ -16,9 +16,7 @@ limitations under the License. 
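The new partition-update test relies on a pod finalizer to control timing: pod-0 is given a custom finalizer, so after it is deleted it stays in Terminating and the StatefulSet controller cannot recreate it until the finalizer is removed. A minimal sketch of that step using plain client-go; the helper and package names are illustrative, while the finalizer value matches the testFinalizer constant added above:

package ssexample

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

const testFinalizer = "example.com/test-finalizer"

// addTestFinalizer appends the test finalizer to the named pod so that a
// subsequent Delete leaves the pod Terminating until the finalizer is
// removed again. Forgetting to remove it leaks the pod and its namespace.
func addTestFinalizer(ctx context.Context, c kubernetes.Interface, ns, podName string) (*v1.Pod, error) {
	pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	pod.Finalizers = append(pod.Finalizers, testFinalizer)
	return c.CoreV1().Pods(ns).Update(ctx, pod, metav1.UpdateOptions{})
}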
package architecture -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-architecture] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("architecture") diff --git a/test/e2e/auth/framework.go b/test/e2e/auth/framework.go index cf3d006234b3d..0c0e3bc8408ff 100644 --- a/test/e2e/auth/framework.go +++ b/test/e2e/auth/framework.go @@ -16,9 +16,7 @@ limitations under the License. package auth -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-auth] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("auth") diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 136b3ad0b1ca0..77337a49f2ffb 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -76,7 +76,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { Token Mount path. All these three files MUST exist and the Service Account mount path MUST be auto mounted to the Container. */ - framework.ConformanceIt("should mount an API token into pods ", func(ctx context.Context) { + framework.ConformanceIt("should mount an API token into pods", func(ctx context.Context) { sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}}, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -159,7 +159,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { include test cases 1a,1b,2a,2b and 2c. In the test cases 1c,3a,3b and 3c the ServiceTokenVolume MUST not be auto mounted. 
*/ - framework.ConformanceIt("should allow opting out of API token automount ", func(ctx context.Context) { + framework.ConformanceIt("should allow opting out of API token automount", func(ctx context.Context) { var err error trueValue := true diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 40941877689c4..f8a830bfd95f2 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -1859,7 +1859,7 @@ func getScaleUpStatus(ctx context.Context, c clientset.Interface) (*scaleUpStatu func waitForScaleUpStatus(ctx context.Context, c clientset.Interface, cond func(s *scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) { var finalErr error var status *scaleUpStatus - err := wait.PollImmediateWithContext(ctx, 5*time.Second, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, true, func(ctx context.Context) (bool, error) { status, finalErr = getScaleUpStatus(ctx, c) if finalErr != nil { return false, nil diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index d0b3fbfe1c1b8..4840a4fad7b75 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -594,7 +594,7 @@ func hpa(name, namespace, deploymentName string, minReplicas, maxReplicas int32, func waitForReplicas(ctx context.Context, deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) { interval := 20 * time.Second - err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) { deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get replication controller %s: %v", deployment, err) @@ -610,7 +610,7 @@ func waitForReplicas(ctx context.Context, deploymentName, namespace string, cs c func ensureDesiredReplicasInRange(ctx context.Context, deploymentName, namespace string, cs clientset.Interface, minDesiredReplicas, maxDesiredReplicas int, timeout time.Duration) { interval := 60 * time.Second - err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) { deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed to get replication controller %s: %v", deployment, err) diff --git a/test/e2e/autoscaling/framework.go b/test/e2e/autoscaling/framework.go index 0392976c4cc40..5dd080ee8455b 100644 --- a/test/e2e/autoscaling/framework.go +++ b/test/e2e/autoscaling/framework.go @@ -16,9 +16,7 @@ limitations under the License. package autoscaling -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. 
-func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-autoscaling] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("autoscaling") diff --git a/test/e2e/cloud/framework.go b/test/e2e/cloud/framework.go index 1d80fbbc93737..8eb4e55409db7 100644 --- a/test/e2e/cloud/framework.go +++ b/test/e2e/cloud/framework.go @@ -16,9 +16,7 @@ limitations under the License. package cloud -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-cloud-provider] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("cloud-provider") diff --git a/test/e2e/cloud/gcp/addon_update.go b/test/e2e/cloud/gcp/addon_update.go deleted file mode 100644 index 6d6a51770e244..0000000000000 --- a/test/e2e/cloud/gcp/addon_update.go +++ /dev/null @@ -1,522 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gcp - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "strings" - "time" - - "golang.org/x/crypto/ssh" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" - e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" - imageutils "k8s.io/kubernetes/test/utils/image" - admissionapi "k8s.io/pod-security-admission/api" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// TODO: it would probably be slightly better to build up the objects -// in the code and then serialize to yaml. -var reconcileAddonController = ` -apiVersion: v1 -kind: ReplicationController -metadata: - name: addon-reconcile-test - namespace: %s - labels: - k8s-app: addon-reconcile-test - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -spec: - replicas: 2 - selector: - k8s-app: addon-reconcile-test - template: - metadata: - labels: - k8s-app: addon-reconcile-test - spec: - containers: - - image: %s - name: addon-reconcile-test - ports: - - containerPort: 9376 - protocol: TCP -` - -// Should update "reconcile" class addon. 
-var reconcileAddonControllerUpdated = ` -apiVersion: v1 -kind: ReplicationController -metadata: - name: addon-reconcile-test - namespace: %s - labels: - k8s-app: addon-reconcile-test - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile - newLabel: addon-reconcile-test -spec: - replicas: 2 - selector: - k8s-app: addon-reconcile-test - template: - metadata: - labels: - k8s-app: addon-reconcile-test - spec: - containers: - - image: %s - name: addon-reconcile-test - ports: - - containerPort: 9376 - protocol: TCP -` - -var ensureExistsAddonService = ` -apiVersion: v1 -kind: Service -metadata: - name: addon-ensure-exists-test - namespace: %s - labels: - k8s-app: addon-ensure-exists-test - addonmanager.kubernetes.io/mode: EnsureExists -spec: - ports: - - port: 9376 - protocol: TCP - targetPort: 9376 - selector: - k8s-app: addon-ensure-exists-test -` - -// Should create but don't update "ensure exist" class addon. -var ensureExistsAddonServiceUpdated = ` -apiVersion: v1 -kind: Service -metadata: - name: addon-ensure-exists-test - namespace: %s - labels: - k8s-app: addon-ensure-exists-test - addonmanager.kubernetes.io/mode: EnsureExists - newLabel: addon-ensure-exists-test -spec: - ports: - - port: 9376 - protocol: TCP - targetPort: 9376 - selector: - k8s-app: addon-ensure-exists-test -` - -var deprecatedLabelAddonService = ` -apiVersion: v1 -kind: Service -metadata: - name: addon-deprecated-label-test - namespace: %s - labels: - k8s-app: addon-deprecated-label-test - kubernetes.io/cluster-service: "true" -spec: - ports: - - port: 9376 - protocol: TCP - targetPort: 9376 - selector: - k8s-app: addon-deprecated-label-test -` - -// Should update addon with label "kubernetes.io/cluster-service=true". -var deprecatedLabelAddonServiceUpdated = ` -apiVersion: v1 -kind: Service -metadata: - name: addon-deprecated-label-test - namespace: %s - labels: - k8s-app: addon-deprecated-label-test - kubernetes.io/cluster-service: "true" - newLabel: addon-deprecated-label-test -spec: - ports: - - port: 9376 - protocol: TCP - targetPort: 9376 - selector: - k8s-app: addon-deprecated-label-test -` - -// Should not create addon without valid label. -var invalidAddonController = ` -apiVersion: v1 -kind: ReplicationController -metadata: - name: invalid-addon-test - namespace: %s - labels: - k8s-app: invalid-addon-test - addonmanager.kubernetes.io/mode: NotMatch -spec: - replicas: 2 - selector: - k8s-app: invalid-addon-test - template: - metadata: - labels: - k8s-app: invalid-addon-test - spec: - containers: - - image: %s - name: invalid-addon-test - ports: - - containerPort: 9376 - protocol: TCP -` - -const ( - addonTestPollInterval = 3 * time.Second - addonTestPollTimeout = 5 * time.Minute - addonNsName = metav1.NamespaceSystem -) - -var serveHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost) - -type stringPair struct { - data, fileName string -} - -var _ = SIGDescribe("Addon update", func() { - - var dir string - var sshClient *ssh.Client - f := framework.NewDefaultFramework("addon-update-test") - f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - - ginkgo.BeforeEach(func() { - // This test requires: - // - SSH master access - // ... so the provider check should be identical to the intersection of - // providers that provide those capabilities. 
- if !framework.ProviderIs("gce") { - return - } - - var err error - sshClient, err = getMasterSSHClient() - framework.ExpectNoError(err, "Failed to get the master SSH client.") - }) - - ginkgo.AfterEach(func() { - if sshClient != nil { - sshClient.Close() - } - }) - - // WARNING: the test is not parallel-friendly! - ginkgo.It("should propagate add-on file changes [Slow]", func(ctx context.Context) { - // This test requires: - // - SSH - // - master access - // ... so the provider check should be identical to the intersection of - // providers that provide those capabilities. - e2eskipper.SkipUnlessProviderIs("gce") - - //these tests are long, so I squeezed several cases in one scenario - framework.ExpectNotEqual(sshClient, nil) - dir = f.Namespace.Name // we use it only to give a unique string for each test execution - - temporaryRemotePathPrefix := "addon-test-dir" - temporaryRemotePath := temporaryRemotePathPrefix + "/" + dir // in home directory on kubernetes-master - defer sshExec(sshClient, fmt.Sprintf("rm -rf %s", temporaryRemotePathPrefix)) // ignore the result in cleanup - sshExecAndVerify(sshClient, fmt.Sprintf("mkdir -p %s", temporaryRemotePath)) - - rcAddonReconcile := "addon-reconcile-controller.yaml" - rcAddonReconcileUpdated := "addon-reconcile-controller-Updated.yaml" - rcInvalid := "invalid-addon-controller.yaml" - - svcAddonDeprecatedLabel := "addon-deprecated-label-service.yaml" - svcAddonDeprecatedLabelUpdated := "addon-deprecated-label-service-updated.yaml" - svcAddonEnsureExists := "addon-ensure-exists-service.yaml" - svcAddonEnsureExistsUpdated := "addon-ensure-exists-service-updated.yaml" - - var remoteFiles = []stringPair{ - {fmt.Sprintf(reconcileAddonController, addonNsName, serveHostnameImage), rcAddonReconcile}, - {fmt.Sprintf(reconcileAddonControllerUpdated, addonNsName, serveHostnameImage), rcAddonReconcileUpdated}, - {fmt.Sprintf(deprecatedLabelAddonService, addonNsName), svcAddonDeprecatedLabel}, - {fmt.Sprintf(deprecatedLabelAddonServiceUpdated, addonNsName), svcAddonDeprecatedLabelUpdated}, - {fmt.Sprintf(ensureExistsAddonService, addonNsName), svcAddonEnsureExists}, - {fmt.Sprintf(ensureExistsAddonServiceUpdated, addonNsName), svcAddonEnsureExistsUpdated}, - {fmt.Sprintf(invalidAddonController, addonNsName, serveHostnameImage), rcInvalid}, - } - - for _, p := range remoteFiles { - err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644) - framework.ExpectNoError(err, "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient) - } - - // directory on kubernetes-master - destinationDirPrefix := "/etc/kubernetes/addons/addon-test-dir" - destinationDir := destinationDirPrefix + "/" + dir - - // cleanup from previous tests - _, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) - framework.ExpectNoError(err, "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient) - - defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup - sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir)) - - ginkgo.By("copy invalid manifests to the destination dir") - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcInvalid, destinationDir, rcInvalid)) - // we will verify at the end of the test that the objects weren't created from the invalid manifests - - ginkgo.By("copy new manifests") - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s 
%s/%s", temporaryRemotePath, rcAddonReconcile, destinationDir, rcAddonReconcile)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonDeprecatedLabel, destinationDir, svcAddonDeprecatedLabel)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists)) - // Delete the "ensure exist class" addon at the end. - defer func() { - framework.Logf("Cleaning up ensure exist class addon.") - err := f.ClientSet.CoreV1().Services(addonNsName).Delete(ctx, "addon-ensure-exists-test", metav1.DeleteOptions{}) - framework.ExpectNoError(err) - }() - - waitForReplicationControllerInAddonTest(ctx, f.ClientSet, addonNsName, "addon-reconcile-test", true) - waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-deprecated-label-test", true) - waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-ensure-exists-test", true) - - // Replace the manifests with new contents. - ginkgo.By("update manifests") - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcAddonReconcileUpdated, destinationDir, rcAddonReconcile)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonDeprecatedLabelUpdated, destinationDir, svcAddonDeprecatedLabel)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExistsUpdated, destinationDir, svcAddonEnsureExists)) - - // Wait for updated addons to have the new added label. - reconcileSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-reconcile-test"})) - waitForReplicationControllerwithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, true, reconcileSelector) - deprecatedLabelSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-deprecated-label-test"})) - waitForServicewithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, true, deprecatedLabelSelector) - // "Ensure exist class" addon should not be updated. - ensureExistSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-ensure-exists-test"})) - waitForServicewithSelectorInAddonTest(ctx, f.ClientSet, addonNsName, false, ensureExistSelector) - - ginkgo.By("remove manifests") - sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcAddonReconcile)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonDeprecatedLabel)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonEnsureExists)) - - waitForReplicationControllerInAddonTest(ctx, f.ClientSet, addonNsName, "addon-reconcile-test", false) - waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-deprecated-label-test", false) - // "Ensure exist class" addon will not be deleted when manifest is removed. - waitForServiceInAddonTest(ctx, f.ClientSet, addonNsName, "addon-ensure-exists-test", true) - - ginkgo.By("verify invalid addons weren't created") - _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get(ctx, "invalid-addon-test", metav1.GetOptions{}) - gomega.Expect(err).To(gomega.HaveOccurred()) - - // Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function. 
- }) -}) - -func waitForServiceInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace, name string, exist bool) { - framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) -} - -func waitForReplicationControllerInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace, name string, exist bool) { - framework.ExpectNoError(waitForReplicationController(ctx, c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) -} - -func waitForServicewithSelectorInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) { - framework.ExpectNoError(waitForServiceWithSelector(ctx, c, addonNamespace, selector, exist, addonTestPollInterval, addonTestPollTimeout)) -} - -func waitForReplicationControllerwithSelectorInAddonTest(ctx context.Context, c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) { - framework.ExpectNoError(waitForReplicationControllerWithSelector(ctx, c, addonNamespace, selector, exist, addonTestPollInterval, - addonTestPollTimeout)) -} - -// waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) -func waitForReplicationController(ctx context.Context, c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { - err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { - _, err := c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - framework.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err) - return !exist, nil - } - framework.Logf("ReplicationController %s in namespace %s found.", name, namespace) - return exist, nil - }) - if err != nil { - stateMsg := map[bool]string{true: "to appear", false: "to disappear"} - return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %w", namespace, name, stateMsg[exist], err) - } - return nil -} - -// waitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false) -func waitForServiceWithSelector(ctx context.Context, c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, - timeout time.Duration) error { - err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { - services, err := c.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) - switch { - case len(services.Items) != 0: - framework.Logf("Service with %s in namespace %s found.", selector.String(), namespace) - return exist, nil - case len(services.Items) == 0: - framework.Logf("Service with %s in namespace %s disappeared.", selector.String(), namespace) - return !exist, nil - case err != nil: - framework.Logf("Non-retryable failure while listing service.") - return false, err - default: - framework.Logf("List service with %s in namespace %s failed: %v", selector.String(), namespace, err) - return false, nil - } - }) - if err != nil { - stateMsg := map[bool]string{true: "to appear", false: "to disappear"} - return fmt.Errorf("error waiting for service with %s in namespace %s %s: %w", selector.String(), namespace, stateMsg[exist], err) - } - return nil -} - -// waitForReplicationControllerWithSelector waits until any RC with given selector appears (exist == true), or disappears 
(exist == false) -func waitForReplicationControllerWithSelector(ctx context.Context, c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval, - timeout time.Duration) error { - err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { - rcs, err := c.CoreV1().ReplicationControllers(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) - switch { - case len(rcs.Items) != 0: - framework.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace) - return exist, nil - case len(rcs.Items) == 0: - framework.Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace) - return !exist, nil - default: - framework.Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err) - return false, nil - } - }) - if err != nil { - stateMsg := map[bool]string{true: "to appear", false: "to disappear"} - return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %w", selector.String(), namespace, stateMsg[exist], err) - } - return nil -} - -// TODO use the ssh.SSH code, either adding an SCP to it or copying files -// differently. -func getMasterSSHClient() (*ssh.Client, error) { - // Get a signer for the provider. - signer, err := e2essh.GetSigner(framework.TestContext.Provider) - if err != nil { - return nil, fmt.Errorf("error getting signer for provider %s: %w", framework.TestContext.Provider, err) - } - - sshUser := os.Getenv("KUBE_SSH_USER") - if sshUser == "" { - sshUser = os.Getenv("USER") - } - config := &ssh.ClientConfig{ - User: sshUser, - Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - } - - host := framework.APIAddress() + ":22" - client, err := ssh.Dial("tcp", host, config) - if err != nil { - return nil, fmt.Errorf("error getting SSH client to host %s: %w", host, err) - } - return client, err -} - -func sshExecAndVerify(client *ssh.Client, cmd string) { - _, _, rc, err := sshExec(client, cmd) - framework.ExpectNoError(err, "Failed to execute %q with ssh client %+v", cmd, client) - gomega.Expect(rc).To(gomega.BeZero(), "error return code from executing command on the cluster: %s", cmd) -} - -func sshExec(client *ssh.Client, cmd string) (string, string, int, error) { - framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr()) - session, err := client.NewSession() - if err != nil { - return "", "", 0, fmt.Errorf("error creating session to host %s: %w", client.RemoteAddr(), err) - } - defer session.Close() - - // Run the command. - code := 0 - var bout, berr bytes.Buffer - - session.Stdout, session.Stderr = &bout, &berr - err = session.Run(cmd) - if err != nil { - // Check whether the command failed to run or didn't complete. - if exiterr, ok := err.(*ssh.ExitError); ok { - // If we got an ExitError and the exit code is nonzero, we'll - // consider the SSH itself successful (just that the command run - // errored on the host). - if code = exiterr.ExitStatus(); code != 0 { - err = nil - } - } else { - // Some other kind of error happened (e.g. an IOError); consider the - // SSH unsuccessful. 
- err = fmt.Errorf("failed running `%s` on %s: %w", cmd, client.RemoteAddr(), err) - } - } - return bout.String(), berr.String(), code, err -} - -func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error { - framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr())) - session, err := sshClient.NewSession() - if err != nil { - return fmt.Errorf("error creating session to host %s: %w", sshClient.RemoteAddr(), err) - } - defer session.Close() - - fileSize := len(data) - pipe, err := session.StdinPipe() - if err != nil { - return err - } - defer pipe.Close() - if err := session.Start(fmt.Sprintf("scp -t %s", dir)); err != nil { - return err - } - fmt.Fprintf(pipe, "C%#o %d %s\n", mode, fileSize, fileName) - io.Copy(pipe, strings.NewReader(data)) - fmt.Fprint(pipe, "\x00") - pipe.Close() - return session.Wait() -} diff --git a/test/e2e/cloud/gcp/apps/framework.go b/test/e2e/cloud/gcp/apps/framework.go index 5f2edc490e2cf..7e768e42a2be6 100644 --- a/test/e2e/cloud/gcp/apps/framework.go +++ b/test/e2e/cloud/gcp/apps/framework.go @@ -16,9 +16,7 @@ limitations under the License. package apps -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-apps] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("apps") diff --git a/test/e2e/cloud/gcp/auth/framework.go b/test/e2e/cloud/gcp/auth/framework.go index 8245c662f0434..f0b0298eb7370 100644 --- a/test/e2e/cloud/gcp/auth/framework.go +++ b/test/e2e/cloud/gcp/auth/framework.go @@ -16,9 +16,7 @@ limitations under the License. package auth -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-auth] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("auth") diff --git a/test/e2e/cloud/gcp/common/upgrade_mechanics.go b/test/e2e/cloud/gcp/common/upgrade_mechanics.go index cc4a3db3f1135..75ff89ec09a5d 100644 --- a/test/e2e/cloud/gcp/common/upgrade_mechanics.go +++ b/test/e2e/cloud/gcp/common/upgrade_mechanics.go @@ -122,7 +122,7 @@ func checkControlPlaneVersion(ctx context.Context, c clientset.Interface, want s framework.Logf("Checking control plane version") var err error var v *version.Info - waitErr := wait.PollImmediateWithContext(ctx, 5*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) { + waitErr := wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) { v, err = c.Discovery().ServerVersion() if err != nil { traceRouteToControlPlane() diff --git a/test/e2e/cloud/gcp/framework.go b/test/e2e/cloud/gcp/framework.go index edd24776ca697..abe47298dab58 100644 --- a/test/e2e/cloud/gcp/framework.go +++ b/test/e2e/cloud/gcp/framework.go @@ -16,9 +16,7 @@ limitations under the License. package gcp -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. 
-func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-cloud-provider-gcp] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("cloud-provider-gcp") diff --git a/test/e2e/cloud/gcp/network/framework.go b/test/e2e/cloud/gcp/network/framework.go index 3e3e946d9f769..055cfa3675bd8 100644 --- a/test/e2e/cloud/gcp/network/framework.go +++ b/test/e2e/cloud/gcp/network/framework.go @@ -16,9 +16,7 @@ limitations under the License. package network -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-network] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("network") diff --git a/test/e2e/cloud/gcp/node/framework.go b/test/e2e/cloud/gcp/node/framework.go index b40fd35c8cad2..7a7ee5d529717 100644 --- a/test/e2e/cloud/gcp/node/framework.go +++ b/test/e2e/cloud/gcp/node/framework.go @@ -16,9 +16,7 @@ limitations under the License. package node -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-node] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("node") diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go index 3010f30056843..bf8961ec41a55 100644 --- a/test/e2e/cloud/gcp/resize_nodes.go +++ b/test/e2e/cloud/gcp/resize_nodes.go @@ -70,7 +70,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { var originalNodeCount int32 ginkgo.BeforeEach(func() { - e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") + e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessNodeCountIsAtLeast(2) ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.By("restoring the original node instance group size") diff --git a/test/e2e/common/network/framework.go b/test/e2e/common/network/framework.go index 3e3e946d9f769..055cfa3675bd8 100644 --- a/test/e2e/common/network/framework.go +++ b/test/e2e/common/network/framework.go @@ -16,9 +16,7 @@ limitations under the License. package network -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. 
-func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-network] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("network") diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go index c5900fa533067..b7adf1492d2e7 100644 --- a/test/e2e/common/node/container_probe.go +++ b/test/e2e/common/node/container_probe.go @@ -857,6 +857,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/healthz", 8080), InitialDelaySeconds: 15, + TimeoutSeconds: 5, FailureThreshold: 1, } pod := livenessSidecarPodSpec(f.Namespace.Name, nil, livenessProbe) @@ -875,6 +876,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain livenessProbe := &v1.Probe{ ProbeHandler: tcpSocketHandler(8080), InitialDelaySeconds: 15, + TimeoutSeconds: 5, FailureThreshold: 1, } pod := livenessSidecarPodSpec(f.Namespace.Name, nil, livenessProbe) diff --git a/test/e2e/common/node/framework.go b/test/e2e/common/node/framework.go index b40fd35c8cad2..884f4bf48bf21 100644 --- a/test/e2e/common/node/framework.go +++ b/test/e2e/common/node/framework.go @@ -16,9 +16,6 @@ limitations under the License. package node -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" -// SIGDescribe annotates the test with the SIG label. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-node] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("node") diff --git a/test/e2e/common/node/lifecycle_hook.go b/test/e2e/common/node/lifecycle_hook.go index eec1b977e2f41..e423ef5c106f5 100644 --- a/test/e2e/common/node/lifecycle_hook.go +++ b/test/e2e/common/node/lifecycle_hook.go @@ -544,3 +544,52 @@ func getSidecarPodWithHook(name string, image string, lifecycle *v1.Lifecycle) * }, } } + +var _ = SIGDescribe("[Feature:PodLifecycleSleepAction]", func() { + f := framework.NewDefaultFramework("pod-lifecycle-sleep-action") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline + var podClient *e2epod.PodClient + + ginkgo.Context("when create a pod with lifecycle hook using sleep action", func() { + ginkgo.BeforeEach(func(ctx context.Context) { + podClient = e2epod.NewPodClient(f) + }) + ginkgo.It("valid prestop hook using sleep action", func(ctx context.Context) { + lifecycle := &v1.Lifecycle{ + PreStop: &v1.LifecycleHandler{ + Sleep: &v1.SleepAction{Seconds: 5}, + }, + } + podWithHook := getPodWithHook("pod-with-prestop-sleep-hook", imageutils.GetPauseImageName(), lifecycle) + ginkgo.By("create the pod with lifecycle hook using sleep action") + podClient.CreateSync(ctx, podWithHook) + ginkgo.By("delete the pod with lifecycle hook using sleep action") + start := time.Now() + podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) + cost := time.Since(start) + // verify that deletion was delayed by sleep seconds + if cost < time.Second*5 || cost > time.Second*10 { + framework.Failf("unexpected delay duration before killing the pod") + } + }) + + ginkgo.It("reduce GracePeriodSeconds during runtime", func(ctx context.Context) { + lifecycle := &v1.Lifecycle{ + PreStop: &v1.LifecycleHandler{ + Sleep: &v1.SleepAction{Seconds: 10}, + }, + } + podWithHook := getPodWithHook("pod-with-prestop-sleep-hook", imageutils.GetPauseImageName(), lifecycle) + ginkgo.By("create the pod with lifecycle hook using sleep action") + podClient.CreateSync(ctx, podWithHook) + 
ginkgo.By("delete the pod with lifecycle hook using sleep action") + start := time.Now() + podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(2), e2epod.DefaultPodDeletionTimeout) + cost := time.Since(start) + // verify that deletion was delayed by sleep seconds + if cost <= time.Second || cost >= time.Second*5 { + framework.Failf("unexpected delay duration before killing the pod") + } + }) + }) +}) diff --git a/test/e2e/common/node/node_lease.go b/test/e2e/common/node/node_lease.go index d320a90deb00f..e1a671d9581f7 100644 --- a/test/e2e/common/node/node_lease.go +++ b/test/e2e/common/node/node_lease.go @@ -27,10 +27,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + admissionapi "k8s.io/pod-security-admission/api" + "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" testutils "k8s.io/kubernetes/test/utils" - admissionapi "k8s.io/pod-security-admission/api" "github.com/google/go-cmp/cmp" "github.com/onsi/ginkgo/v2" @@ -170,7 +171,7 @@ var _ = SIGDescribe("NodeLease", func() { return false, fmt.Errorf("node status heartbeat changed in %s (with no other status changes), was waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration) }) // a timeout is acceptable, since it means we waited 5 minutes and didn't see any unwarranted node status updates - if err != nil && err != wait.ErrWaitTimeout { + if !wait.Interrupted(err) { framework.ExpectNoError(err, "error waiting for infrequent nodestatus update") } diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go index f0620832a2233..da6289061a9d2 100644 --- a/test/e2e/common/node/pods.go +++ b/test/e2e/common/node/pods.go @@ -883,7 +883,7 @@ var _ = SIGDescribe("Pods", func() { // wait for all pods to be deleted ginkgo.By("waiting for all pods to be deleted") - err = wait.PollImmediateWithContext(ctx, podRetryPeriod, f.Timeouts.PodDelete, checkPodListQuantity(f, "type=Testing", 0)) + err = wait.PollUntilContextTimeout(ctx, podRetryPeriod, f.Timeouts.PodDelete, true, checkPodListQuantity(f, "type=Testing", 0)) framework.ExpectNoError(err, "found a pod(s)") }) diff --git a/test/e2e/common/node/runtimeclass.go b/test/e2e/common/node/runtimeclass.go index 3f167faeab942..a6c6447ee18bf 100644 --- a/test/e2e/common/node/runtimeclass.go +++ b/test/e2e/common/node/runtimeclass.go @@ -187,7 +187,7 @@ var _ = SIGDescribe("RuntimeClass", func() { The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery document. The runtimeclasses resource must support create, get, list, watch, update, patch, delete, and deletecollection. 
*/ - framework.ConformanceIt(" should support RuntimeClasses API operations", func(ctx context.Context) { + framework.ConformanceIt("should support RuntimeClasses API operations", func(ctx context.Context) { // Setup rcVersion := "v1" rcClient := f.ClientSet.NodeV1().RuntimeClasses() diff --git a/test/e2e/common/node/secrets.go b/test/e2e/common/node/secrets.go index 7bb2bfdc681d5..de7bc523aec5e 100644 --- a/test/e2e/common/node/secrets.go +++ b/test/e2e/common/node/secrets.go @@ -178,7 +178,7 @@ var _ = SIGDescribe("Secrets", func() { LabelSelector: "testsecret-constant=true", }) framework.ExpectNoError(err, "failed to list secrets") - framework.ExpectNotEqual(len(secretsList.Items), 0, "no secrets found") + gomega.Expect(secretsList.Items).ToNot(gomega.BeEmpty(), "no secrets found") foundCreatedSecret := false var secretCreatedName string diff --git a/test/e2e/common/node/sysctl.go b/test/e2e/common/node/sysctl.go index 438590bc4b2f5..1de5be7272255 100644 --- a/test/e2e/common/node/sysctl.go +++ b/test/e2e/common/node/sysctl.go @@ -73,8 +73,9 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { Testname: Sysctl, test sysctls Description: Pod is created with kernel.shm_rmid_forced sysctl. Kernel.shm_rmid_forced must be set to 1 [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls + [Environment:NotInUserNS]: The test fails in UserNS (as expected): `open /proc/sys/kernel/shm_rmid_forced: permission denied` */ - framework.ConformanceIt("should support sysctls [MinimumKubeletVersion:1.21]", func(ctx context.Context) { + framework.ConformanceIt("should support sysctls [MinimumKubeletVersion:1.21] [Environment:NotInUserNS]", func(ctx context.Context) { pod := testPod() pod.Spec.SecurityContext = &v1.PodSecurityContext{ Sysctls: []v1.Sysctl{ @@ -182,8 +183,9 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { Testname: Sysctl, test sysctls supports slashes Description: Pod is created with kernel/shm_rmid_forced sysctl. Support slashes as sysctl separator. The '/' separator is also accepted in place of a '.' [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls + [Environment:NotInUserNS]: The test fails in UserNS (as expected): `open /proc/sys/kernel/shm_rmid_forced: permission denied` */ - ginkgo.It("should support sysctls with slashes as separator [MinimumKubeletVersion:1.23]", func(ctx context.Context) { + ginkgo.It("should support sysctls with slashes as separator [MinimumKubeletVersion:1.23] [Environment:NotInUserNS]", func(ctx context.Context) { pod := testPod() pod.Spec.SecurityContext = &v1.PodSecurityContext{ Sysctls: []v1.Sysctl{ diff --git a/test/e2e/common/storage/framework.go b/test/e2e/common/storage/framework.go index d3351a06ff214..a7967e83aac4b 100644 --- a/test/e2e/common/storage/framework.go +++ b/test/e2e/common/storage/framework.go @@ -16,9 +16,7 @@ limitations under the License. package storage -import "github.com/onsi/ginkgo/v2" +import "k8s.io/kubernetes/test/e2e/framework" // SIGDescribe annotates the test with the SIG label. 
-func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe("[sig-storage] "+text, body) -} +var SIGDescribe = framework.SIGDescribe("storage") diff --git a/test/e2e/dra/README.md b/test/e2e/dra/README.md index e22110755d47e..5b1ca93865d84 100644 --- a/test/e2e/dra/README.md +++ b/test/e2e/dra/README.md @@ -26,7 +26,7 @@ release 0.20, build kind from latest main branch sources or use Kind release bin ### Build kind node image -After building Kubernetes, in Kubernetes source code tree biuld new node image: +After building Kubernetes, in Kubernetes source code tree build new node image: ```bash $ kind build node-image --image dra/node:latest $(pwd) ``` diff --git a/test/e2e/dra/deploy.go b/test/e2e/dra/deploy.go index 2eab75dc58975..71378705ebb09 100644 --- a/test/e2e/dra/deploy.go +++ b/test/e2e/dra/deploy.go @@ -136,6 +136,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) { ginkgo.By(fmt.Sprintf("deploying driver on nodes %v", nodes.NodeNames)) d.Nodes = map[string]*app.ExamplePlugin{} d.Name = d.f.UniqueName + d.NameSuffix + ".k8s.io" + resources.DriverName = d.Name ctx, cancel := context.WithCancel(context.Background()) if d.NameSuffix != "" { @@ -147,7 +148,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) { d.cleanup = append(d.cleanup, cancel) // The controller is easy: we simply connect to the API server. - d.Controller = app.NewController(d.f.ClientSet, d.Name, resources) + d.Controller = app.NewController(d.f.ClientSet, resources) d.wg.Add(1) go func() { defer d.wg.Done() diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go index 3f803c3db1247..e62801f41f6c2 100644 --- a/test/e2e/dra/dra.go +++ b/test/e2e/dra/dra.go @@ -30,11 +30,13 @@ import ( resourcev1alpha2 "k8s.io/api/resource/v1alpha2" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes" "k8s.io/dynamic-resource-allocation/controller" "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e/dra/test-driver/app" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" admissionapi "k8s.io/pod-security-admission/api" utilpointer "k8s.io/utils/pointer" @@ -515,6 +517,52 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu framework.ExpectNoError(err, "start pod") } }) + + // This test covers aspects of non graceful node shutdown by DRA controller + // More details about this can be found in the KEP: + // https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/2268-non-graceful-shutdown + // NOTE: this test depends on kind. It will only work with kind cluster as it shuts down one of the + // nodes by running `docker stop `, which is very kind-specific. 
+ ginkgo.It("[Serial] [Disruptive] [Slow] must deallocate on non graceful node shutdown", func(ctx context.Context) { + ginkgo.By("create test pod") + parameters := b.parameters() + label := "app.kubernetes.io/instance" + instance := f.UniqueName + "-test-app" + pod := b.podExternal() + pod.Labels[label] = instance + claim := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) + b.create(ctx, parameters, claim, pod) + + ginkgo.By("wait for test pod " + pod.Name + " to run") + labelSelector := labels.SelectorFromSet(labels.Set(pod.Labels)) + pods, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, pod.Namespace, labelSelector, 1, framework.PodStartTimeout) + framework.ExpectNoError(err, "start pod") + runningPod := &pods.Items[0] + + nodeName := runningPod.Spec.NodeName + // Prevent the builder tearDown from failing while waiting for unprepared resources + delete(b.driver.Nodes, nodeName) + ginkgo.By("stop node " + nodeName + " non-gracefully") + _, stderr, err := framework.RunCmd("docker", "stop", nodeName) + gomega.Expect(stderr).To(gomega.BeEmpty()) + framework.ExpectNoError(err) + ginkgo.DeferCleanup(framework.RunCmd, "docker", "start", nodeName) + if ok := e2enode.WaitForNodeToBeNotReady(ctx, f.ClientSet, nodeName, f.Timeouts.NodeNotReady); !ok { + framework.Failf("Node %s failed to enter NotReady state", nodeName) + } + + ginkgo.By("apply out-of-service taint on node " + nodeName) + taint := v1.Taint{ + Key: v1.TaintNodeOutOfService, + Effect: v1.TaintEffectNoExecute, + } + e2enode.AddOrUpdateTaintOnNode(ctx, f.ClientSet, nodeName, taint) + e2enode.ExpectNodeHasTaint(ctx, f.ClientSet, nodeName, &taint) + ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, taint) + + ginkgo.By("waiting for claim to get deallocated") + gomega.Eventually(ctx, framework.GetObject(b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Get, claim.Name, metav1.GetOptions{})).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", gomega.BeNil())) + }) }) ginkgo.Context("with node-local resources", func() { diff --git a/test/e2e/dra/test-driver/app/controller.go b/test/e2e/dra/test-driver/app/controller.go index 68f7d90fb5887..c647796678f9f 100644 --- a/test/e2e/dra/test-driver/app/controller.go +++ b/test/e2e/dra/test-driver/app/controller.go @@ -30,24 +30,101 @@ import ( v1 "k8s.io/api/core/v1" resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + listersv1 "k8s.io/client-go/listers/core/v1" "k8s.io/dynamic-resource-allocation/controller" "k8s.io/klog/v2" ) type Resources struct { + DriverName string DontSetReservedFor bool NodeLocal bool - Nodes []string - MaxAllocations int - Shareable bool + // Nodes is a fixed list of node names on which resources are + // available. Mutually exclusive with NodeLabels. + Nodes []string + // NodeLabels are labels which determine on which nodes resources are + // available. Mutually exclusive with Nodes. + NodeLabels labels.Set + MaxAllocations int + Shareable bool // AllocateWrapper, if set, gets called for each Allocate call. AllocateWrapper AllocateWrapperType } +func (r Resources) AllNodes(nodeLister listersv1.NodeLister) []string { + if len(r.NodeLabels) > 0 { + // Determine nodes with resources dynamically.
+ nodes, _ := nodeLister.List(labels.SelectorFromValidatedSet(r.NodeLabels)) + nodeNames := make([]string, 0, len(nodes)) + for _, node := range nodes { + nodeNames = append(nodeNames, node.Name) + } + return nodeNames + } + return r.Nodes +} + +func (r Resources) NewAllocation(node string, data []byte) *resourcev1alpha2.AllocationResult { + allocation := &resourcev1alpha2.AllocationResult{ + Shareable: r.Shareable, + } + allocation.ResourceHandles = []resourcev1alpha2.ResourceHandle{ + { + DriverName: r.DriverName, + Data: string(data), + }, + } + if node == "" && len(r.NodeLabels) > 0 { + // Available on all nodes matching the labels. + var requirements []v1.NodeSelectorRequirement + for key, value := range r.NodeLabels { + requirements = append(requirements, v1.NodeSelectorRequirement{ + Key: key, + Operator: v1.NodeSelectorOpIn, + Values: []string{value}, + }) + } + allocation.AvailableOnNodes = &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: requirements, + }, + }, + } + } else { + var nodes []string + if node != "" { + // Local to one node. + nodes = append(nodes, node) + } else { + // Available on the fixed set of nodes. + nodes = r.Nodes + } + if len(nodes) > 0 { + allocation.AvailableOnNodes = &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: v1.NodeSelectorOpIn, + Values: nodes, + }, + }, + }, + }, + } + } + } + + return allocation +} + type AllocateWrapperType func(ctx context.Context, claimAllocations []*controller.ClaimAllocation, selectedNode string, handler func(ctx context.Context, @@ -57,8 +134,8 @@ type AllocateWrapperType func(ctx context.Context, claimAllocations []*controlle type ExampleController struct { clientset kubernetes.Interface + nodeLister listersv1.NodeLister resources Resources - driverName string mutex sync.Mutex // allocated maps claim.UID to the node (if network-attached) or empty (if not). @@ -70,11 +147,10 @@ type ExampleController struct { numAllocations, numDeallocations int64 } -func NewController(clientset kubernetes.Interface, driverName string, resources Resources) *ExampleController { +func NewController(clientset kubernetes.Interface, resources Resources) *ExampleController { c := &ExampleController{ - clientset: clientset, - resources: resources, - driverName: driverName, + clientset: clientset, + resources: resources, allocated: make(map[types.UID]string), claimsPerNode: make(map[string]int), @@ -84,7 +160,8 @@ func NewController(clientset kubernetes.Interface, driverName string, resources func (c *ExampleController) Run(ctx context.Context, workers int) { informerFactory := informers.NewSharedInformerFactory(c.clientset, 0 /* resync period */) - ctrl := controller.New(ctx, c.driverName, c, c.clientset, informerFactory) + ctrl := controller.New(ctx, c.resources.DriverName, c, c.clientset, informerFactory) + c.nodeLister = informerFactory.Core().V1().Nodes().Lister() ctrl.SetReservedFor(!c.resources.DontSetReservedFor) informerFactory.Start(ctx.Done()) ctrl.Run(workers) @@ -190,13 +267,14 @@ func (c *ExampleController) allocateOne(ctx context.Context, claim *resourcev1al logger.V(3).V(3).Info("already allocated") } else { logger.V(3).Info("starting", "selectedNode", selectedNode) + nodes := c.resources.AllNodes(c.nodeLister) if c.resources.NodeLocal { node = selectedNode if node == "" { // If none has been selected because we do immediate allocation, // then we need to pick one ourselves. 
var viableNodes []string - for _, n := range c.resources.Nodes { + for _, n := range nodes { if c.resources.MaxAllocations == 0 || c.claimsPerNode[n] < c.resources.MaxAllocations { viableNodes = append(viableNodes, n) @@ -209,7 +287,7 @@ func (c *ExampleController) allocateOne(ctx context.Context, claim *resourcev1al // number of allocations (even spreading) or the most (packing). node = viableNodes[rand.Intn(len(viableNodes))] logger.V(3).Info("picked a node ourselves", "selectedNode", selectedNode) - } else if !contains(c.resources.Nodes, node) || + } else if !contains(nodes, node) || c.resources.MaxAllocations > 0 && c.claimsPerNode[node] >= c.resources.MaxAllocations { return nil, fmt.Errorf("resources exhausted on node %q", node) @@ -222,9 +300,6 @@ func (c *ExampleController) allocateOne(ctx context.Context, claim *resourcev1al } } - allocation := &resourcev1alpha2.AllocationResult{ - Shareable: c.resources.Shareable, - } p := parameters{ EnvVars: make(map[string]string), NodeName: node, @@ -235,33 +310,7 @@ func (c *ExampleController) allocateOne(ctx context.Context, claim *resourcev1al if err != nil { return nil, fmt.Errorf("encode parameters: %w", err) } - allocation.ResourceHandles = []resourcev1alpha2.ResourceHandle{ - { - DriverName: c.driverName, - Data: string(data), - }, - } - var nodes []string - if node != "" { - nodes = append(nodes, node) - } else { - nodes = c.resources.Nodes - } - if len(nodes) > 0 { - allocation.AvailableOnNodes = &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "kubernetes.io/hostname", - Operator: v1.NodeSelectorOpIn, - Values: nodes, - }, - }, - }, - }, - } - } + allocation := c.resources.NewAllocation(node, data) if !alreadyAllocated { c.numAllocations++ c.allocated[claim.UID] = node @@ -303,6 +352,7 @@ func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, cl // All nodes are suitable. return nil } + nodes := c.resources.AllNodes(c.nodeLister) if c.resources.NodeLocal { for _, claim := range claims { claim.UnsuitableNodes = nil @@ -312,7 +362,7 @@ func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, cl // can only work if a node has capacity left // for all of them. Also, nodes that the driver // doesn't run on cannot be used. 
- if !contains(c.resources.Nodes, node) || + if !contains(nodes, node) || c.claimsPerNode[node]+len(claims) > c.resources.MaxAllocations { claim.UnsuitableNodes = append(claim.UnsuitableNodes, node) } @@ -325,7 +375,7 @@ func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, cl for _, claim := range claims { claim.UnsuitableNodes = nil for _, node := range potentialNodes { - if !contains(c.resources.Nodes, node) || + if !contains(nodes, node) || allocations+len(claims) > c.resources.MaxAllocations { claim.UnsuitableNodes = append(claim.UnsuitableNodes, node) } diff --git a/test/e2e/dra/test-driver/app/server.go b/test/e2e/dra/test-driver/app/server.go index b9127a18d2bbf..8bafdb649fcbe 100644 --- a/test/e2e/dra/test-driver/app/server.go +++ b/test/e2e/dra/test-driver/app/server.go @@ -81,7 +81,9 @@ func NewCommand() *cobra.Command { profilePath := fs.String("pprof-path", "", "The HTTP path where pprof profiling will be available, disabled if empty.") fs = sharedFlagSets.FlagSet("CDI") - driverName := fs.String("drivername", "test-driver.cdi.k8s.io", "Resource driver name.") + driverNameFlagName := "drivername" + driverName := fs.String(driverNameFlagName, "test-driver.cdi.k8s.io", "Resource driver name.") + driverNameFlag := fs.Lookup(driverNameFlagName) fs = sharedFlagSets.FlagSet("other") featureGate := featuregate.NewFeatureGate() @@ -192,6 +194,7 @@ func NewCommand() *cobra.Command { "Duration, in seconds, that the acting leader will retry refreshing leadership before giving up.") leaderElectionRetryPeriod := fs.Duration("leader-election-retry-period", 5*time.Second, "Duration, in seconds, the LeaderElector clients should wait between tries of actions.") + fs = controllerFlagSets.FlagSet("controller") resourceConfig := fs.String("resource-config", "", "A JSON file containing a Resources struct. 
Defaults are unshared, network-attached resources.") fs = controller.Flags() for _, f := range controllerFlagSets.FlagSets { @@ -211,9 +214,12 @@ func NewCommand() *cobra.Command { return fmt.Errorf("parse resource config %q: %w", *resourceConfig, err) } } + if resources.DriverName == "" || driverNameFlag.Changed { + resources.DriverName = *driverName + } run := func() { - controller := NewController(clientset, *driverName, resources) + controller := NewController(clientset, resources) controller.Run(ctx, *workers) } diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 8ef5b337454f8..c069aac9a5074 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -150,7 +150,7 @@ func waitForDaemonSets(ctx context.Context, c clientset.Interface, ns string, al framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns) - return wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) { + return wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) { dsList, err := c.AppsV1().DaemonSets(ns).List(ctx, metav1.ListOptions{}) if err != nil { framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) @@ -429,7 +429,7 @@ func prepullImages(ctx context.Context, c clientset.Interface) { return daemonset.CheckPresentOnNodes(ctx, c, imgPuller, ns, framework.TestContext.CloudConfig.NumNodes) } framework.Logf("Waiting for %s", imgPuller.Name) - err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonset) + err := wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkDaemonset) framework.ExpectNoError(err, "error waiting for image to be pulled") } } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index af333af5e669e..6c2d342e736bb 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -43,6 +43,10 @@ import ( e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" testfixtures "k8s.io/kubernetes/test/fixtures" + // define and freeze constants + _ "k8s.io/kubernetes/test/e2e/feature" + _ "k8s.io/kubernetes/test/e2e/nodefeature" + // test sources _ "k8s.io/kubernetes/test/e2e/apimachinery" _ "k8s.io/kubernetes/test/e2e/apps" diff --git a/test/e2e/feature/feature.go b/test/e2e/feature/feature.go new file mode 100644 index 0000000000000..461f2d0214f52 --- /dev/null +++ b/test/e2e/feature/feature.go @@ -0,0 +1,137 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package feature contains pre-defined features used by test/e2e and/or +// test/e2e_node. 
+package feature + +import ( + "k8s.io/kubernetes/test/e2e/framework" +) + +var ( + APIServerIdentity = framework.WithFeature(framework.ValidFeatures.Add("APIServerIdentity")) + AppArmor = framework.WithFeature(framework.ValidFeatures.Add("AppArmor")) + BootstrapTokens = framework.WithFeature(framework.ValidFeatures.Add("BootstrapTokens")) + BoundServiceAccountTokenVolume = framework.WithFeature(framework.ValidFeatures.Add("BoundServiceAccountTokenVolume")) + CloudProvider = framework.WithFeature(framework.ValidFeatures.Add("CloudProvider")) + ClusterAutoscalerScalability1 = framework.WithFeature(framework.ValidFeatures.Add("ClusterAutoscalerScalability1")) + ClusterAutoscalerScalability2 = framework.WithFeature(framework.ValidFeatures.Add("ClusterAutoscalerScalability2")) + ClusterAutoscalerScalability3 = framework.WithFeature(framework.ValidFeatures.Add("ClusterAutoscalerScalability3")) + ClusterAutoscalerScalability4 = framework.WithFeature(framework.ValidFeatures.Add("ClusterAutoscalerScalability4")) + ClusterAutoscalerScalability5 = framework.WithFeature(framework.ValidFeatures.Add("ClusterAutoscalerScalability5")) + ClusterAutoscalerScalability6 = framework.WithFeature(framework.ValidFeatures.Add("ClusterAutoscalerScalability6")) + ClusterDowngrade = framework.WithFeature(framework.ValidFeatures.Add("ClusterDowngrade")) + ClusterSizeAutoscalingGpu = framework.WithFeature(framework.ValidFeatures.Add("ClusterSizeAutoscalingGpu")) + ClusterSizeAutoscalingScaleDown = framework.WithFeature(framework.ValidFeatures.Add("ClusterSizeAutoscalingScaleDown")) + ClusterSizeAutoscalingScaleUp = framework.WithFeature(framework.ValidFeatures.Add("ClusterSizeAutoscalingScaleUp")) + ClusterUpgrade = framework.WithFeature(framework.ValidFeatures.Add("ClusterUpgrade")) + ComprehensiveNamespaceDraining = framework.WithFeature(framework.ValidFeatures.Add("ComprehensiveNamespaceDraining")) + CPUManager = framework.WithFeature(framework.ValidFeatures.Add("CPUManager")) + CustomMetricsAutoscaling = framework.WithFeature(framework.ValidFeatures.Add("CustomMetricsAutoscaling")) + DeviceManager = framework.WithFeature(framework.ValidFeatures.Add("DeviceManager")) + DevicePluginProbe = framework.WithFeature(framework.ValidFeatures.Add("DevicePluginProbe")) + Downgrade = framework.WithFeature(framework.ValidFeatures.Add("Downgrade")) + DynamicResourceAllocation = framework.WithFeature(framework.ValidFeatures.Add("DynamicResourceAllocation")) + EphemeralStorage = framework.WithFeature(framework.ValidFeatures.Add("EphemeralStorage")) + Example = framework.WithFeature(framework.ValidFeatures.Add("Example")) + ExperimentalResourceUsageTracking = framework.WithFeature(framework.ValidFeatures.Add("ExperimentalResourceUsageTracking")) + Flexvolumes = framework.WithFeature(framework.ValidFeatures.Add("Flexvolumes")) + GKENodePool = framework.WithFeature(framework.ValidFeatures.Add("GKENodePool")) + GPUClusterDowngrade = framework.WithFeature(framework.ValidFeatures.Add("GPUClusterDowngrade")) + GPUClusterUpgrade = framework.WithFeature(framework.ValidFeatures.Add("GPUClusterUpgrade")) + GPUDevicePlugin = framework.WithFeature(framework.ValidFeatures.Add("GPUDevicePlugin")) + GPUMasterUpgrade = framework.WithFeature(framework.ValidFeatures.Add("GPUMasterUpgrade")) + GPUUpgrade = framework.WithFeature(framework.ValidFeatures.Add("GPUUpgrade")) + HAMaster = framework.WithFeature(framework.ValidFeatures.Add("HAMaster")) + HPA = framework.WithFeature(framework.ValidFeatures.Add("HPA")) + HugePages = 
framework.WithFeature(framework.ValidFeatures.Add("HugePages")) + Ingress = framework.WithFeature(framework.ValidFeatures.Add("Ingress")) + IngressScale = framework.WithFeature(framework.ValidFeatures.Add("IngressScale")) + InPlacePodVerticalScaling = framework.WithFeature(framework.ValidFeatures.Add("InPlacePodVerticalScaling")) + IPv6DualStack = framework.WithFeature(framework.ValidFeatures.Add("IPv6DualStack")) + Kind = framework.WithFeature(framework.ValidFeatures.Add("Kind")) + KubeletCredentialProviders = framework.WithFeature(framework.ValidFeatures.Add("KubeletCredentialProviders")) + KubeletSecurity = framework.WithFeature(framework.ValidFeatures.Add("KubeletSecurity")) + KubeProxyDaemonSetDowngrade = framework.WithFeature(framework.ValidFeatures.Add("KubeProxyDaemonSetDowngrade")) + KubeProxyDaemonSetUpgrade = framework.WithFeature(framework.ValidFeatures.Add("KubeProxyDaemonSetUpgrade")) + KubeProxyDaemonSetMigration = framework.WithFeature(framework.ValidFeatures.Add("KubeProxyDaemonSetMigration")) + LabelSelector = framework.WithFeature(framework.ValidFeatures.Add("LabelSelector")) + LocalStorageCapacityIsolation = framework.WithFeature(framework.ValidFeatures.Add("LocalStorageCapacityIsolation")) + LocalStorageCapacityIsolationQuota = framework.WithFeature(framework.ValidFeatures.Add("LocalStorageCapacityIsolationQuota")) + MasterUpgrade = framework.WithFeature(framework.ValidFeatures.Add("MasterUpgrade")) + MemoryManager = framework.WithFeature(framework.ValidFeatures.Add("MemoryManager")) + NEG = framework.WithFeature(framework.ValidFeatures.Add("NEG")) + NetworkingDNS = framework.WithFeature(framework.ValidFeatures.Add("Networking-DNS")) + NetworkingIPv4 = framework.WithFeature(framework.ValidFeatures.Add("Networking-IPv4")) + NetworkingIPv6 = framework.WithFeature(framework.ValidFeatures.Add("Networking-IPv6")) + NetworkingPerformance = framework.WithFeature(framework.ValidFeatures.Add("Networking-Performance")) + NetworkPolicy = framework.WithFeature(framework.ValidFeatures.Add("NetworkPolicy")) + NodeAuthenticator = framework.WithFeature(framework.ValidFeatures.Add("NodeAuthenticator")) + NodeAuthorizer = framework.WithFeature(framework.ValidFeatures.Add("NodeAuthorizer")) + NodeLogQuery = framework.WithFeature(framework.ValidFeatures.Add("NodeLogQuery")) + NodeOutOfServiceVolumeDetach = framework.WithFeature(framework.ValidFeatures.Add("NodeOutOfServiceVolumeDetach")) + NoSNAT = framework.WithFeature(framework.ValidFeatures.Add("NoSNAT")) + PersistentVolumeLastPhaseTransitionTime = framework.WithFeature(framework.ValidFeatures.Add("PersistentVolumeLastPhaseTransitionTime")) + PerformanceDNS = framework.WithFeature(framework.ValidFeatures.Add("PerformanceDNS")) + PodGarbageCollector = framework.WithFeature(framework.ValidFeatures.Add("PodGarbageCollector")) + PodHostIPs = framework.WithFeature(framework.ValidFeatures.Add("PodHostIPs")) + PodLifecycleSleepAction = framework.WithFeature(framework.ValidFeatures.Add("PodLifecycleSleepAction")) + PodPriority = framework.WithFeature(framework.ValidFeatures.Add("PodPriority")) + PodReadyToStartContainersCondition = framework.WithFeature(framework.ValidFeatures.Add("PodReadyToStartContainersCondition")) + PodResources = framework.WithFeature(framework.ValidFeatures.Add("PodResources")) + Reboot = framework.WithFeature(framework.ValidFeatures.Add("Reboot")) + ReclaimPolicy = framework.WithFeature(framework.ValidFeatures.Add("ReclaimPolicy")) + RecoverVolumeExpansionFailure = 
framework.WithFeature(framework.ValidFeatures.Add("RecoverVolumeExpansionFailure")) + Recreate = framework.WithFeature(framework.ValidFeatures.Add("Recreate")) + RegularResourceUsageTracking = framework.WithFeature(framework.ValidFeatures.Add("RegularResourceUsageTracking")) + ScopeSelectors = framework.WithFeature(framework.ValidFeatures.Add("ScopeSelectors")) + SCTPConnectivity = framework.WithFeature(framework.ValidFeatures.Add("SCTPConnectivity")) + SeccompDefault = framework.WithFeature(framework.ValidFeatures.Add("SeccompDefault")) + SELinux = framework.WithFeature(framework.ValidFeatures.Add("SELinux")) + SELinuxMountReadWriteOncePod = framework.WithFeature(framework.ValidFeatures.Add("SELinuxMountReadWriteOncePod")) + SidecarContainers = framework.WithFeature(framework.ValidFeatures.Add("SidecarContainers")) + StackdriverAcceleratorMonitoring = framework.WithFeature(framework.ValidFeatures.Add("StackdriverAcceleratorMonitoring")) + StackdriverCustomMetrics = framework.WithFeature(framework.ValidFeatures.Add("StackdriverCustomMetrics")) + StackdriverExternalMetrics = framework.WithFeature(framework.ValidFeatures.Add("StackdriverExternalMetrics")) + StackdriverMetadataAgent = framework.WithFeature(framework.ValidFeatures.Add("StackdriverMetadataAgent")) + StackdriverMonitoring = framework.WithFeature(framework.ValidFeatures.Add("StackdriverMonitoring")) + StandaloneMode = framework.WithFeature(framework.ValidFeatures.Add("StandaloneMode")) + StatefulSet = framework.WithFeature(framework.ValidFeatures.Add("StatefulSet")) + StatefulSetStartOrdinal = framework.WithFeature(framework.ValidFeatures.Add("StatefulSetStartOrdinal")) + StatefulUpgrade = framework.WithFeature(framework.ValidFeatures.Add("StatefulUpgrade")) + StorageProvider = framework.WithFeature(framework.ValidFeatures.Add("StorageProvider")) + StorageVersionAPI = framework.WithFeature(framework.ValidFeatures.Add("StorageVersionAPI")) + TopologyHints = framework.WithFeature(framework.ValidFeatures.Add("Topology Hints")) + TopologyManager = framework.WithFeature(framework.ValidFeatures.Add("TopologyManager")) + UDP = framework.WithFeature(framework.ValidFeatures.Add("UDP")) + Upgrade = framework.WithFeature(framework.ValidFeatures.Add("Upgrade")) + UserNamespacesSupport = framework.WithFeature(framework.ValidFeatures.Add("UserNamespacesSupport")) + ValidatingAdmissionPolicy = framework.WithFeature(framework.ValidFeatures.Add("ValidatingAdmissionPolicy")) + Volumes = framework.WithFeature(framework.ValidFeatures.Add("Volumes")) + VolumeSnapshotDataSource = framework.WithFeature(framework.ValidFeatures.Add("VolumeSnapshotDataSource")) + VolumeSourceXFS = framework.WithFeature(framework.ValidFeatures.Add("VolumeSourceXFS")) + Vsphere = framework.WithFeature(framework.ValidFeatures.Add("vsphere")) + WatchList = framework.WithFeature(framework.ValidFeatures.Add("WatchList")) + Windows = framework.WithFeature(framework.ValidFeatures.Add("Windows")) + WindowsHostProcessContainers = framework.WithFeature(framework.ValidFeatures.Add("WindowsHostProcessContainers")) + WindowsHyperVContainers = framework.WithFeature(framework.ValidFeatures.Add("WindowsHyperVContainers")) +) + +func init() { + // This prevents adding additional ad-hoc features in tests. 
+ framework.ValidFeatures.Freeze() +} diff --git a/test/e2e/framework/.import-restrictions b/test/e2e/framework/.import-restrictions index f3070eeed8526..660e7453fa7c3 100644 --- a/test/e2e/framework/.import-restrictions +++ b/test/e2e/framework/.import-restrictions @@ -4,21 +4,62 @@ rules: # The following packages are okay to use: # # public API - - selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils)/|^[a-z]+(/|$)|github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils) + - selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils) allowedPrefixes: [ "" ] # stdlib - selectorRegexp: ^[a-z]+(/|$) allowedPrefixes: [ "" ] - # Ginkgo + Gomega. - - selectorRegexp: github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils) + # stdlib x and proto + - selectorRegexp: ^golang.org/x|^google.golang.org/protobuf + allowedPrefixes: [ "" ] + + # Ginkgo + Gomega + - selectorRegexp: ^github.com/onsi/(ginkgo|gomega) + allowedPrefixes: [ "" ] + + # kube-openapi + - selectorRegexp: ^k8s.io/kube-openapi + allowedPrefixes: [ "" ] + + # Public SIG Repos + - selectorRegexp: ^sigs.k8s.io/(json|yaml|structured-merge-diff) allowedPrefixes: [ "" ] # some of the shared test helpers (but not E2E sub-packages!) - selectorRegexp: ^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils) allowedPrefixes: [ "" ] + # Third party deps + - selectorRegexp: ^github.com/|^gopkg.in + allowedPrefixes: [ + "gopkg.in/inf.v0", + "gopkg.in/yaml.v2", + "github.com/blang/semver/", + "github.com/davecgh/go-spew/spew", + "github.com/evanphx/json-patch", + "github.com/go-logr/logr", + "github.com/gogo/protobuf/proto", + "github.com/gogo/protobuf/sortkeys", + "github.com/golang/protobuf/proto", + "github.com/google/gnostic-models/openapiv2", + "github.com/google/gnostic-models/openapiv3", + "github.com/google/go-cmp/cmp", + "github.com/google/go-cmp/cmp/cmpopts", + "github.com/google/gofuzz", + "github.com/google/uuid", + "github.com/imdario/mergo", + "github.com/prometheus/client_golang/", + "github.com/prometheus/client_model/", + "github.com/prometheus/common/", + "github.com/prometheus/procfs", + "github.com/spf13/cobra", + "github.com/spf13/pflag", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require" + ] + # Everything else isn't. # # In particular importing any test/e2e/framework/* package would be a diff --git a/test/e2e/framework/OWNERS b/test/e2e/framework/OWNERS index f12bdef922dc0..75130916b221f 100644 --- a/test/e2e/framework/OWNERS +++ b/test/e2e/framework/OWNERS @@ -2,7 +2,6 @@ approvers: - andrewsykim - - fabriziopandini - pohly - oomichi - neolit123 @@ -10,7 +9,6 @@ approvers: reviewers: - sig-testing-reviewers - andrewsykim - - fabriziopandini - pohly - oomichi - neolit123 @@ -18,4 +16,5 @@ reviewers: labels: - area/e2e-test-framework emeritus_approvers: + - fabriziopandini - timothysc diff --git a/test/e2e/framework/README.md b/test/e2e/framework/README.md index f8ed1eff260ac..2f5e79677fcbc 100644 --- a/test/e2e/framework/README.md +++ b/test/e2e/framework/README.md @@ -4,7 +4,7 @@ The Kubernetes E2E framework simplifies writing Ginkgo tests suites. It's main usage is for these tests suites in the Kubernetes repository itself: - test/e2e: runs as client for a Kubernetes cluster. The e2e.test binary is used for conformance testing. -- test/e2e_node: runs on the same node as a kublet instance. 
Used for testing +- test/e2e_node: runs on the same node as a kubelet instance. Used for testing kubelet. - test/e2e_kubeadm: test suite for kubeadm. diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index d52ce3bb07eca..30b06e88e294f 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -495,7 +495,7 @@ func (rc *ResourceConsumer) GetHpa(ctx context.Context, name string) (*autoscali // WaitForReplicas wait for the desired replicas func (rc *ResourceConsumer) WaitForReplicas(ctx context.Context, desiredReplicas int, duration time.Duration) { interval := 20 * time.Second - err := wait.PollImmediateWithContext(ctx, interval, duration, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, interval, duration, true, func(ctx context.Context) (bool, error) { replicas := rc.GetReplicas(ctx) framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas) return replicas == desiredReplicas, nil // Expected number of replicas found. Exit. @@ -506,7 +506,7 @@ func (rc *ResourceConsumer) WaitForReplicas(ctx context.Context, desiredReplicas // EnsureDesiredReplicasInRange ensure the replicas is in a desired range func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) { interval := 10 * time.Second - err := wait.PollImmediateWithContext(ctx, interval, duration, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, interval, duration, true, func(ctx context.Context) (bool, error) { replicas := rc.GetReplicas(ctx) framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas) as, err := rc.GetHpa(ctx, hpaName) @@ -524,7 +524,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, mi } }) // The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time). - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { framework.Logf("Number of replicas was stable over %v", duration) return } @@ -964,7 +964,7 @@ func CreateCustomResourceDefinition(ctx context.Context, c crdclientset.Interfac crd, err = c.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crdSchema, metav1.CreateOptions{}) framework.ExpectNoError(err) // Wait until just created CRD appears in discovery. - err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, true, func(ctx context.Context) (bool, error) { return ExistsInDiscovery(crd, c, "v1") }) framework.ExpectNoError(err) diff --git a/test/e2e/framework/bugs.go b/test/e2e/framework/bugs.go new file mode 100644 index 0000000000000..a82023533070a --- /dev/null +++ b/test/e2e/framework/bugs.go @@ -0,0 +1,108 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +var ( + bugs []Bug + bugMutex sync.Mutex +) + +// RecordBug stores information about a bug in the E2E suite source code that +// cannot be reported through ginkgo.Fail because it was found outside of some +// test, for example during test registration. +// +// This can be used instead of raising a panic. Then all bugs can be reported +// together instead of failing after the first one. +func RecordBug(bug Bug) { + bugMutex.Lock() + defer bugMutex.Unlock() + + bugs = append(bugs, bug) +} + +type Bug struct { + FileName string + LineNumber int + Message string +} + +// NewBug creates a new bug with a location that is obtained by skipping a certain number +// of stack frames. Passing zero will record the source code location of the direct caller +// of NewBug. +func NewBug(message string, skip int) Bug { + location := types.NewCodeLocation(skip + 1) + return Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message} +} + +// FormatBugs produces a report that includes all bugs recorded earlier via +// RecordBug. An error is returned with the report if there have been bugs. +func FormatBugs() error { + bugMutex.Lock() + defer bugMutex.Unlock() + + if len(bugs) == 0 { + return nil + } + + lines := make([]string, 0, len(bugs)) + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("get current directory: %v", err) + } + // Sort by file name, line number, message. For the sake of simplicity + // this uses the full file name even though the output the may use a + // relative path. Usually the result should be the same because full + // paths will all have the same prefix. + sort.Slice(bugs, func(i, j int) bool { + switch strings.Compare(bugs[i].FileName, bugs[j].FileName) { + case -1: + return true + case 1: + return false + } + if bugs[i].LineNumber < bugs[j].LineNumber { + return true + } + if bugs[i].LineNumber > bugs[j].LineNumber { + return false + } + return bugs[i].Message < bugs[j].Message + }) + for _, bug := range bugs { + // Use relative paths, if possible. + path := bug.FileName + if wd != "" { + if relpath, err := filepath.Rel(wd, bug.FileName); err == nil { + path = relpath + } + } + lines = append(lines, fmt.Sprintf("ERROR: %s:%d: %s\n", path, bug.LineNumber, strings.TrimSpace(bug.Message))) + } + return errors.New(strings.Join(lines, "")) +} diff --git a/test/e2e/framework/events/events.go b/test/e2e/framework/events/events.go index b38c4a1eea825..1ad19b455a8ef 100644 --- a/test/e2e/framework/events/events.go +++ b/test/e2e/framework/events/events.go @@ -34,7 +34,7 @@ type Action func() error // Please note delivery of events is not guaranteed. Asserting on events can lead to flaky tests. 
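Editorial aside: RecordBug and NewBug above exist for problems detected outside of a running spec, typically during test registration, where ginkgo.Fail is not available. A small illustrative sketch of the intended pattern; the helper and its check are assumptions, not part of this change:

package example

import (
	"fmt"
	"strings"

	"k8s.io/kubernetes/test/e2e/framework"
)

// registerName is a hypothetical registration helper. skip=1 attributes the
// recorded bug to the caller of registerName; everything recorded this way is
// reported in one batch by framework.FormatBugs when the suite starts.
func registerName(name string) string {
	if strings.TrimSpace(name) != name {
		framework.RecordBug(framework.NewBug(fmt.Sprintf("name %q must not have leading or trailing spaces", name), 1))
	}
	return name
}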
func WaitTimeoutForEvent(ctx context.Context, c clientset.Interface, namespace, eventSelector, msg string, timeout time.Duration) error { interval := 2 * time.Second - return wait.PollImmediateWithContext(ctx, interval, timeout, eventOccurred(c, namespace, eventSelector, msg)) + return wait.PollUntilContextTimeout(ctx, interval, timeout, true, eventOccurred(c, namespace, eventSelector, msg)) } func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) wait.ConditionWithContextFunc { diff --git a/test/e2e/framework/expect.go b/test/e2e/framework/expect.go index 5ad38f864156c..726e754b33f34 100644 --- a/test/e2e/framework/expect.go +++ b/test/e2e/framework/expect.go @@ -212,8 +212,9 @@ func newAsyncAssertion(ctx context.Context, args []interface{}, consistently boo args: args, // PodStart is used as default because waiting for a pod is the // most common operation. - timeout: TestContext.timeouts.PodStart, - interval: TestContext.timeouts.Poll, + timeout: TestContext.timeouts.PodStart, + interval: TestContext.timeouts.Poll, + consistently: consistently, } } @@ -292,13 +293,6 @@ func (f *FailureError) backtrace() { // } var ErrFailure error = FailureError{} -// ExpectEqual expects the specified two are the same, otherwise an exception raises -// -// Deprecated: use gomega.Expect().To(gomega.Equal()) -func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...) -} - // ExpectNotEqual expects the specified two are not the same, otherwise an exception raises // // Deprecated: use gomega.Expect().ToNot(gomega.Equal()) @@ -362,24 +356,3 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { } Fail(prefix+err.Error(), 1+offset) } - -// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter. -// -// Deprecated: use gomega.Expect().To(gomega.ConsistOf()) instead -func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...) -} - -// ExpectHaveKey expects the actual map has the key in the keyset -// -// Deprecated: use gomega.Expect().To(gomega.HaveKey()) instead -func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...) -} - -// ExpectEmpty expects actual is empty -// -// Deprecated: use gomega.Expect().To(gomega.BeEmpty()) instead -func ExpectEmpty(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...) -} diff --git a/test/e2e/framework/ginkgowrapper.go b/test/e2e/framework/ginkgowrapper.go index e35fc4ae98233..01d226a578b92 100644 --- a/test/e2e/framework/ginkgowrapper.go +++ b/test/e2e/framework/ginkgowrapper.go @@ -17,13 +17,73 @@ limitations under the License. package framework import ( + "fmt" "path" "reflect" + "regexp" + "slices" + "strings" "github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2/types" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/sets" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" +) + +// Feature is the name of a certain feature that the cluster under test must have. +// Such features are different from feature gates. +type Feature string + +// Environment is the name for the environment in which a test can run, like +// "Linux" or "Windows". 
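Editorial aside: the Expect* helpers removed from expect.go above each name their gomega replacement in the deprecation comment. A brief sketch of the equivalent direct assertions (the pod and error values are placeholders):

package example

import (
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

// checkPod uses the plain gomega matchers that replace ExpectEqual,
// ExpectHaveKey and ExpectEmpty; ExpectConsistOf maps to gomega.ConsistOf in
// the same way.
func checkPod(pod *v1.Pod, errs []error) {
	gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod phase")
	gomega.Expect(pod.Labels).To(gomega.HaveKey("app"))
	gomega.Expect(errs).To(gomega.BeEmpty())
}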
+type Environment string + +// NodeFeature is the name of a feature that a node must support. To be +// removed, see +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-testing/3041-node-conformance-and-features#nodefeature. +type NodeFeature string + +type Valid[T comparable] struct { + items sets.Set[T] + frozen bool +} + +// Add registers a new valid item name. The expected usage is +// +// var SomeFeature = framework.ValidFeatures.Add("Some") +// +// during the init phase of an E2E suite. Individual tests should not register +// their own, to avoid uncontrolled proliferation of new items. E2E suites can, +// but don't have to, enforce that by freezing the set of valid names. +func (v *Valid[T]) Add(item T) T { + if v.frozen { + RecordBug(NewBug(fmt.Sprintf(`registry %T is already frozen, "%v" must not be added anymore`, *v, item), 1)) + } + if v.items == nil { + v.items = sets.New[T]() + } + if v.items.Has(item) { + RecordBug(NewBug(fmt.Sprintf(`registry %T already contains "%v", it must not be added again`, *v, item), 1)) + } + v.items.Insert(item) + return item +} + +func (v *Valid[T]) Freeze() { + v.frozen = true +} + +// These variables contain the parameters that [WithFeature], [WithEnvironment] +// and [WithNodeFeatures] accept. The framework itself has no pre-defined +// constants. Test suites and tests may define their own and then add them here +// before calling these With functions. +var ( + ValidFeatures Valid[Feature] + ValidEnvironments Valid[Environment] + ValidNodeFeatures Valid[NodeFeature] ) var errInterface = reflect.TypeOf((*error)(nil)).Elem() @@ -65,8 +125,355 @@ func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocati return codeLocation } +// SIGDescribe returns a wrapper function for ginkgo.Describe which injects +// the SIG name as annotation. The parameter should be lowercase with +// no spaces and no sig- or SIG- prefix. +func SIGDescribe(sig string) func(...interface{}) bool { + if !sigRE.MatchString(sig) || strings.HasPrefix(sig, "sig-") { + RecordBug(NewBug(fmt.Sprintf("SIG label must be lowercase, no spaces and no sig- prefix, got instead: %q", sig), 1)) + } + return func(args ...interface{}) bool { + args = append([]interface{}{WithLabel("sig-" + sig)}, args...) + return registerInSuite(ginkgo.Describe, args) + } +} + +var sigRE = regexp.MustCompile(`^[a-z]+(-[a-z]+)*$`) + // ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier. -func ConformanceIt(text string, args ...interface{}) bool { - args = append(args, ginkgo.Offset(1)) - return ginkgo.It(text+" [Conformance]", args...) +func ConformanceIt(args ...interface{}) bool { + args = append(args, ginkgo.Offset(1), WithConformance()) + return It(args...) +} + +// It is a wrapper around [ginkgo.It] which supports framework With* labels as +// optional arguments in addition to those already supported by ginkgo itself, +// like [ginkgo.Label] and [gingko.Offset]. +// +// Text and arguments may be mixed. The final text is a concatenation +// of the text arguments and special tags from the With functions. +func It(args ...interface{}) bool { + return registerInSuite(ginkgo.It, args) +} + +// It is a shorthand for the corresponding package function. 
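Editorial aside: the generic Valid registry above is what lets a suite control its own tag vocabulary. A sketch of how an E2E suite could register names during package initialization and then freeze the set, mirroring the test/e2e/feature package added in this change; all names below are invented:

package example

import "k8s.io/kubernetes/test/e2e/framework"

var (
	// Add returns the name, so the label value can be built in one statement.
	FancyScheduling = framework.WithFeature(framework.ValidFeatures.Add("FancyScheduling"))
	BareMetal       = framework.WithEnvironment(framework.ValidEnvironments.Add("BareMetal"))
)

func init() {
	// After freezing, further Add calls record a bug instead of extending the
	// registry, which keeps individual tests from inventing ad-hoc tags.
	framework.ValidFeatures.Freeze()
	framework.ValidEnvironments.Freeze()
}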
+func (f *Framework) It(args ...interface{}) bool {
+	return registerInSuite(ginkgo.It, args)
+}
+
+// Describe is a wrapper around [ginkgo.Describe] which supports framework
+// With* labels as optional arguments in addition to those already supported by
+// ginkgo itself, like [ginkgo.Label] and [ginkgo.Offset].
+//
+// Text and arguments may be mixed. The final text is a concatenation
+// of the text arguments and special tags from the With functions.
+func Describe(args ...interface{}) bool {
+	return registerInSuite(ginkgo.Describe, args)
+}
+
+// Describe is a shorthand for the corresponding package function.
+func (f *Framework) Describe(args ...interface{}) bool {
+	return registerInSuite(ginkgo.Describe, args)
+}
+
+// Context is a wrapper around [ginkgo.Context] which supports framework With*
+// labels as optional arguments in addition to those already supported by
+// ginkgo itself, like [ginkgo.Label] and [ginkgo.Offset].
+//
+// Text and arguments may be mixed. The final text is a concatenation
+// of the text arguments and special tags from the With functions.
+func Context(args ...interface{}) bool {
+	return registerInSuite(ginkgo.Context, args)
+}
+
+// Context is a shorthand for the corresponding package function.
+func (f *Framework) Context(args ...interface{}) bool {
+	return registerInSuite(ginkgo.Context, args)
+}
+
+// registerInSuite is the common implementation of all wrapper functions. It
+// expects to be called through one intermediate wrapper.
+func registerInSuite(ginkgoCall func(string, ...interface{}) bool, args []interface{}) bool {
+	var ginkgoArgs []interface{}
+	var offset ginkgo.Offset
+	var texts []string
+
+	addLabel := func(label string) {
+		texts = append(texts, fmt.Sprintf("[%s]", label))
+		ginkgoArgs = append(ginkgoArgs, ginkgo.Label(label))
+	}
+
+	haveEmptyStrings := false
+	for _, arg := range args {
+		switch arg := arg.(type) {
+		case label:
+			fullLabel := strings.Join(arg.parts, ":")
+			addLabel(fullLabel)
+			if arg.extra != "" {
+				addLabel(arg.extra)
+			}
+			if fullLabel == "Serial" {
+				ginkgoArgs = append(ginkgoArgs, ginkgo.Serial)
+			}
+		case ginkgo.Offset:
+			offset = arg
+		case string:
+			if arg == "" {
+				haveEmptyStrings = true
+			}
+			texts = append(texts, arg)
+		default:
+			ginkgoArgs = append(ginkgoArgs, arg)
+		}
+	}
+	offset += 2 // This function and its direct caller.
+
+	// Now that we have the final offset, we can record bugs.
+	if haveEmptyStrings {
+		RecordBug(NewBug("empty strings as separators are unnecessary and need to be removed", int(offset)))
+	}
+
+	// Enforce that text snippets do not start or end with spaces because
+	// those lead to double spaces when concatenating below.
+	for _, text := range texts {
+		if strings.HasPrefix(text, " ") || strings.HasSuffix(text, " ") {
+			RecordBug(NewBug(fmt.Sprintf("trailing or leading spaces are unnecessary and need to be removed: %q", text), int(offset)))
+		}
+	}
+
+	ginkgoArgs = append(ginkgoArgs, offset)
+	text := strings.Join(texts, " ")
+	return ginkgoCall(text, ginkgoArgs...)
+}
+
+// WithFeature specifies that a certain test or group of tests only works
+// with a feature available. The return value must be passed as additional
+// argument to [framework.It], [framework.Describe], [framework.Context].
+//
+// The feature must be listed in ValidFeatures.
+func WithFeature(name Feature) interface{} {
+	return withFeature(name)
+}
+
+// WithFeature is a shorthand for the corresponding package function.
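Editorial aside: registerInSuite above is what turns the With* values into both ginkgo labels and bracketed fragments of the spec name. A sketch of the effect for one container and one spec; the sig, texts, and test body are made up for illustration:

package example

import (
	"context"

	"k8s.io/kubernetes/test/e2e/framework"
)

// The container text becomes "[sig-apps] daemon restart [Serial]" and the full
// spec name "[sig-apps] daemon restart [Serial] recovers within the grace period".
// Because the label is exactly "Serial", registerInSuite also appends
// ginkgo.Serial, so the spec runs sequentially and can be selected with
// --label-filter=Serial.
var _ = framework.SIGDescribe("apps")("daemon restart", framework.WithSerial(), func() {
	framework.It("recovers within the grace period", func(ctx context.Context) {
		// Test body elided.
	})
})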
+func (f *Framework) WithFeature(name Feature) interface{} {
+	return withFeature(name)
+}
+
+func withFeature(name Feature) interface{} {
+	if !ValidFeatures.items.Has(name) {
+		RecordBug(NewBug(fmt.Sprintf("WithFeature: unknown feature %q", name), 2))
+	}
+	return newLabel("Feature", string(name))
+}
+
+// WithFeatureGate specifies that a certain test or group of tests depends on a
+// feature gate being enabled. The return value must be passed as additional
+// argument to [framework.It], [framework.Describe], [framework.Context].
+//
+// The feature gate must be listed in
+// [k8s.io/apiserver/pkg/util/feature.DefaultMutableFeatureGate]. Once a
+// feature gate gets removed from there, the WithFeatureGate calls using it
+// also need to be removed.
+func WithFeatureGate(featureGate featuregate.Feature) interface{} {
+	return withFeatureGate(featureGate)
+}
+
+// WithFeatureGate is a shorthand for the corresponding package function.
+func (f *Framework) WithFeatureGate(featureGate featuregate.Feature) interface{} {
+	return withFeatureGate(featureGate)
+}
+
+func withFeatureGate(featureGate featuregate.Feature) interface{} {
+	spec, ok := utilfeature.DefaultMutableFeatureGate.GetAll()[featureGate]
+	if !ok {
+		RecordBug(NewBug(fmt.Sprintf("WithFeatureGate: the feature gate %q is unknown", featureGate), 2))
+	}
+
+	// We use mixed case (i.e. Beta instead of BETA). GA feature gates have no level string.
+	var level string
+	if spec.PreRelease != "" {
+		level = string(spec.PreRelease)
+		level = strings.ToUpper(level[0:1]) + strings.ToLower(level[1:])
+	}
+
+	l := newLabel("FeatureGate", string(featureGate))
+	l.extra = level
+	return l
+}
+
+// WithEnvironment specifies that a certain test or group of tests only works
+// in a certain environment. The return value must be passed as additional
+// argument to [framework.It], [framework.Describe], [framework.Context].
+//
+// The environment must be listed in ValidEnvironments.
+func WithEnvironment(name Environment) interface{} {
+	return withEnvironment(name)
+}
+
+// WithEnvironment is a shorthand for the corresponding package function.
+func (f *Framework) WithEnvironment(name Environment) interface{} {
+	return withEnvironment(name)
+}
+
+func withEnvironment(name Environment) interface{} {
+	if !ValidEnvironments.items.Has(name) {
+		RecordBug(NewBug(fmt.Sprintf("WithEnvironment: unknown environment %q", name), 2))
+	}
+	return newLabel("Environment", string(name))
+}
+
+// WithNodeFeature specifies that a certain test or group of tests only works
+// if the node supports a certain feature. The return value must be passed as
+// additional argument to [framework.It], [framework.Describe],
+// [framework.Context].
+//
+// The node feature must be listed in ValidNodeFeatures.
+func WithNodeFeature(name NodeFeature) interface{} {
+	return withNodeFeature(name)
+}
+
+// WithNodeFeature is a shorthand for the corresponding package function.
+func (f *Framework) WithNodeFeature(name NodeFeature) interface{} {
+	return withNodeFeature(name)
+}
+
+func withNodeFeature(name NodeFeature) interface{} {
+	if !ValidNodeFeatures.items.Has(name) {
+		RecordBug(NewBug(fmt.Sprintf("WithNodeFeature: unknown environment %q", name), 2))
+	}
+	return newLabel("NodeFeature", string(name))
+}
+
+// WithConformance specifies that a certain test or group of tests must pass in
+// all conformant Kubernetes clusters. The return value must be passed as
+// additional argument to [framework.It], [framework.Describe],
+// [framework.Context].
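Editorial aside: withFeatureGate above derives an extra Alpha or Beta label from the gate's pre-release level. A self-contained sketch with a made-up gate, registered locally only so the lookup succeeds; real tests reference gates that the components already define:

package example

import (
	"k8s.io/apimachinery/pkg/util/runtime"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/component-base/featuregate"
	"k8s.io/kubernetes/test/e2e/framework"
)

// ExampleGate is invented for this sketch.
const ExampleGate featuregate.Feature = "ExampleGate"

// Package-level variables in one file initialize in source order, so the gate
// is registered before the Describe below resolves it.
var _ = func() bool {
	runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		ExampleGate: {Default: false, PreRelease: featuregate.Alpha},
	}))
	return true
}()

// The resulting spec text includes "[FeatureGate:ExampleGate] [Alpha]".
var _ = framework.Describe("gated behavior", framework.WithFeatureGate(ExampleGate), func() {
	framework.It("works when the gate is enabled", func() {
		// Test body elided.
	})
})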
+func WithConformance() interface{} {
+	return withConformance()
+}
+
+// WithConformance is a shorthand for the corresponding package function.
+func (f *Framework) WithConformance() interface{} {
+	return withConformance()
+}
+
+func withConformance() interface{} {
+	return newLabel("Conformance")
+}
+
+// WithNodeConformance specifies that a certain test or group of tests covers
+// node functionality that does not depend on runtime or Kubernetes distro
+// specific behavior. The return value must be passed as additional argument to
+// [framework.It], [framework.Describe], [framework.Context].
+func WithNodeConformance() interface{} {
+	return withNodeConformance()
+}
+
+// WithNodeConformance is a shorthand for the corresponding package function.
+func (f *Framework) WithNodeConformance() interface{} {
+	return withNodeConformance()
+}
+
+func withNodeConformance() interface{} {
+	return newLabel("NodeConformance")
+}
+
+// WithDisruptive specifies that a certain test or group of tests temporarily
+// affects the functionality of the Kubernetes cluster. The return value must
+// be passed as additional argument to [framework.It], [framework.Describe],
+// [framework.Context].
+func WithDisruptive() interface{} {
+	return withDisruptive()
+}
+
+// WithDisruptive is a shorthand for the corresponding package function.
+func (f *Framework) WithDisruptive() interface{} {
+	return withDisruptive()
+}
+
+func withDisruptive() interface{} {
+	return newLabel("Disruptive")
+}
+
+// WithSerial specifies that a certain test or group of tests must not run in
+// parallel with other tests. The return value must be passed as additional
+// argument to [framework.It], [framework.Describe], [framework.Context].
+//
+// Starting with ginkgo v2, serial and parallel tests can be executed in the
+// same invocation. Ginkgo itself will ensure that the serial tests run
+// sequentially.
+func WithSerial() interface{} {
+	return withSerial()
+}
+
+// WithSerial is a shorthand for the corresponding package function.
+func (f *Framework) WithSerial() interface{} {
+	return withSerial()
+}
+
+func withSerial() interface{} {
+	return newLabel("Serial")
+}
+
+// WithSlow specifies that a certain test or group of tests takes a long time
+// to run. The return value must be passed as additional argument to
+// [framework.It], [framework.Describe], [framework.Context].
+func WithSlow() interface{} {
+	return withSlow()
+}
+
+// WithSlow is a shorthand for the corresponding package function.
+func (f *Framework) WithSlow() interface{} {
+	return withSlow()
+}
+
+func withSlow() interface{} {
+	return newLabel("Slow")
+}
+
+// WithLabel is a wrapper around [ginkgo.Label]. Besides adding an arbitrary
+// label to a test, it also injects the label in square brackets into the test
+// name.
+func WithLabel(label string) interface{} {
+	return withLabel(label)
+}
+
+// WithLabel is a shorthand for the corresponding package function.
+func (f *Framework) WithLabel(label string) interface{} {
+	return withLabel(label)
+}
+
+func withLabel(label string) interface{} {
+	return newLabel(label)
+}
+
+type label struct {
+	// parts get concatenated with ":" to build the full label.
+	parts []string
+	// extra is an optional fully-formed extra label.
+	extra string
+}
+
+func newLabel(parts ...string) label {
+	return label{parts: parts}
+}
+
+// TagsEqual can be used to check whether two tags are the same.
+// It's safe to compare e.g.
the result of WithSlow() against the result +// of WithSerial(), the result will be false. False is also returned +// when a parameter is some completely different value. +func TagsEqual(a, b interface{}) bool { + al, ok := a.(label) + if !ok { + return false + } + bl, ok := b.(label) + if !ok { + return false + } + if al.extra != bl.extra { + return false + } + return slices.Equal(al.parts, bl.parts) } diff --git a/test/e2e/framework/ginkgowrapper_test.go b/test/e2e/framework/ginkgowrapper_test.go new file mode 100644 index 0000000000000..99125a08eb96d --- /dev/null +++ b/test/e2e/framework/ginkgowrapper_test.go @@ -0,0 +1,52 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "testing" + + "k8s.io/kubernetes/test/e2e/framework/internal/unittests/features" +) + +func TestTagsEqual(t *testing.T) { + testcases := []struct { + a, b interface{} + expectEqual bool + }{ + {1, 2, false}, + {2, 2, false}, + {WithSlow(), 2, false}, + {WithSlow(), WithSerial(), false}, + {WithSerial(), WithSlow(), false}, + {WithSlow(), WithSlow(), true}, + {WithSerial(), WithSerial(), true}, + {WithLabel("hello"), WithLabel("world"), false}, + {WithLabel("hello"), WithLabel("hello"), true}, + {WithFeatureGate(features.Test), WithLabel("Test"), false}, + {WithFeatureGate(features.Test), WithFeatureGate(features.Test), true}, + } + + for _, tc := range testcases { + t.Run(fmt.Sprintf("%v=%v", tc.a, tc.b), func(t *testing.T) { + actualEqual := TagsEqual(tc.a, tc.b) + if actualEqual != tc.expectEqual { + t.Errorf("expected %v, got %v", tc.expectEqual, actualEqual) + } + }) + } +} diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 908f4cbbabfcc..0275848cacccb 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -196,7 +196,7 @@ func SimpleGET(ctx context.Context, c *http.Client, url, host string) (string, e // expectUnreachable is true, it breaks on first non-healthy http code instead. func PollURL(ctx context.Context, route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error { var lastBody string - pollErr := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) { + pollErr := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) { var err error lastBody, err = SimpleGET(ctx, httpClient, route, host) if err != nil { @@ -733,7 +733,7 @@ func getIngressAddress(ctx context.Context, client clientset.Interface, ns, name // WaitForIngressAddress waits for the Ingress to acquire an address. 
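Editorial aside: the wait-package changes in this and the surrounding hunks all follow one mechanical pattern, the deprecated PollImmediateWithContext becomes PollUntilContextTimeout with an explicit immediate flag. A sketch of the rewrite with placeholder interval, timeout, and condition:

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForReady(ctx context.Context, ready func(ctx context.Context) (bool, error)) error {
	// Before: wait.PollImmediateWithContext(ctx, 2*time.Second, 5*time.Minute, ready)
	// After: the added boolean (true) requests an immediate first check, which is
	// what PollImmediateWithContext did implicitly.
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true, ready)
}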
func (j *TestJig) WaitForIngressAddress(ctx context.Context, c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) { var address string - err := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) { ipOrNameList, err := getIngressAddress(ctx, c, ns, ingName, j.Class) if err != nil || len(ipOrNameList) == 0 { j.Logger.Errorf("Waiting for Ingress %s/%s to acquire IP, error: %v, ipOrNameList: %v", ns, ingName, err, ipOrNameList) @@ -889,7 +889,7 @@ func getPortURL(ctx context.Context, client clientset.Interface, ns, name string // unschedulable, since control plane nodes don't run kube-proxy. Without // kube-proxy NodePorts won't work. var nodes *v1.NodeList - if wait.PollImmediateWithContext(ctx, poll, framework.SingleCallTimeout, func(ctx context.Context) (bool, error) { + if wait.PollUntilContextTimeout(ctx, poll, framework.SingleCallTimeout, true, func(ctx context.Context) (bool, error) { nodes, err = client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{ "spec.unschedulable": "false", }.AsSelector().String()}) diff --git a/test/e2e/framework/internal/output/output.go b/test/e2e/framework/internal/output/output.go index e64c0894e5c2b..4599817ad666d 100644 --- a/test/e2e/framework/internal/output/output.go +++ b/test/e2e/framework/internal/output/output.go @@ -110,6 +110,7 @@ func simplify(in string, expected TestResult) string { out := normalizeLocation(in) out = stripTimes(out) out = stripAddresses(out) + out = normalizeInitFunctions(out) if expected.NormalizeOutput != nil { out = expected.NormalizeOutput(out) } @@ -178,3 +179,12 @@ func normalizeLocation(in string) string { out = klogPrefix.ReplaceAllString(out, " ") return out } + +var initFunc = regexp.MustCompile(`(init\.+func|glob\.+func)`) + +// normalizeInitFunctions maps both init.func (used by Go >= 1.22) and +// glob..func (used by Go < 1.22) to . +func normalizeInitFunctions(in string) string { + out := initFunc.ReplaceAllString(in, "") + return out +} diff --git a/test/e2e/framework/internal/unittests/bugs/bugs.go b/test/e2e/framework/internal/unittests/bugs/bugs.go new file mode 100644 index 0000000000000..dac80ca9a036e --- /dev/null +++ b/test/e2e/framework/internal/unittests/bugs/bugs.go @@ -0,0 +1,171 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bugs + +import ( + "bytes" + "testing" + + "github.com/onsi/ginkgo/v2" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/internal/unittests/bugs/features" +) + +// The line number of the following code is checked in BugOutput below. +// Be careful when moving it around or changing the import statements above. +// Here are some intentionally blank lines that can be removed to compensate +// for future additional import statements. 
+// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// This must be line #50. + +func helper() { + framework.RecordBug(framework.NewBug("new bug", 0)) + framework.RecordBug(framework.NewBug("parent", 1)) +} + +func RecordBugs() { + helper() + framework.RecordBug(framework.Bug{FileName: "buggy/buggy.go", LineNumber: 100, Message: "hello world"}) + framework.RecordBug(framework.Bug{FileName: "some/relative/path/buggy.go", LineNumber: 200, Message: " with spaces \n"}) +} + +var ( + validFeature = framework.ValidFeatures.Add("feature-foo") + validEnvironment = framework.ValidEnvironments.Add("Linux") + validNodeFeature = framework.ValidNodeFeatures.Add("node-feature-foo") +) + +func Describe() { + // Normally a single line would be better, but this is an extreme example and + // thus uses multiple. + framework.SIGDescribe("testing")("abc", + // Bugs in parameters will be attributed to the Describe call, not the line of the parameter. + "", // buggy: not needed + " space1", // buggy: leading white space + "space2 ", // buggy: trailing white space + framework.WithFeature("no-such-feature"), + framework.WithFeature(validFeature), + framework.WithEnvironment("no-such-env"), + framework.WithEnvironment(validEnvironment), + framework.WithNodeFeature("no-such-node-env"), + framework.WithNodeFeature(validNodeFeature), + framework.WithFeatureGate("no-such-feature-gate"), + framework.WithFeatureGate(features.Alpha), + framework.WithFeatureGate(features.Beta), + framework.WithFeatureGate(features.GA), + framework.WithConformance(), + framework.WithNodeConformance(), + framework.WithSlow(), + framework.WithSerial(), + framework.WithDisruptive(), + framework.WithLabel("custom-label"), + "xyz", // okay, becomes part of the final text + func() { + f := framework.NewDefaultFramework("abc") + + framework.Context("y", framework.WithLabel("foo"), func() { + framework.It("should", f.WithLabel("bar"), func() { + }) + }) + + f.Context("x", f.WithLabel("foo"), func() { + f.It("should", f.WithLabel("bar"), func() { + }) + }) + }, + ) + + framework.SIGDescribe("123") +} + +const ( + numBugs = 3 + bugOutput = `ERROR: bugs.go:53: new bug +ERROR: bugs.go:58: parent +ERROR: bugs.go:72: empty strings as separators are unnecessary and need to be removed +ERROR: bugs.go:72: trailing or leading spaces are unnecessary and need to be removed: " space1" +ERROR: bugs.go:72: trailing or leading spaces are unnecessary and need to be removed: "space2 " +ERROR: bugs.go:77: WithFeature: unknown feature "no-such-feature" +ERROR: bugs.go:79: WithEnvironment: unknown environment "no-such-env" +ERROR: bugs.go:81: WithNodeFeature: unknown environment "no-such-node-env" +ERROR: bugs.go:83: WithFeatureGate: the feature gate "no-such-feature-gate" is unknown +ERROR: bugs.go:109: SIG label must be lowercase, no spaces and no sig- prefix, got instead: "123" +ERROR: buggy/buggy.go:100: hello world +ERROR: some/relative/path/buggy.go:200: with spaces +` + // Used by unittests/list-tests. It's sorted by test name, not source code location. 
+ ListTestsOutput = `The following spec names can be used with 'ginkgo run --focus/skip': + ../bugs/bugs.go:103: [sig-testing] abc space1 space2 [Feature:no-such-feature] [Feature:feature-foo] [Environment:no-such-env] [Environment:Linux] [NodeFeature:no-such-node-env] [NodeFeature:node-feature-foo] [FeatureGate:no-such-feature-gate] [FeatureGate:TestAlphaFeature] [Alpha] [FeatureGate:TestBetaFeature] [Beta] [FeatureGate:TestGAFeature] [Conformance] [NodeConformance] [Slow] [Serial] [Disruptive] [custom-label] xyz x [foo] should [bar] + ../bugs/bugs.go:98: [sig-testing] abc space1 space2 [Feature:no-such-feature] [Feature:feature-foo] [Environment:no-such-env] [Environment:Linux] [NodeFeature:no-such-node-env] [NodeFeature:node-feature-foo] [FeatureGate:no-such-feature-gate] [FeatureGate:TestAlphaFeature] [Alpha] [FeatureGate:TestBetaFeature] [Beta] [FeatureGate:TestGAFeature] [Conformance] [NodeConformance] [Slow] [Serial] [Disruptive] [custom-label] xyz y [foo] should [bar] + +` + + // Used by unittests/list-labels. + ListLabelsOutput = `The following labels can be used with 'gingko run --label-filter': + Alpha + Beta + Conformance + Disruptive + Environment:Linux + Environment:no-such-env + Feature:feature-foo + Feature:no-such-feature + FeatureGate:TestAlphaFeature + FeatureGate:TestBetaFeature + FeatureGate:TestGAFeature + FeatureGate:no-such-feature-gate + NodeConformance + NodeFeature:no-such-node-env + NodeFeature:node-feature-foo + Serial + Slow + bar + custom-label + foo + sig-testing + +` +) + +func GetGinkgoOutput(t *testing.T) string { + var buffer bytes.Buffer + ginkgo.GinkgoWriter.TeeTo(&buffer) + t.Cleanup(ginkgo.GinkgoWriter.ClearTeeWriters) + + suiteConfig, reporterConfig := framework.CreateGinkgoConfig() + fakeT := &testing.T{} + ginkgo.RunSpecs(fakeT, "Buggy Suite", suiteConfig, reporterConfig) + + return buffer.String() +} diff --git a/test/e2e/framework/internal/unittests/bugs/bugs_test.go b/test/e2e/framework/internal/unittests/bugs/bugs_test.go new file mode 100644 index 0000000000000..dd8a66c4e189d --- /dev/null +++ b/test/e2e/framework/internal/unittests/bugs/bugs_test.go @@ -0,0 +1,41 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bugs + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/internal/unittests" +) + +func TestBugs(t *testing.T) { + assert.NoError(t, framework.FormatBugs()) + RecordBugs() + Describe() + + err := framework.FormatBugs() + require.Error(t, err) + require.Equal(t, bugOutput, err.Error()) + + output, code := unittests.GetFrameworkOutput(t, nil) + assert.Equal(t, 1, code) + assert.Equal(t, "ERROR: E2E suite initialization was faulty, these errors must be fixed:\n"+bugOutput, output) +} diff --git a/test/e2e/framework/internal/unittests/bugs/features/features.go b/test/e2e/framework/internal/unittests/bugs/features/features.go new file mode 100644 index 0000000000000..092ea8a8bf05c --- /dev/null +++ b/test/e2e/framework/internal/unittests/bugs/features/features.go @@ -0,0 +1,39 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + "k8s.io/apimachinery/pkg/util/runtime" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" +) + +const ( + Alpha featuregate.Feature = "TestAlphaFeature" + Beta featuregate.Feature = "TestBetaFeature" + GA featuregate.Feature = "TestGAFeature" +) + +func init() { + runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(testFeatureGates)) +} + +var testFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + Alpha: {PreRelease: featuregate.Alpha}, + Beta: {PreRelease: featuregate.Beta}, + GA: {PreRelease: featuregate.GA}, +} diff --git a/test/e2e/framework/internal/unittests/features/kube_features.go b/test/e2e/framework/internal/unittests/features/kube_features.go new file mode 100644 index 0000000000000..257701c2bb7d4 --- /dev/null +++ b/test/e2e/framework/internal/unittests/features/kube_features.go @@ -0,0 +1,35 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package features + +import ( + "k8s.io/apimachinery/pkg/util/runtime" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" +) + +const ( + Test featuregate.Feature = "Test" +) + +func init() { + runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)) +} + +var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + Test: {Default: false, PreRelease: featuregate.Alpha}, +} diff --git a/test/e2e/framework/internal/unittests/framework_test.go b/test/e2e/framework/internal/unittests/framework_test.go index 30c8d8d03111a..efb56ba65563d 100644 --- a/test/e2e/framework/internal/unittests/framework_test.go +++ b/test/e2e/framework/internal/unittests/framework_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework_test +package unittests_test import ( "reflect" diff --git a/test/e2e/framework/internal/unittests/helpers.go b/test/e2e/framework/internal/unittests/helpers.go new file mode 100644 index 0000000000000..a3dcc3581ae1d --- /dev/null +++ b/test/e2e/framework/internal/unittests/helpers.go @@ -0,0 +1,61 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unittests + +import ( + "bytes" + "flag" + "testing" + + "github.com/stretchr/testify/require" + "k8s.io/kubernetes/test/e2e/framework" +) + +// GetFrameworkOutput captures writes to framework.Output during a test suite setup +// and returns it together with any explicit Exit call code, -1 if none. +// May only be called once per test binary. +func GetFrameworkOutput(t *testing.T, flags map[string]string) (output string, finalExitCode int) { + // This simulates how test/e2e uses the framework and how users + // invoke test/e2e. + framework.RegisterCommonFlags(flag.CommandLine) + framework.RegisterClusterFlags(flag.CommandLine) + for flagname, value := range flags { + require.NoError(t, flag.Set(flagname, value), "set %s", flagname) + } + var buffer bytes.Buffer + framework.Output = &buffer + framework.Exit = func(code int) { + panic(exitCode(code)) + } + finalExitCode = -1 + defer func() { + if r := recover(); r != nil { + if code, ok := r.(exitCode); ok { + finalExitCode = int(code) + } else { + panic(r) + } + } + output = buffer.String() + }() + framework.AfterReadingAllFlags(&framework.TestContext) + + // Results set by defer. + return +} + +type exitCode int diff --git a/test/e2e/framework/internal/unittests/list-labels/listlabels_test.go b/test/e2e/framework/internal/unittests/list-labels/listlabels_test.go new file mode 100644 index 0000000000000..95b0416d9a46d --- /dev/null +++ b/test/e2e/framework/internal/unittests/list-labels/listlabels_test.go @@ -0,0 +1,35 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package listlabels + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/internal/unittests" + "k8s.io/kubernetes/test/e2e/framework/internal/unittests/bugs" +) + +func TestListTests(t *testing.T) { + bugs.Describe() + framework.CheckForBugs = false + output, code := unittests.GetFrameworkOutput(t, map[string]string{"list-labels": "true"}) + assert.Equal(t, 0, code) + assert.Equal(t, bugs.ListLabelsOutput, output) +} diff --git a/test/e2e/framework/internal/unittests/list-tests/listtests_test.go b/test/e2e/framework/internal/unittests/list-tests/listtests_test.go new file mode 100644 index 0000000000000..4981bd0aeb901 --- /dev/null +++ b/test/e2e/framework/internal/unittests/list-tests/listtests_test.go @@ -0,0 +1,35 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package listtests + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/internal/unittests" + "k8s.io/kubernetes/test/e2e/framework/internal/unittests/bugs" +) + +func TestListTests(t *testing.T) { + bugs.Describe() + framework.CheckForBugs = false + output, code := unittests.GetFrameworkOutput(t, map[string]string{"list-tests": "true"}) + assert.Equal(t, 0, code) + assert.Equal(t, bugs.ListTestsOutput, output) +} diff --git a/test/e2e/framework/job/fixtures.go b/test/e2e/framework/job/fixtures.go index ca0f6af8b28b2..7f5152419f9c0 100644 --- a/test/e2e/framework/job/fixtures.go +++ b/test/e2e/framework/job/fixtures.go @@ -80,16 +80,30 @@ func NewTestJobOnNode(behavior, name string, rPol v1.RestartPolicy, parallelism, SecurityContext: &v1.SecurityContext{}, }, }, - NodeName: nodeName, }, }, }, } + if len(nodeName) > 0 { + job.Spec.Template.Spec.NodeSelector = map[string]string{ + "kubernetes.io/hostname": nodeName, + } + } switch behavior { case "notTerminate": job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"} case "fail": job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"} + case "failOddSucceedEven": + job.Spec.Template.Spec.Containers[0].Command = []string{"sh", "-c"} + job.Spec.Template.Spec.Containers[0].Args = []string{` + if [ $(expr ${JOB_COMPLETION_INDEX} % 2) -ne 0 ]; then + exit 1 + else + exit 0 + fi + `, + } case "succeed": job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"} case "randomlySucceedOrFail": @@ -119,7 +133,7 @@ func NewTestJobOnNode(behavior, name string, rPol v1.RestartPolicy, parallelism, // setup host path directory to pass information between pod restarts func setupHostPathDirectory(job *batchv1.Job) { - if len(job.Spec.Template.Spec.NodeName) > 0 { + if _, nodeNameSpecified := job.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"]; nodeNameSpecified { randomDir := "/tmp/job-e2e/" + rand.String(10) hostPathType := v1.HostPathDirectoryOrCreate job.Spec.Template.Spec.Volumes[0].VolumeSource = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: randomDir, Type: &hostPathType}} diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index 3f94dc03182fe..c40e7815e4e2c 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -18,6 +18,7 @@ package job import ( "context" + "fmt" "time" batchv1 "k8s.io/api/batch/v1" @@ -27,8 +28,17 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/utils/format" + "k8s.io/utils/ptr" ) +// JobState is used to verify if Job matches a particular condition. +// If it matches, an empty string is returned. +// Otherwise, the string explains why the condition is not matched. +// This should be a short string. A dump of the job object will +// get added by the caller. +type JobState func(job *batchv1.Job) string + // WaitForJobPodsRunning wait for all pods for the Job named JobName in namespace ns to become Running. Only use // when pods will run for a long time, or it will be racy. 
 func WaitForJobPodsRunning(ctx context.Context, c clientset.Interface, ns, jobName string, expectedCount int32) error {
@@ -68,6 +78,28 @@ func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName
 	})
 }
 
+// WaitForJobReady waits for a particular value of the Job's .status.ready field.
+func WaitForJobReady(ctx context.Context, c clientset.Interface, ns, jobName string, ready *int32) error {
+	return WaitForJobState(ctx, c, ns, jobName, JobTimeout, func(job *batchv1.Job) string {
+		if ptr.Equal(ready, job.Status.Ready) {
+			return ""
+		}
+		return "job does not match intended ready status"
+	})
+}
+
+// WaitForJobSuspend uses c to wait for the suspend condition of the Job jobName in namespace ns.
+func WaitForJobSuspend(ctx context.Context, c clientset.Interface, ns, jobName string) error {
+	return WaitForJobState(ctx, c, ns, jobName, JobTimeout, func(job *batchv1.Job) string {
+		for _, c := range job.Status.Conditions {
+			if c.Type == batchv1.JobSuspended && c.Status == v1.ConditionTrue {
+				return ""
+			}
+		}
+		return "job should be suspended"
+	})
+}
+
 // WaitForJobFailed uses c to wait for the Job jobName in namespace ns to fail
 func WaitForJobFailed(c clientset.Interface, ns, jobName string) error {
 	return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
@@ -91,7 +123,7 @@ func isJobFailed(j *batchv1.Job) bool {
 
 // WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
 func WaitForJobFinish(ctx context.Context, c clientset.Interface, ns, jobName string) error {
-	return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) {
 		curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -125,7 +157,7 @@ func WaitForJobGone(ctx context.Context, c clientset.Interface, ns, jobName stri
 // WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
 // to be deleted.
 func WaitForAllJobPodsGone(ctx context.Context, c clientset.Interface, ns, jobName string) error {
-	return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) {
 		pods, err := GetJobPods(ctx, c, ns, jobName)
 		if err != nil {
 			return false, err
@@ -133,3 +165,20 @@ func WaitForAllJobPodsGone(ctx context.Context, c clientset.Interface, ns, jobNa
 		return len(pods.Items) == 0, nil
 	})
 }
+
+// WaitForJobState waits for the Job jobName in namespace ns to match the given condition.
+// The condition callback may use gomega.StopTrying to abort early.
+func WaitForJobState(ctx context.Context, c clientset.Interface, ns, jobName string, timeout time.Duration, state JobState) error {
+	return framework.Gomega().
+		Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.BatchV1().Jobs(ns).Get, jobName, metav1.GetOptions{}))).
+		WithTimeout(timeout).
+ Should(framework.MakeMatcher(func(job *batchv1.Job) (func() string, error) { + matches := state(job) + if matches == "" { + return nil, nil + } + return func() string { + return fmt.Sprintf("%v\n%s", matches, format.Object(job, 1)) + }, nil + })) +} diff --git a/test/e2e/framework/log_test.go b/test/e2e/framework/log_test.go index ce8c2d157f7ae..5f74eb3d4abdf 100644 --- a/test/e2e/framework/log_test.go +++ b/test/e2e/framework/log_test.go @@ -24,6 +24,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/gomega" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -36,8 +37,7 @@ import ( // Be careful when moving it around or changing the import statements above. // Here are some intentionally blank lines that can be removed to compensate // for future additional import statements. -// -// This must be line #39. +// This must be line #40. // This is included in a stack backtrace. func failHelper(msg string) { @@ -50,7 +50,7 @@ var _ = ginkgo.Describe("log", func() { }) ginkgo.AfterEach(func() { framework.Logf("after") - framework.ExpectEqual(true, false, "true is never false either") + gomega.Expect(true).To(gomega.BeFalse(), "true is never false either") }) ginkgo.It("fails", func() { func() { @@ -58,14 +58,14 @@ var _ = ginkgo.Describe("log", func() { }() }) ginkgo.It("asserts", func() { - framework.ExpectEqual(false, true, "false is never true") + gomega.Expect(false).To(gomega.BeTrue(), "false is never true") }) ginkgo.It("error", func() { err := errors.New("an error with a long, useless description") framework.ExpectNoError(err, "hard-coded error") }) ginkgo.It("equal", func() { - framework.ExpectEqual(0, 1, "of course it's not equal...") + gomega.Expect(0).To(gomega.Equal(1), "of course it's not equal...") }) ginkgo.It("fails with helper", func() { failHelper("I'm failing with helper.") @@ -109,8 +109,7 @@ INFO: after [FAILED] true is never false either Expected : true -to equal - : false +to be false In [AfterEach] at: log_test.go:53