From 0a7cafea67877e0c182345ae38085d736b0b9627 Mon Sep 17 00:00:00 2001 From: Oliver Gould Date: Mon, 7 Mar 2022 11:53:28 -0800 Subject: [PATCH] Test the policy controller admission webhook (#8008) The policy controller has a validating webhook for `Server` resources, but this functionality is not really tested. Before adding more policy resources that need validation, let's add an integration test that exercises resource validation. The initial test is pretty simplistic, but this is just setup. These tests also help expose two issues: 1. The change in 8760c5f--to solely use the index for validation--is problematic, especially in CI where quick updates can pass validation when they should not. This is fixed by going back to making API calls when validating `Server` resources. 2. Our pod selector overlap detection is overly simplistic. This change updates it to at least detect when a server selects _all_ pods. There's probably more we can do here in followup changes. Tests are added in a new `policy-test` crate that only includes these tests and the utilities they need. This crate is excluded when running unit tests and is only executed when it has a Kubernetes cluster it can execute against. A temporary namespace is created before each test is run and deleted as the test completes. The policy controller's CI workflow is updated to build the core control plane, run a k3d cluster, and exercise tests. This workflow has minimal dependencies on the existing script/CI tooling so that the dependencies are explicit and we can avoid some of the complexity of the existing test infrastructure. 
Signed-off-by: Oliver Gould --- .github/workflows/policy_controller.yml | 120 +++++++++++++++++++-- Cargo.lock | 39 +++++++ Cargo.toml | 2 +- policy-controller/amd64.dockerfile | 3 +- policy-controller/arm.dockerfile | 3 +- policy-controller/arm64.dockerfile | 3 +- policy-controller/k8s/api/src/labels.rs | 10 ++ policy-controller/src/admission.rs | 75 ++++++++----- policy-controller/src/lib.rs | 3 +- policy-controller/src/main.rs | 7 +- policy-test/Cargo.toml | 20 ++++ policy-test/README.md | 46 ++++++++ policy-test/src/admission.rs | 43 ++++++++ policy-test/src/lib.rs | 101 ++++++++++++++++++ policy-test/tests/admit_server.rs | 134 ++++++++++++++++++++++++ 15 files changed, 562 insertions(+), 47 deletions(-) create mode 100644 policy-test/Cargo.toml create mode 100644 policy-test/README.md create mode 100644 policy-test/src/admission.rs create mode 100644 policy-test/src/lib.rs create mode 100644 policy-test/tests/admit_server.rs diff --git a/.github/workflows/policy_controller.yml b/.github/workflows/policy_controller.yml index 2be0a44ed85db..dab8ee7bcc1f4 100644 --- a/.github/workflows/policy_controller.yml +++ b/.github/workflows/policy_controller.yml @@ -3,12 +3,15 @@ name: Policy Controller on: pull_request: paths: - - 'Cargo.lock' - - 'Cargo.toml' - - 'deny.toml' - - 'rust-toolchain' - - 'policy-controller/**' - - '.github/workflows/policy_controller.yml' + - .github/workflows/policy_controller.yml + - Cargo.lock + - Cargo.toml + - charts/linkerd-control-plane/templates/destination-rbac.yaml + - charts/linkerd-crds/templates/policy/** + - deny.toml + - policy-controller/** + - policy-test/** + - rust-toolchain permissions: contents: read @@ -17,6 +20,8 @@ env: CARGO_ACTION_FMT_VERSION: v0.1.3 CARGO_INCREMENTAL: 0 CARGO_NET_RETRY: 10 + K3D_VERSION: v5.3.0 + PROXY_INIT_VERSION: v1.5.3 RUST_BACKTRACE: short RUSTUP_MAX_RETRIES: 10 @@ -58,7 +63,7 @@ jobs: - run: | bin/scurl -o /usr/local/bin/cargo-action-fmt 
"https://github.com/olix0r/cargo-action-fmt/releases/download/release%2F${CARGO_ACTION_FMT_VERSION}/cargo-action-fmt-x86_64-unknown-linux-gnu" chmod 755 /usr/local/bin/cargo-action-fmt - - run: cargo fetch + - run: cargo fetch --locked - run: cargo clippy --frozen --all --no-deps --message-format=json | cargo-action-fmt check: @@ -71,7 +76,7 @@ jobs: - run: | bin/scurl -o /usr/local/bin/cargo-action-fmt "https://github.com/olix0r/cargo-action-fmt/releases/download/release%2F${CARGO_ACTION_FMT_VERSION}/cargo-action-fmt-x86_64-unknown-linux-gnu" chmod 755 /usr/local/bin/cargo-action-fmt - - run: cargo fetch + - run: cargo fetch --locked # Check each crate independently to ensure its Cargo.toml is sufficient. - run: | for toml in $(find . -mindepth 2 -name Cargo.toml | sort -r) @@ -89,9 +94,9 @@ jobs: image: docker://rust:1.59.0 steps: - uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 - - run: cargo fetch - - run: cargo test --workspace --frozen --no-run - - run: cargo test --workspace --frozen + - run: cargo fetch --locked + - run: cargo test --workspace --exclude=linkerd-policy-test --frozen --no-run + - run: cargo test --workspace --exclude=linkerd-policy-test --frozen rust-toolchain: name: rust toolchain @@ -120,3 +125,96 @@ jobs: done exit $ex + + docker_build: + runs-on: ubuntu-20.04 + strategy: + matrix: + component: + - controller + - policy-controller + - proxy + name: Docker build (${{ matrix.component }}) + timeout-minutes: 30 + steps: + - name: Checkout code + uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 + - uses: ./.github/actions/docker-build + with: + docker-registry: ghcr.io/linkerd + docker-target: linux-amd64 + component: ${{ matrix.component }} + # TAG is set by docker-build + - run: echo $TAG + - name: Create artifact with CLI and image archives + run: | + mkdir -p /home/runner/archives + docker save "ghcr.io/linkerd/${{ matrix.component }}:$TAG" \ + >/home/runner/archives/${{ matrix.component }}.tar + - name: 
Upload artifact + uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535 + with: + name: image-archives + path: /home/runner/archives + + integration: + needs: [docker_build] + name: Policy controller integration + runs-on: ubuntu-20.04 + timeout-minutes: 20 + steps: + - name: Checkout code + uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 + + - uses: actions/setup-go@f6164bd8c8acb4a71fb2791a8b6c4024ff038dab + with: + go-version: '1.17' + - name: Build the Linkerd CLI + run: bin/linkerd version --short --client + + - run: bin/scurl -v https://raw.githubusercontent.com/k3d-io/k3d/${K3D_VERSION}/install.sh | bash + - run: k3d --version + - run: k3d cluster create --no-lb --k3s-arg "--no-deploy=local-storage,traefik,servicelb,metrics-server@server:*" + - run: kubectl version + + - name: Download image archives + uses: actions/download-artifact@fb598a63ae348fa914e94cd0ff38f362e927b741 + with: + name: image-archives + path: image-archives + - name: Load images + run: | + docker load > $GITHUB_ENV + cargo version + + - name: Install cargo-action-fmt + run: | + bin/scurl -o /usr/local/bin/cargo-action-fmt "https://github.com/olix0r/cargo-action-fmt/releases/download/release%2F${CARGO_ACTION_FMT_VERSION}/cargo-action-fmt-x86_64-unknown-linux-gnu" + chmod 755 /usr/local/bin/cargo-action-fmt + + - run: cargo fetch --locked + - run: cargo test -p linkerd-policy-test --frozen --no-run | cargo-action-fmt + - run: cargo test -p linkerd-policy-test --frozen diff --git a/Cargo.lock b/Cargo.lock index 23dea112f7120..13d756937005b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -22,6 +22,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + [[package]] name = "anyhow" version = "1.0.55" @@ -921,6 +930,22 @@ dependencies = [ "tracing", ] 
+[[package]] +name = "linkerd-policy-test" +version = "0.1.0" +dependencies = [ + "anyhow", + "k8s-openapi", + "kube", + "linkerd-policy-controller-k8s-api", + "rand", + "serde", + "tokio", + "tokio-test", + "tracing", + "tracing-subscriber", +] + [[package]] name = "linkerd2-proxy-api" version = "0.3.1" @@ -1822,6 +1847,19 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-test" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", +] + [[package]] name = "tokio-util" version = "0.6.9" @@ -2017,6 +2055,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e0ab7bdc962035a87fba73f3acca9b8a8d0034c2e6f60b84aeaaddddc155dce" dependencies = [ + "ansi_term", "lazy_static", "matchers", "regex", diff --git a/Cargo.toml b/Cargo.toml index aa3136c621137..b3408a983d193 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,11 @@ [workspace] -resolver = "2" members = [ "policy-controller", "policy-controller/core", "policy-controller/grpc", "policy-controller/k8s/api", "policy-controller/k8s/index", + "policy-test", ] [profile.release] diff --git a/policy-controller/amd64.dockerfile b/policy-controller/amd64.dockerfile index 22be048573b53..a0688e2697391 100644 --- a/policy-controller/amd64.dockerfile +++ b/policy-controller/amd64.dockerfile @@ -7,9 +7,10 @@ ARG TARGETARCH WORKDIR /build COPY Cargo.toml Cargo.lock . 
COPY policy-controller policy-controller +RUN cargo new policy-test --lib RUN --mount=type=cache,target=target \ --mount=type=cache,from=rust:1.59.0,source=/usr/local/cargo,target=/usr/local/cargo \ - cargo fetch --locked + cargo fetch RUN --mount=type=cache,target=target \ --mount=type=cache,from=rust:1.59.0,source=/usr/local/cargo,target=/usr/local/cargo \ cargo build --frozen --target=x86_64-unknown-linux-gnu --release --package=linkerd-policy-controller && \ diff --git a/policy-controller/arm.dockerfile b/policy-controller/arm.dockerfile index 67558492258c0..cf5c3e545bf9b 100644 --- a/policy-controller/arm.dockerfile +++ b/policy-controller/arm.dockerfile @@ -10,9 +10,10 @@ ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc WORKDIR /build COPY Cargo.toml Cargo.lock . COPY policy-controller policy-controller +RUN cargo new policy-test --lib RUN --mount=type=cache,target=target \ --mount=type=cache,from=rust:1.59.0,source=/usr/local/cargo,target=/usr/local/cargo \ - cargo fetch --locked + cargo fetch # XXX(ver) we can't easily cross-compile against openssl, so use rustls on arm. RUN --mount=type=cache,target=target \ --mount=type=cache,from=rust:1.59.0,source=/usr/local/cargo,target=/usr/local/cargo \ diff --git a/policy-controller/arm64.dockerfile b/policy-controller/arm64.dockerfile index 26e3e69595d77..9b1b7c4727ee6 100644 --- a/policy-controller/arm64.dockerfile +++ b/policy-controller/arm64.dockerfile @@ -10,9 +10,10 @@ ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc WORKDIR /build COPY Cargo.toml Cargo.lock . COPY policy-controller policy-controller +RUN cargo new policy-test --lib RUN --mount=type=cache,target=target \ --mount=type=cache,from=rust:1.59.0,source=/usr/local/cargo,target=/usr/local/cargo \ - cargo fetch --locked + cargo fetch # XXX(ver) we can't easily cross-compile against openssl, so use rustls on arm. 
RUN --mount=type=cache,target=target \ --mount=type=cache,from=rust:1.59.0,source=/usr/local/cargo,target=/usr/local/cargo \ diff --git a/policy-controller/k8s/api/src/labels.rs b/policy-controller/k8s/api/src/labels.rs index 3c5bbb4335d4d..76abcea0a8fe4 100644 --- a/policy-controller/k8s/api/src/labels.rs +++ b/policy-controller/k8s/api/src/labels.rs @@ -61,6 +61,16 @@ impl Selector { } } + /// Indicates whether this label selector matches all pods + pub fn selects_all(&self) -> bool { + match (self.match_labels.as_ref(), self.match_expressions.as_ref()) { + (None, None) => true, + (Some(l), None) => l.is_empty(), + (None, Some(e)) => e.is_empty(), + (Some(l), Some(e)) => l.is_empty() && e.is_empty(), + } + } + pub fn matches(&self, labels: &Labels) -> bool { for expr in self.match_expressions.iter().flatten() { if !expr.matches(labels.as_ref()) { diff --git a/policy-controller/src/admission.rs b/policy-controller/src/admission.rs index 5fc0c1bbbb4ce..2111b2a9b3917 100644 --- a/policy-controller/src/admission.rs +++ b/policy-controller/src/admission.rs @@ -4,14 +4,13 @@ use api::policy::ServerSpec; use futures::future; use hyper::{body::Buf, http, Body, Request, Response}; use kube::{core::DynamicObject, ResourceExt}; -use linkerd_policy_controller_k8s_index::{Index, SharedIndex}; use std::task; use thiserror::Error; use tracing::{debug, info, warn}; #[derive(Clone)] -pub struct Service { - pub index: SharedIndex, +pub struct Admission { + client: kube::Client, } #[derive(Debug, Error)] @@ -23,7 +22,7 @@ pub enum Error { Json(#[from] serde_json::Error), } -impl hyper::service::Service> for Service { +impl hyper::service::Service> for Admission { type Response = Response; type Error = Error; type Future = future::BoxFuture<'static, Result, Error>>; @@ -42,7 +41,7 @@ impl hyper::service::Service> for Service { )); } - let index = self.index.clone(); + let admission = self.clone(); Box::pin(async move { let bytes = hyper::body::aggregate(req.into_body()).await?; let 
review: Review = match serde_json::from_reader(bytes.reader()) { @@ -75,7 +74,7 @@ impl hyper::service::Service> for Service { }; // If validation fails, deny admission. - let rsp = match validate(&ns, &name, &review_spec, &*index.read()) { + let rsp = match admission.validate(&ns, &name, review_spec).await { Ok(()) => rsp, Err(error) => { info!(%error, %ns, %name, "Denying server"); @@ -88,6 +87,48 @@ impl hyper::service::Service> for Service { } } +impl Admission { + pub fn new(client: kube::Client) -> Self { + Self { client } + } + + /// Checks that `spec` doesn't select the same pod/ports as other existing Servers + // + // TODO(ver) this isn't rigorous about detecting servers that select the same port if one port + // specifies a numeric port and the other specifies the port's name. + async fn validate(self, ns: &str, name: &str, spec: api::policy::ServerSpec) -> Result<()> { + // Since we can't ensure that the local index is up-to-date with the API server (i.e. + // updates may be delayed), we issue an API request to get the latest state of servers in + // the namespace. + let servers = kube::Api::::namespaced(self.client, ns) + .list(&kube::api::ListParams::default()) + .await?; + for server in servers.items.into_iter() { + if server.name() != name + && server.spec.port == spec.port + && overlaps(&server.spec.pod_selector, &spec.pod_selector) + { + bail!("identical server spec already exists"); + } + } + + Ok(()) + } +} + +/// Detects whether two pod selectors can select the same pod +// +// TODO(ver) We can probably detect overlapping selectors more effectively. For example, if `left` +// selects pods with 'foo=bar' and `right` selects pods with 'foo', we should indicate the selectors +// overlap. It's a bit tricky to work through all of the cases though, so we'll just punt for now. 
+fn overlaps(left: &api::labels::Selector, right: &api::labels::Selector) -> bool { + if left.selects_all() || right.selects_all() { + return true; + } + + left == right +} + fn json_response(rsp: AdmissionReview) -> Result, Error> { let bytes = serde_json::to_vec(&rsp)?; Ok(Response::builder() @@ -117,25 +158,3 @@ fn parse_server(req: AdmissionRequest) -> Result<(String, String, api::policy::S let spec = serde_json::from_value::(data)?; Ok((ns, name, spec)) } - -/// Validates a new server (`review`) against existing `servers`. -fn validate(ns: &str, name: &str, spec: &api::policy::ServerSpec, index: &Index) -> Result<()> { - if let Some(nsidx) = index.get_ns(ns) { - for (srvname, srv) in nsidx.servers.iter() { - // If the port and pod selectors select the same resources, fail the admission of the - // server. Ignore existing instances of this Server (e.g., if the server's metadata is - // changing). - if srvname != name - // TODO(ver) this isn't rigorous about detecting servers that select the same port if one port - // specifies a numeric port and the other specifies the port's name. - && *srv.port() == spec.port - // TODO(ver) We can probably detect overlapping selectors more effectively. 
- && *srv.pod_selector() == spec.pod_selector - { - bail!("identical server spec already exists"); - } - } - } - - Ok(()) -} diff --git a/policy-controller/src/lib.rs b/policy-controller/src/lib.rs index a8e129af65e84..631fbc5d3c7f7 100644 --- a/policy-controller/src/lib.rs +++ b/policy-controller/src/lib.rs @@ -1,8 +1,9 @@ #![deny(warnings, rust_2018_idioms)] #![forbid(unsafe_code)] -pub mod admission; +mod admission; +pub use self::admission::Admission; pub use linkerd_policy_controller_grpc as grpc; pub use linkerd_policy_controller_k8s_api as api; pub use linkerd_policy_controller_k8s_index as k8s; diff --git a/policy-controller/src/main.rs b/policy-controller/src/main.rs index c0c1f946571ca..eda239156da3c 100644 --- a/policy-controller/src/main.rs +++ b/policy-controller/src/main.rs @@ -5,7 +5,7 @@ use anyhow::{bail, Result}; use clap::Parser; use futures::prelude::*; use kube::api::ListParams; -use linkerd_policy_controller::{admission, k8s}; +use linkerd_policy_controller::{k8s, Admission}; use linkerd_policy_controller_core::IpNet; use std::net::SocketAddr; use tokio::time; @@ -118,7 +118,7 @@ async fn main() -> Result<()> { let serverauthorizations = runtime.watch_all(ListParams::default()); tokio::spawn( - k8s::index_serverauthorizations(index.clone(), serverauthorizations) + k8s::index_serverauthorizations(index, serverauthorizations) .instrument(info_span!("serverauthorizations")), ); @@ -130,7 +130,8 @@ async fn main() -> Result<()> { runtime.shutdown_handle(), )); - let runtime = runtime.spawn_server(|| admission::Service { index }); + let client = runtime.client(); + let runtime = runtime.spawn_server(|| Admission::new(client)); // Block the main thread on the shutdown signal. Once it fires, wait for the background tasks to // complete before exiting. 
diff --git a/policy-test/Cargo.toml b/policy-test/Cargo.toml new file mode 100644 index 0000000000000..12817015728b6 --- /dev/null +++ b/policy-test/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "linkerd-policy-test" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" +publish = false + +[dependencies] +anyhow = "1" +kube = { version = "0.69.1", default-features = false, features = ["client", "native-tls"] } +k8s-openapi = { version = "0.14", features = ["v1_20"] } +linkerd-policy-controller-k8s-api = { path = "../policy-controller/k8s/api" } +rand = "0.8" +serde = "1" +tokio = { version = "1", features = ["macros", "rt"] } +tracing = "0.1" +tracing-subscriber = "0.3" + +[dev-dependencies] +tokio-test = "0.4" diff --git a/policy-test/README.md b/policy-test/README.md new file mode 100644 index 0000000000000..c35eeb3c417d7 --- /dev/null +++ b/policy-test/README.md @@ -0,0 +1,46 @@ +# Policy controller tests + +The `policy-test` crate includes integration tests for the policy controller. + +## Running locally + +### 1. Create a cluster + +The tests run against the default Kubernetes context. You can quickly create a +local cluster with a command like: + +```sh +:; k3d cluster create --no-lb --k3s-arg '--disable=servicelb,traefik@server:0' +``` + +### 2. Build and install (or upgrade) the core control-plane + +The tests require that a Linkerd control plane be installed in the cluster. The +tests create resource in the target cluster and validate that the policy +controller responds as expected. + +You can deploy a development version of the control plane in a local k3d cluster +with: + +```sh +:; bin/docker-build-policy-controller &; \ + bin/docker-build-controller &; \ + bin/docker-build-proxy &; \ + wait && wait && wait && \ + bin/image-load --k3d policy-controller controller proxy && \ + rm -rf target/cli && \ + bin/linkerd install --set 'policyController.logLevel=info\,linkerd=trace\,kubert=trace' \ + | kubectl apply -f - +``` + +### 3. 
Run tests + +The tests will create and delete temporary namespaces for each test. + +```sh +:; cargo test -p linkerd-policy-test +``` + +## Running in CI + +See the [workflow](.github/workflows/policy_controller.yml). diff --git a/policy-test/src/admission.rs b/policy-test/src/admission.rs new file mode 100644 index 0000000000000..3bc2fceb5e9e5 --- /dev/null +++ b/policy-test/src/admission.rs @@ -0,0 +1,43 @@ +use crate::with_temp_ns; + +pub async fn accepts(f: F) +where + F: FnOnce(String) -> T + Send + 'static, + T: Clone + + Send + + Sync + + std::fmt::Debug + + kube::Resource + + serde::de::DeserializeOwned + + serde::Serialize, + T::DynamicType: Default, +{ + with_temp_ns(|client, ns| async move { + let api = kube::Api::namespaced(client, &*ns); + let obj = f(ns); + let res = api.create(&kube::api::PostParams::default(), &obj).await; + res.expect("resource must apply"); + }) + .await; +} + +pub async fn rejects(f: F) +where + F: FnOnce(String) -> T + Send + 'static, + T: Clone + + Send + + Sync + + std::fmt::Debug + + kube::Resource + + serde::de::DeserializeOwned + + serde::Serialize, + T::DynamicType: Default, +{ + with_temp_ns(|client, ns| async move { + let api = kube::Api::namespaced(client, &*ns); + let obj = f(ns); + let res = api.create(&kube::api::PostParams::default(), &obj).await; + res.expect_err("resource must not apply"); + }) + .await; +} diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs new file mode 100644 index 0000000000000..b7ebc76799d67 --- /dev/null +++ b/policy-test/src/lib.rs @@ -0,0 +1,101 @@ +#![deny(warnings, rust_2018_idioms)] +#![forbid(unsafe_code)] + +pub mod admission; + +use linkerd_policy_controller_k8s_api::{self as api}; +use rand::Rng; +use tracing::Instrument; + +/// Runs a test with a random namespace that is deleted on test completion +pub async fn with_temp_ns(test: F) +where + F: FnOnce(kube::Client, String) -> Fut, + Fut: std::future::Future + Send + 'static, +{ + let _tracing = init_tracing(); + + let 
namespace = { + // TODO(ver) include the test name in this string? + let rng = &mut rand::thread_rng(); + let sfx = (0..6) + .map(|_| rng.sample(LowercaseAlphanumeric) as char) + .collect::(); + format!("linkerd-policy-test-{}", sfx) + }; + + tracing::debug!("initializing client"); + let client = kube::Client::try_default() + .await + .expect("failed to initialize k8s client"); + let api = kube::Api::::all(client.clone()); + + tracing::debug!(%namespace, "creating"); + let ns = api::Namespace { + metadata: api::ObjectMeta { + name: Some(namespace.clone()), + ..Default::default() + }, + ..Default::default() + }; + api.create(&kube::api::PostParams::default(), &ns) + .await + .expect("failed to create Namespace"); + + tracing::trace!("spawning"); + let test = test(client.clone(), namespace.clone()); + let res = tokio::spawn(test.instrument(tracing::info_span!("test", %namespace))).await; + if res.is_err() { + // If the test failed, stop tracing so the log is not polluted with more information about + // cleanup after the failure was printed. + drop(_tracing); + } + + tracing::debug!(%namespace, "deleting"); + api.delete(&namespace, &kube::api::DeleteParams::background()) + .await + .expect("failed to delete Namespace"); + if let Err(err) = res { + std::panic::resume_unwind(err.into_panic()); + } +} + +fn init_tracing() -> tracing::subscriber::DefaultGuard { + tracing::subscriber::set_default( + tracing_subscriber::fmt() + .with_test_writer() + .with_max_level(tracing::Level::TRACE) + .finish(), + ) +} + +struct LowercaseAlphanumeric; + +// Modified from `rand::distributions::Alphanumeric` +// +// Copyright 2018 Developers of the Rand project +// Copyright (c) 2014 The Rust Project Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +impl rand::distributions::Distribution for LowercaseAlphanumeric { + fn sample(&self, rng: &mut R) -> u8 { + const RANGE: u32 = 26 + 10; + const CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyz0123456789"; + loop { + let var = rng.next_u32() >> (32 - 6); + if var < RANGE { + return CHARSET[var as usize]; + } + } + } +} diff --git a/policy-test/tests/admit_server.rs b/policy-test/tests/admit_server.rs new file mode 100644 index 0000000000000..b23812c64d4e2 --- /dev/null +++ b/policy-test/tests/admit_server.rs @@ -0,0 +1,134 @@ +use api::policy::server::ProxyProtocol; +use linkerd_policy_controller_k8s_api::{ + self as api, + policy::server::{Port, Server, ServerSpec}, +}; +use linkerd_policy_test::{admission, with_temp_ns}; + +#[tokio::test(flavor = "current_thread")] +async fn accepts_valid() { + admission::accepts(|ns| Server { + metadata: api::ObjectMeta { + namespace: Some(ns), + name: Some("test".to_string()), + ..Default::default() + }, + spec: ServerSpec { + pod_selector: api::labels::Selector::default(), + port: Port::Number(80), + proxy_protocol: None, + }, + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn accepts_server_updates() { + with_temp_ns(|client, ns| async move { + let test0 = Server { + metadata: api::ObjectMeta { + namespace: Some(ns.clone()), + name: Some("test0".to_string()), + ..Default::default() + }, + spec: ServerSpec { + pod_selector: api::labels::Selector::from_iter(Some(("app", "test"))), + port: Port::Number(80), + proxy_protocol: None, + }, + }; + + let api = kube::Api::namespaced(client, &*ns); + 
api.create(&kube::api::PostParams::default(), &test0) + .await + .expect("resource must apply"); + + api.patch( + "test0", + &kube::api::PatchParams::default(), + &kube::api::Patch::Merge(test0), + ) + .await + .expect("resource must apply"); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn rejects_identitical_pod_selector() { + with_temp_ns(|client, ns| async move { + let spec = ServerSpec { + pod_selector: api::labels::Selector::from_iter(Some(("app", "test"))), + port: Port::Number(80), + proxy_protocol: None, + }; + + let api = kube::Api::namespaced(client, &*ns); + + let test0 = Server { + metadata: api::ObjectMeta { + namespace: Some(ns.clone()), + name: Some("test0".to_string()), + ..Default::default() + }, + spec: spec.clone(), + }; + api.create(&kube::api::PostParams::default(), &test0) + .await + .expect("resource must apply"); + + let test1 = Server { + metadata: api::ObjectMeta { + namespace: Some(ns), + name: Some("test1".to_string()), + ..Default::default() + }, + spec, + }; + api.create(&kube::api::PostParams::default(), &test1) + .await + .expect_err("resource must not apply"); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn rejects_all_pods_selected() { + with_temp_ns(|client, ns| async move { + let api = kube::Api::namespaced(client, &*ns); + + let test0 = Server { + metadata: api::ObjectMeta { + namespace: Some(ns.clone()), + name: Some("test0".to_string()), + ..Default::default() + }, + spec: ServerSpec { + pod_selector: api::labels::Selector::from_iter(Some(("app", "test"))), + port: Port::Number(80), + proxy_protocol: Some(ProxyProtocol::Http2), + }, + }; + api.create(&kube::api::PostParams::default(), &test0) + .await + .expect("resource must apply"); + + let test1 = Server { + metadata: api::ObjectMeta { + namespace: Some(ns), + name: Some("test1".to_string()), + ..Default::default() + }, + spec: ServerSpec { + pod_selector: api::labels::Selector::default(), + port: Port::Number(80), + // 
proxy protocol doesn't factor into the selection + proxy_protocol: Some(ProxyProtocol::Http1), + }, + }; + api.create(&kube::api::PostParams::default(), &test1) + .await + .expect_err("resource must not apply"); + }) + .await; +}