From 9a1c00444e96aa7fda1a2627e025e8bbcd7c12a0 Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Wed, 22 Jan 2025 20:42:51 -0800 Subject: [PATCH 01/11] Check for setup charts before deploying Signed-off-by: Robert Detjens --- src/cluster_setup/mod.rs | 9 ++-- src/commands/deploy.rs | 111 ++++++++++++++++++++++++++++++++++++++- src/commands/mod.rs | 3 ++ src/deploy/mod.rs | 0 src/lib.rs | 1 + 5 files changed, 119 insertions(+), 5 deletions(-) create mode 100644 src/deploy/mod.rs diff --git a/src/cluster_setup/mod.rs b/src/cluster_setup/mod.rs index c5c2795..cb9a19e 100644 --- a/src/cluster_setup/mod.rs +++ b/src/cluster_setup/mod.rs @@ -27,6 +27,9 @@ use crate::configparser::{config, get_config, get_profile_config}; // Some components can or must be deployed and configured ahead of time, like // the ingress controller, cert-manager, and external-dns +// install these charts into this namespace +pub const INGRESS_NAMESPACE: &str = "ingress"; + pub async fn install_ingress(profile: &config::ProfileConfig) -> Result<()> { info!("deploying ingress-nginx chart..."); @@ -38,7 +41,7 @@ pub async fn install_ingress(profile: &config::ProfileConfig) -> Result<()> { "ingress-nginx", Some("https://kubernetes.github.io/ingress-nginx"), "ingress-nginx", - "ingress", + INGRESS_NAMESPACE, VALUES, ) .context("failed to install ingress-nginx helm chart") @@ -55,7 +58,7 @@ pub async fn install_certmanager(profile: &config::ProfileConfig) -> Result<()> "cert-manager", Some("https://charts.jetstack.io"), "cert-manager", - "ingress", + INGRESS_NAMESPACE, VALUES, )?; @@ -87,7 +90,7 @@ pub async fn install_extdns(profile: &config::ProfileConfig) -> Result<()> { "oci://registry-1.docker.io/bitnamicharts/external-dns", None, "external-dns", - "ingress", + INGRESS_NAMESPACE, &values, ) } diff --git a/src/commands/deploy.rs b/src/commands/deploy.rs index 890f095..242b593 100644 --- a/src/commands/deploy.rs +++ b/src/commands/deploy.rs @@ -1,3 +1,110 @@ -pub fn run(_profile: &str, _no_build: 
&bool, _dry_run: &bool) { - println!("running deploy!"); +use anyhow::{anyhow, bail, Context, Error, Result}; +use itertools::Itertools; +use k8s_openapi::api::core::v1::Secret; +use kube::api::ListParams; +use simplelog::*; +use std::env::current_exe; +use std::process::exit; + +use crate::clients::kube_client; +use crate::cluster_setup::{self as setup, INGRESS_NAMESPACE}; +use crate::configparser::config::ProfileConfig; +use crate::configparser::{get_config, get_profile_config}; + +#[tokio::main(flavor = "current_thread")] // make this a sync function +pub async fn run(profile_name: &str, _no_build: &bool, _dry_run: &bool) { + info!("deploying challenges..."); + + let profile = get_profile_config(profile_name).unwrap(); + + // has the cluster been setup? + if let Err(e) = check_setup(profile).await { + error!("{e:?}"); + exit(1); + } +} + +/// check to make sure that the +async fn check_setup(profile: &ProfileConfig) -> Result<()> { + let kube = kube_client(profile).await?; + let secrets: kube::Api = kube::Api::namespaced(kube, setup::INGRESS_NAMESPACE); + + let all_releases = secrets + .list_metadata(&ListParams::default().labels("owner=helm")) + .await?; + + // pull helm release version from secret label + macro_rules! 
helm_version { + ($s:ident) => { + $s.get("version") + .unwrap_or(&"".to_string()) + .parse::() + .unwrap_or(0) + }; + } + let expected_charts = ["ingress-nginx", "cert-manager", "external-dns"]; + let latest_releases = expected_charts + .iter() + .map(|chart| { + // pick latest release + all_releases + .iter() + .map(|r| r.metadata.labels.as_ref().unwrap()) + .filter(|rl| rl.get("name") == Some(&chart.to_string())) + .max_by(|a, b| helm_version!(a).cmp(&helm_version!(b))) + }) + .collect_vec(); + + enum ChartFailure { + Missing(String), + DeploymentFailed(String), + } + + // make sure all releases are present and deployed successfully + let missing = latest_releases + .iter() + .zip(expected_charts) + .filter_map(|(r, c)| { + // is label status=deployed ? + if r.is_none() { + return Some(ChartFailure::Missing(c.to_string())); + } + + if r.unwrap().get("status") == Some(&"deployed".to_string()) { + // all is good + None + } else { + Some(ChartFailure::DeploymentFailed(c.to_string())) + } + }) + .collect_vec(); + + if !missing.is_empty() { + // if any errors are present, collect/reduce them all into one error via + // anyhow context() calls. TODO: should this be in run() to present + // errors there instead of chaining and returning one combined Error + // here? 
+ missing + .iter() + .fold(Err(anyhow!("")), |e, reason| match reason { + ChartFailure::Missing(c) => { + e.with_context(|| format!("chart {}/{c} is not deployed", INGRESS_NAMESPACE)) + } + ChartFailure::DeploymentFailed(c) => e.with_context(|| { + format!("chart {}/{c} is in a failed state", INGRESS_NAMESPACE) + }), + }) + .with_context(|| { + format!( + "cluster has not been set up with needed charts (run `{} cluster-setup`)", + current_exe() + .unwrap() + .file_name() + .unwrap_or_default() + .to_string_lossy() + ) + }) + } else { + Ok(()) + } } diff --git a/src/commands/mod.rs b/src/commands/mod.rs index 4b39abf..62beb26 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -3,3 +3,6 @@ pub mod check_access; pub mod cluster_setup; pub mod deploy; pub mod validate; + +// These modules should not do much and act mostly as a thunk to handle +// displaying outputs/errors of the real function. diff --git a/src/deploy/mod.rs b/src/deploy/mod.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/lib.rs b/src/lib.rs index 3178fd2..9ac8b4f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,6 +8,7 @@ pub mod clients; pub mod cluster_setup; pub mod commands; pub mod configparser; +pub mod deploy; #[cfg(test)] mod tests; From ad1f267b6ffd267fd8ada38cd6816bd41a935441 Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Sun, 26 Jan 2025 14:32:04 -0800 Subject: [PATCH 02/11] Disable false-positive clippy warning for chart check Signed-off-by: Robert Detjens --- src/commands/deploy.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/commands/deploy.rs b/src/commands/deploy.rs index 242b593..1658deb 100644 --- a/src/commands/deploy.rs +++ b/src/commands/deploy.rs @@ -7,7 +7,7 @@ use std::env::current_exe; use std::process::exit; use crate::clients::kube_client; -use crate::cluster_setup::{self as setup, INGRESS_NAMESPACE}; +use crate::cluster_setup as setup; use crate::configparser::config::ProfileConfig; use 
crate::configparser::{get_config, get_profile_config}; @@ -24,7 +24,7 @@ pub async fn run(profile_name: &str, _no_build: &bool, _dry_run: &bool) { } } -/// check to make sure that the +/// check to make sure that the needed ingress charts are deployed and running async fn check_setup(profile: &ProfileConfig) -> Result<()> { let kube = kube_client(profile).await?; let secrets: kube::Api = kube::Api::namespaced(kube, setup::INGRESS_NAMESPACE); @@ -84,14 +84,18 @@ async fn check_setup(profile: &ProfileConfig) -> Result<()> { // anyhow context() calls. TODO: should this be in run() to present // errors there instead of chaining and returning one combined Error // here? + #[allow(clippy::manual_try_fold)] // need to build the Result ourselves missing .iter() .fold(Err(anyhow!("")), |e, reason| match reason { - ChartFailure::Missing(c) => { - e.with_context(|| format!("chart {}/{c} is not deployed", INGRESS_NAMESPACE)) - } + ChartFailure::Missing(c) => e.with_context(|| { + format!("chart {}/{c} is not deployed", setup::INGRESS_NAMESPACE) + }), ChartFailure::DeploymentFailed(c) => e.with_context(|| { - format!("chart {}/{c} is in a failed state", INGRESS_NAMESPACE) + format!( + "chart {}/{c} is in a failed state", + setup::INGRESS_NAMESPACE + ) }), }) .with_context(|| { From 0edcfa7520c9c62209d8a5f112e8a822a9945bdc Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Sun, 26 Jan 2025 14:33:14 -0800 Subject: [PATCH 03/11] Move S3 client methods to shared clients file Signed-off-by: Robert Detjens --- src/access_handlers/s3.rs | 35 +---------------------------------- src/clients.rs | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 34 deletions(-) diff --git a/src/access_handlers/s3.rs b/src/access_handlers/s3.rs index f1ed323..c50888f 100644 --- a/src/access_handlers/s3.rs +++ b/src/access_handlers/s3.rs @@ -3,6 +3,7 @@ use s3; use simplelog::*; use tokio; +use crate::clients::{bucket_client, bucket_client_anonymous}; use crate::configparser::{ 
config::{ProfileConfig, S3Config}, get_config, get_profile_config, @@ -60,37 +61,3 @@ pub async fn check(profile_name: &str) -> Result<()> { Ok(()) } - -/// create bucket client for passed profile config -pub fn bucket_client(config: &S3Config) -> Result> { - trace!("creating bucket client"); - // TODO: once_cell this so it reuses the same bucket? - let region = s3::Region::Custom { - region: config.region.clone(), - endpoint: config.endpoint.clone(), - }; - let creds = s3::creds::Credentials::new( - Some(&config.access_key), - Some(&config.secret_key), - None, - None, - None, - )?; - let bucket = s3::Bucket::new(&config.bucket_name, region, creds)?.with_path_style(); - - Ok(bucket) -} - -/// create public/anonymous bucket client for passed profile config -pub fn bucket_client_anonymous(config: &S3Config) -> Result> { - trace!("creating anon bucket client"); - // TODO: once_cell this so it reuses the same bucket? - let region = s3::Region::Custom { - region: config.region.clone(), - endpoint: config.endpoint.clone(), - }; - let creds = s3::creds::Credentials::anonymous()?; - let bucket = s3::Bucket::new(&config.bucket_name, region, creds)?.with_path_style(); - - Ok(bucket) -} diff --git a/src/clients.rs b/src/clients.rs index 2b26b45..88230a1 100644 --- a/src/clients.rs +++ b/src/clients.rs @@ -9,6 +9,7 @@ use kube::{ core::ResourceExt, discovery::{ApiCapabilities, ApiResource, Discovery, Scope}, }; +use s3; use simplelog::*; use crate::configparser::config; @@ -49,6 +50,44 @@ pub async fn engine_type() -> EngineType { } } +// +// S3 stuff +// + +/// create bucket client for passed profile config +pub fn bucket_client(config: &config::S3Config) -> Result> { + trace!("creating bucket client"); + // TODO: once_cell this so it reuses the same bucket? 
+ let region = s3::Region::Custom { + region: config.region.clone(), + endpoint: config.endpoint.clone(), + }; + let creds = s3::creds::Credentials::new( + Some(&config.access_key), + Some(&config.secret_key), + None, + None, + None, + )?; + let bucket = s3::Bucket::new(&config.bucket_name, region, creds)?.with_path_style(); + + Ok(bucket) +} + +/// create public/anonymous bucket client for passed profile config +pub fn bucket_client_anonymous(config: &config::S3Config) -> Result> { + trace!("creating anon bucket client"); + // TODO: once_cell this so it reuses the same bucket? + let region = s3::Region::Custom { + region: config.region.clone(), + endpoint: config.endpoint.clone(), + }; + let creds = s3::creds::Credentials::anonymous()?; + let bucket = s3::Bucket::new(&config.bucket_name, region, creds)?.with_path_style(); + + Ok(bucket) +} + // // Kubernetes stuff // From e40f1bd740cafdbea99bb72c273429081e8c052f Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Sun, 26 Jan 2025 14:34:18 -0800 Subject: [PATCH 04/11] Return both asset files and images tags from challenge builder The deploy command will need both of these to template out the manifests. Signed-off-by: Robert Detjens --- src/builder/mod.rs | 39 +++++++++++++++++++++++++++++---------- src/commands/build.rs | 4 ++-- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/src/builder/mod.rs b/src/builder/mod.rs index 7801b1a..857612c 100644 --- a/src/builder/mod.rs +++ b/src/builder/mod.rs @@ -9,7 +9,7 @@ use simplelog::*; use std::default; use std::fmt::Pointer; use std::iter::zip; -use std::path::Path; +use std::path::{Path, PathBuf}; use crate::configparser::challenge::{ BuildObject, ChallengeConfig, ImageSource::*, Pod, ProvideConfig, @@ -28,16 +28,26 @@ macro_rules! image_tag_str { }; } +/// Information about all of a challenge's build artifacts. +#[derive(Debug)] +pub struct BuildResult { + /// Container image tags of all containers in the challenge, if the challenge has container images. 
+ /// Will be empty if challenge has no images built from source. + tags: Vec, + /// Path on disk to local assets (both built and static). + /// Will be empty if challenge has no file assets + assets: Vec, +} + /// Build all enabled challenges for the given profile. Returns tags built pub fn build_challenges( profile_name: &str, push: bool, extract_artifacts: bool, -) -> Result> { +) -> Result> { enabled_challenges(profile_name)? .iter() .map(|chal| build_challenge(profile_name, chal, push, extract_artifacts)) - .flatten_ok() .collect::>() } @@ -47,15 +57,22 @@ fn build_challenge( chal: &ChallengeConfig, push: bool, extract_artifacts: bool, -) -> Result> { +) -> Result { debug!("building images for chal {:?}", chal.directory); let config = get_config()?; - let built_tags: Vec<_> = chal + let mut built = BuildResult { + tags: vec![], + assets: vec![], + }; + + built.tags = chal .pods .iter() .filter_map(|p| match &p.image_source { + // ignore any pods that use existing images Image(_) => None, + // build any pods that need building Build(b) => { let tag = format!( image_tag_str!(), @@ -80,11 +97,12 @@ fn build_challenge( if push { debug!( "pushing {} tags for chal {:?}", - built_tags.len(), + built.tags.len(), chal.directory ); - built_tags + built + .tags .iter() .map(|tag| { docker::push_image(tag, &config.registry.build) @@ -116,7 +134,7 @@ fn build_challenge( }) .collect_vec(); - let assets = image_assoc + built.assets = image_assoc .into_iter() .map(|(p, tag)| { let name = format!( @@ -142,7 +160,8 @@ fn build_challenge( .flatten_ok() .collect::>>()?; - info!("extracted artifacts: {:?}", assets); + info!("extracted artifacts: {:?}", built.assets); } - Ok(built_tags) + + Ok(built) } diff --git a/src/commands/build.rs b/src/commands/build.rs index b6e89d7..d4ce2ad 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -8,8 +8,8 @@ use crate::configparser::{get_config, get_profile_config}; pub fn run(profile_name: &str, push: &bool, extract: &bool) { 
info!("building images..."); - let tags = match build_challenges(profile_name, *push, *extract) { - Ok(tags) => tags, + let results = match build_challenges(profile_name, *push, *extract) { + Ok(results) => results, Err(e) => { error!("{e:?}"); exit(1) From 4bc1f2b58b2b3bc6f0136c575a499c25625734ed Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Sun, 26 Jan 2025 18:33:15 -0800 Subject: [PATCH 05/11] collect both built and upstream tags in build results Signed-off-by: Robert Detjens --- src/builder/mod.rs | 78 ++++++++++++++++++----------------- src/commands/deploy.rs | 10 +++-- src/configparser/challenge.rs | 23 +++++++++++ src/deploy/mod.rs | 37 +++++++++++++++++ 4 files changed, 108 insertions(+), 40 deletions(-) diff --git a/src/builder/mod.rs b/src/builder/mod.rs index 857612c..ab6d41c 100644 --- a/src/builder/mod.rs +++ b/src/builder/mod.rs @@ -27,18 +27,26 @@ macro_rules! image_tag_str { "{registry}/{challenge}-{container}:{profile}" }; } +pub(super) use image_tag_str; /// Information about all of a challenge's build artifacts. #[derive(Debug)] pub struct BuildResult { /// Container image tags of all containers in the challenge, if the challenge has container images. /// Will be empty if challenge has no images built from source. - tags: Vec, + tags: Vec, /// Path on disk to local assets (both built and static). /// Will be empty if challenge has no file assets assets: Vec, } +/// Tag string with added context of where it came from (built locally or an upstream image) +#[derive(Debug)] +pub enum TagWithSource { + Upstream(String), + Built(String), +} + /// Build all enabled challenges for the given profile. 
Returns tags built pub fn build_challenges( profile_name: &str, @@ -69,40 +77,43 @@ fn build_challenge( built.tags = chal .pods .iter() - .filter_map(|p| match &p.image_source { - // ignore any pods that use existing images - Image(_) => None, + .map(|p| match &p.image_source { + Image(tag) => Ok(TagWithSource::Upstream(tag.to_string())), // build any pods that need building - Build(b) => { - let tag = format!( - image_tag_str!(), - registry = config.registry.domain, - challenge = chal.name, - container = p.name, - profile = profile_name - ); - Some( - docker::build_image(&chal.directory, b, &tag).with_context(|| { - format!( - "error building image {} for chal {}", - p.name, - chal.directory.to_string_lossy() - ) - }), - ) + Build(build) => { + let tag = chal.container_tag_for_pod(profile_name, &p.name)?; + + let res = docker::build_image(&chal.directory, build, &tag).with_context(|| { + format!( + "error building image {} for chal {}", + p.name, + chal.directory.to_string_lossy() + ) + }); + // map result tag string into enum + res.map(TagWithSource::Built) } }) .collect::>()?; if push { + // only need to push tags we actually built + let tags_to_push = built + .tags + .iter() + .filter_map(|t| match t { + TagWithSource::Built(t) => Some(t), + TagWithSource::Upstream(_) => None, + }) + .collect_vec(); + debug!( "pushing {} tags for chal {:?}", - built.tags.len(), + tags_to_push.len(), chal.directory ); - built - .tags + tags_to_push .iter() .map(|tag| { docker::push_image(tag, &config.registry.build) @@ -114,29 +125,22 @@ fn build_challenge( if extract_artifacts { info!("extracting build artifacts for chal {:?}", chal.directory); - // find the matching tag for Provide entries that have a `from:` source + // associate file `Provide` entries that have a `from:` source with their corresponding container image let image_assoc = chal .provide .iter() .filter_map(|p| { - p.from.as_ref().map(|f| { - ( - p, - format!( - image_tag_str!(), - registry = 
config.registry.domain, - challenge = chal.name, - container = f, - profile = profile_name - ), - ) - }) + p.from + .as_ref() + .map(|f| (p, chal.container_tag_for_pod(profile_name, f))) }) .collect_vec(); built.assets = image_assoc .into_iter() .map(|(p, tag)| { + let tag = tag?; + let name = format!( "asset-container-{}-{}", chal.directory.to_string_lossy().replace("/", "-"), diff --git a/src/commands/deploy.rs b/src/commands/deploy.rs index 1658deb..5f21383 100644 --- a/src/commands/deploy.rs +++ b/src/commands/deploy.rs @@ -22,6 +22,8 @@ pub async fn run(profile_name: &str, _no_build: &bool, _dry_run: &bool) { error!("{e:?}"); exit(1); } + + // let build_result = } /// check to make sure that the needed ingress charts are deployed and running @@ -81,9 +83,11 @@ async fn check_setup(profile: &ProfileConfig) -> Result<()> { if !missing.is_empty() { // if any errors are present, collect/reduce them all into one error via - // anyhow context() calls. TODO: should this be in run() to present - // errors there instead of chaining and returning one combined Error - // here? + // anyhow context() calls. + // + // TODO: this should probably be returning Vec instead of a + // single Error chain. should this be in run() to present errors there + // instead of chaining and returning one combined Error here? 
#[allow(clippy::manual_try_fold)] // need to build the Result ourselves missing .iter() diff --git a/src/configparser/challenge.rs b/src/configparser/challenge.rs index 193544c..5754160 100644 --- a/src/configparser/challenge.rs +++ b/src/configparser/challenge.rs @@ -12,8 +12,10 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use void::Void; +use crate::builder::image_tag_str; use crate::configparser::config::Resource; use crate::configparser::field_coersion::string_or_struct; +use crate::configparser::get_config; pub fn parse_all() -> Result, Vec> { // find all challenge.yaml files @@ -138,6 +140,27 @@ pub struct ChallengeConfig { #[serde(default)] pods: Vec, // optional if no containers used } +impl ChallengeConfig { + pub fn container_tag_for_pod(&self, profile_name: &str, pod_name: &str) -> Result { + let config = get_config()?; + let pod = self + .pods + .iter() + .find(|p| p.name == pod_name) + .ok_or(anyhow!("pod {} not found in challenge", pod_name))?; + + match &pod.image_source { + ImageSource::Image(t) => Ok(t.to_string()), + ImageSource::Build(b) => Ok(format!( + image_tag_str!(), + registry = config.registry.domain, + challenge = self.name, + container = pod.name, + profile = profile_name + )), + } + } +} fn default_difficulty() -> i64 { 1 diff --git a/src/deploy/mod.rs b/src/deploy/mod.rs index e69de29..55dd0f9 100644 --- a/src/deploy/mod.rs +++ b/src/deploy/mod.rs @@ -0,0 +1,37 @@ +use anyhow::{anyhow, bail, Context, Error, Result}; +use itertools::Itertools; +use simplelog::*; + +use crate::clients::{bucket_client, kube_client}; +use crate::cluster_setup as setup; +use crate::configparser::config::ProfileConfig; +use crate::configparser::{enabled_challenges, get_config, get_profile_config}; + +/// Render challenge manifest templates and apply to cluster +pub async fn deploy_challenges(profile_name: &str) -> Result<()> { + let profile = get_profile_config(profile_name)?; + + todo!() +} + +/// Upload files to frontend asset bucket +/// 
Returns urls of upload files. +pub async fn upload_assets(profile_name: &str) -> Result> { + let profile = get_profile_config(profile_name)?; + let enabled_challenges = enabled_challenges(profile_name)?; + + let bucket = bucket_client(&profile.s3)?; + + for chal in enabled_challenges {} + + todo!() + + // TODO: should uploaded URLs be a (generated) part of the challenge config + // struct? +} + +/// Sync deployed challenges with rCTF frontend +pub async fn update_frontend(profile_name: &str) -> Result<()> { + let profile = get_profile_config(profile_name)?; + todo!() +} From 37b7f57eabd5eac90d01f20960967a4e6eb660d3 Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Sun, 26 Jan 2025 19:53:57 -0800 Subject: [PATCH 06/11] Move local file zipping to separate function Signed-off-by: Robert Detjens --- src/builder/artifacts.rs | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/builder/artifacts.rs b/src/builder/artifacts.rs index d114730..2548776 100644 --- a/src/builder/artifacts.rs +++ b/src/builder/artifacts.rs @@ -105,21 +105,31 @@ async fn extract_archive( })) .await?; - // write them all to new zip - let zipfile = File::create(chal.directory.join(archive_name))?; + zip_files(&chal.directory.join(archive_name), copied_files)?; + + Ok(vec![chal.directory.join(archive_name)]) +} + +/// Add multiple local `files` to a zipfile at `zip_name` +fn zip_files(archive_name: &Path, files: Vec) -> Result { + debug!("creating zip at {:?}", archive_name); + let zipfile = File::create(archive_name)?; let mut z = zip::ZipWriter::new(zipfile); let opts = zip::write::SimpleFileOptions::default(); let mut buf = vec![]; - for path in copied_files.into_iter() { + for path in files.iter() { trace!("adding {:?} to zip", &path); - File::open(&path)?.read_to_end(&mut buf)?; - z.start_file(path.to_string_lossy(), opts)?; + // TODO: dont read entire file into memory + File::open(path)?.read_to_end(&mut buf)?; + // TODO: should this always do 
basename? some chals might need specific + // file structure but including dirs should work fine + z.start_file(path.file_name().unwrap().to_string_lossy(), opts)?; z.write_all(&buf)?; buf.clear(); } z.finish(); - Ok(vec![chal.directory.join(archive_name)]) + Ok(archive_name.to_path_buf()) } From 59b68d8b6a5bf3c82d845012770347e78db66635 Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Tue, 28 Jan 2025 09:19:39 -0800 Subject: [PATCH 07/11] Switch to PathBufs for path strings in challenge Provide configs Signed-off-by: Robert Detjens --- src/builder/artifacts.rs | 32 +++++++++------------ src/builder/docker.rs | 4 +-- src/builder/mod.rs | 51 +++++++++++++++++++-------------- src/configparser/challenge.rs | 10 ++++--- src/tests/parsing/challenges.rs | 16 +++++------ 5 files changed, 60 insertions(+), 53 deletions(-) diff --git a/src/builder/artifacts.rs b/src/builder/artifacts.rs index 2548776..64530b1 100644 --- a/src/builder/artifacts.rs +++ b/src/builder/artifacts.rs @@ -48,7 +48,7 @@ pub async fn extract_asset( async fn extract_files( chal: &ChallengeConfig, container: &docker::ContainerInfo, - files: &Vec, + files: &[PathBuf], ) -> Result> { debug!( "extracting {} files without renaming: {:?}", @@ -56,12 +56,10 @@ async fn extract_files( files ); - try_join_all(files.iter().map(|f| { - let from = PathBuf::from(f); + try_join_all(files.iter().map(|from| async { // use basename of source file as target name let to = chal.directory.join(from.file_name().unwrap()); - - docker::copy_file(container, from, to) + docker::copy_file(container, from, &to).await })) .await } @@ -70,13 +68,12 @@ async fn extract_files( async fn extract_rename( chal: &ChallengeConfig, container: &docker::ContainerInfo, - file: &str, - new_name: &str, + file: &Path, + new_name: &Path, ) -> Result> { debug!("extracting file {:?} renamed to {:?}", file, new_name); - let new_file = - docker::copy_file(container, PathBuf::from(file), PathBuf::from(new_name)).await?; + let new_file = 
docker::copy_file(container, file, new_name).await?; Ok(vec![new_file]) } @@ -85,8 +82,9 @@ async fn extract_rename( async fn extract_archive( chal: &ChallengeConfig, container: &docker::ContainerInfo, - files: &Vec, - archive_name: &str, + // files: &Vec, + files: &[PathBuf], + archive_name: &Path, ) -> Result> { debug!( "extracting {} files {:?} into archive {:?}", @@ -97,21 +95,19 @@ async fn extract_archive( // copy all listed files to tempdir let tempdir = tempdir_in(".")?; - let copied_files = try_join_all(files.iter().map(|f| { - let from = PathBuf::from(f); + let copied_files = try_join_all(files.iter().map(|from| async { let to = tempdir.path().join(from.file_name().unwrap()); - - docker::copy_file(container, from, to) + docker::copy_file(container, from, &to).await })) .await?; - zip_files(&chal.directory.join(archive_name), copied_files)?; + zip_files(&chal.directory.join(archive_name), &copied_files)?; Ok(vec![chal.directory.join(archive_name)]) } /// Add multiple local `files` to a zipfile at `zip_name` -fn zip_files(archive_name: &Path, files: Vec) -> Result { +pub fn zip_files(archive_name: &Path, files: &[PathBuf]) -> Result { debug!("creating zip at {:?}", archive_name); let zipfile = File::create(archive_name)?; let mut z = zip::ZipWriter::new(zipfile); @@ -119,7 +115,7 @@ fn zip_files(archive_name: &Path, files: Vec) -> Result { let mut buf = vec![]; for path in files.iter() { - trace!("adding {:?} to zip", &path); + trace!("adding {:?} to zip", path); // TODO: dont read entire file into memory File::open(path)?.read_to_end(&mut buf)?; // TODO: should this always do basename? 
some chals might need specific diff --git a/src/builder/docker.rs b/src/builder/docker.rs index 0c9c807..c0ef563 100644 --- a/src/builder/docker.rs +++ b/src/builder/docker.rs @@ -157,7 +157,7 @@ pub async fn remove_container(container: ContainerInfo) -> Result<()> { Ok(()) } -pub async fn copy_file(container: &ContainerInfo, from: PathBuf, to: PathBuf) -> Result { +pub async fn copy_file(container: &ContainerInfo, from: &Path, to: &Path) -> Result { trace!("copying {}:{from:?} to {to:?}", container.name); let client = docker().await?; @@ -197,7 +197,7 @@ pub async fn copy_file(container: &ContainerInfo, from: PathBuf, to: PathBuf) -> if let Some(mut entry_r) = tar.entries()?.next() { let mut entry = entry_r?; trace!("got entry: {:?}", entry.path()); - let mut target = File::create(&to)?; + let mut target = File::create(to)?; io::copy(&mut entry, &mut target)?; } else { bail!( diff --git a/src/builder/mod.rs b/src/builder/mod.rs index ab6d41c..83bd44c 100644 --- a/src/builder/mod.rs +++ b/src/builder/mod.rs @@ -16,10 +16,8 @@ use crate::configparser::challenge::{ }; use crate::configparser::{enabled_challenges, get_config}; -pub mod docker; - pub mod artifacts; -use artifacts::extract_asset; +pub mod docker; // define tag format as reusable macro macro_rules! 
image_tag_str { @@ -125,19 +123,19 @@ fn build_challenge( if extract_artifacts { info!("extracting build artifacts for chal {:?}", chal.directory); - // associate file `Provide` entries that have a `from:` source with their corresponding container image - let image_assoc = chal - .provide + let (provide_container, provide_static): (Vec<_>, Vec<_>) = + chal.provide.iter().partition(|p| p.from.is_some()); + + let extracted_files = provide_container .iter() - .filter_map(|p| { - p.from - .as_ref() - .map(|f| (p, chal.container_tag_for_pod(profile_name, f))) + // associate container `Provide` entries with their corresponding container image + .map(|provide| { + ( + provide, + chal.container_tag_for_pod(profile_name, provide.from.as_ref().unwrap()), + ) }) - .collect_vec(); - - built.assets = image_assoc - .into_iter() + // extract each container provide entry .map(|(p, tag)| { let tag = tag?; @@ -148,13 +146,14 @@ fn build_challenge( ); let container = docker::create_container(&tag, &name)?; - let asset_result = extract_asset(chal, p, &container).with_context(|| { - format!( - "failed to extract build artifacts for chal {:?} container {:?}", - chal.directory, - p.from.clone().unwrap() - ) - }); + let asset_result = + artifacts::extract_asset(chal, p, &container).with_context(|| { + format!( + "failed to extract build artifacts for chal {:?} container {:?}", + chal.directory, + p.from.clone().unwrap() + ) + }); // clean up container even if it failed docker::remove_container(container)?; @@ -164,6 +163,16 @@ fn build_challenge( .flatten_ok() .collect::>>()?; + // handle potentially zipping up local files as well + let local_files = provide_static.iter().map(|provide| { + match provide.as_file.as_ref() { + // no archiving needed, pass files as-is + None => Ok(provide.include.clone()), + // need to archive multiple files into zip + Some(as_) => artifacts::zip_files(as_, provide.include.as_ref()).map(|z| vec![z]), + } + }); + info!("extracted artifacts: {:?}", 
built.assets); } diff --git a/src/configparser/challenge.rs b/src/configparser/challenge.rs index 5754160..472c1d0 100644 --- a/src/configparser/challenge.rs +++ b/src/configparser/challenge.rs @@ -141,6 +141,8 @@ pub struct ChallengeConfig { pods: Vec, // optional if no containers used } impl ChallengeConfig { + /// Return the container image tag for the pod; either the upstream image or + /// the tag to be built if the image is to be built from source. pub fn container_tag_for_pod(&self, profile_name: &str, pod_name: &str) -> Result { let config = get_config()?; let pod = self @@ -180,7 +182,7 @@ enum FlagType { #[derive(Debug, PartialEq, Serialize, Deserialize)] #[fully_pub] struct FilePath { - file: String, + file: PathBuf, } #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -215,11 +217,11 @@ struct ProvideConfig { /// they are zipped into an archive with this filename. If this is omitted, /// each file(s) are listed individually with no renaming. #[serde(default, rename = "as")] - as_file: Option, + as_file: Option, /// List of files to read from the repo or container. If reading from the /// repo source files, only relative paths are supported. 
- include: Vec, + include: Vec, } impl FromStr for ProvideConfig { type Err = Void; @@ -227,7 +229,7 @@ impl FromStr for ProvideConfig { Ok(ProvideConfig { from: None, as_file: None, - include: vec![s.to_string()], + include: vec![PathBuf::from(s)], }) } } diff --git a/src/tests/parsing/challenges.rs b/src/tests/parsing/challenges.rs index 04b8f8b..585f3e6 100644 --- a/src/tests/parsing/challenges.rs +++ b/src/tests/parsing/challenges.rs @@ -241,27 +241,27 @@ fn challenge_provide() { ProvideConfig { from: None, as_file: None, - include: vec!["foo.txt".to_string()] + include: vec!["foo.txt".into()] }, ProvideConfig { from: None, as_file: None, - include: vec!["bar.txt".to_string(), "baz.txt".to_string()] + include: vec!["bar.txt".into(), "baz.txt".into()] }, ProvideConfig { from: None, - as_file: Some("stuff.zip".to_string()), - include: vec!["ducks".to_string(), "beavers".to_string()] + as_file: Some("stuff.zip".into()), + include: vec!["ducks".into(), "beavers".into()] }, ProvideConfig { - from: Some("container".to_string()), + from: Some("container".into()), as_file: None, - include: vec!["/foo/bar".to_string()] + include: vec!["/foo/bar".into()] }, ProvideConfig { from: Some("container".to_string()), - as_file: Some("shells.zip".to_string()), - include: vec!["/usr/bin/bash".to_string(), "/usr/bin/zsh".to_string()] + as_file: Some("shells.zip".into()), + include: vec!["/usr/bin/bash".into(), "/usr/bin/zsh".into()] } ], ); From 2f01285fc972ddd696f549f155619fe5204a671d Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Wed, 29 Jan 2025 20:35:27 -0800 Subject: [PATCH 08/11] some deploy command skeleton Signed-off-by: Robert Detjens --- src/commands/deploy.rs | 64 ++++++++++++++++++++++++++++++++++++------ src/deploy/mod.rs | 17 +++++++---- 2 files changed, 68 insertions(+), 13 deletions(-) diff --git a/src/commands/deploy.rs b/src/commands/deploy.rs index 5f21383..34af71f 100644 --- a/src/commands/deploy.rs +++ b/src/commands/deploy.rs @@ -7,14 +7,15 @@ use 
std::env::current_exe; use std::process::exit; use crate::clients::kube_client; -use crate::cluster_setup as setup; +use crate::cluster_setup; use crate::configparser::config::ProfileConfig; use crate::configparser::{get_config, get_profile_config}; -#[tokio::main(flavor = "current_thread")] // make this a sync function -pub async fn run(profile_name: &str, _no_build: &bool, _dry_run: &bool) { - info!("deploying challenges..."); +use crate::builder::build_challenges; +use crate::deploy::{deploy_challenges, update_frontend, upload_assets}; +#[tokio::main(flavor = "current_thread")] // make this a sync function +pub async fn run(profile_name: &str, no_build: &bool, _dry_run: &bool) { let profile = get_profile_config(profile_name).unwrap(); // has the cluster been setup? @@ -23,13 +24,57 @@ pub async fn run(profile_name: &str, _no_build: &bool, _dry_run: &bool) { exit(1); } - // let build_result = + // build before deploying + if *no_build { + warn!(""); + warn!("Not building before deploying! 
are you sure this is a good idea?"); + warn!(""); + } + + info!("building challenges..."); + let build_results = match build_challenges(profile_name, true, true) { + Ok(result) => result, + Err(e) => { + error!("{e:?}"); + exit(1); + } + }; + + // deploy needs to: + // A) render kubernetes manifests + // - namespace, deployment, service, ingress + // - upgrade ingress config with new listen ports + // + // B) upload asset files to bucket + // + // C) update frontend with new state of challenges + + // A) + info!("deploying challenges..."); + if let Err(e) = deploy_challenges(profile_name, &build_results).await { + error!("{e:?}"); + exit(1); + } + + // B) + info!("uploading assets..."); + if let Err(e) = upload_assets(profile_name, &build_results).await { + error!("{e:?}"); + exit(1); + } + + // C) + info!("updating frontend..."); + if let Err(e) = update_frontend(profile_name, &build_results).await { + error!("{e:?}"); + exit(1); + } } /// check to make sure that the needed ingress charts are deployed and running async fn check_setup(profile: &ProfileConfig) -> Result<()> { let kube = kube_client(profile).await?; - let secrets: kube::Api = kube::Api::namespaced(kube, setup::INGRESS_NAMESPACE); + let secrets: kube::Api = kube::Api::namespaced(kube, cluster_setup::INGRESS_NAMESPACE); let all_releases = secrets .list_metadata(&ListParams::default().labels("owner=helm")) @@ -93,12 +138,15 @@ async fn check_setup(profile: &ProfileConfig) -> Result<()> { .iter() .fold(Err(anyhow!("")), |e, reason| match reason { ChartFailure::Missing(c) => e.with_context(|| { - format!("chart {}/{c} is not deployed", setup::INGRESS_NAMESPACE) + format!( + "chart {}/{c} is not deployed", + cluster_setup::INGRESS_NAMESPACE + ) }), ChartFailure::DeploymentFailed(c) => e.with_context(|| { format!( "chart {}/{c} is in a failed state", - setup::INGRESS_NAMESPACE + cluster_setup::INGRESS_NAMESPACE ) }), }) diff --git a/src/deploy/mod.rs b/src/deploy/mod.rs index 55dd0f9..26735d6 100644 
--- a/src/deploy/mod.rs +++ b/src/deploy/mod.rs @@ -1,29 +1,34 @@ +use std::path::PathBuf; + use anyhow::{anyhow, bail, Context, Error, Result}; use itertools::Itertools; use simplelog::*; +use crate::builder::BuildResult; use crate::clients::{bucket_client, kube_client}; use crate::cluster_setup as setup; use crate::configparser::config::ProfileConfig; use crate::configparser::{enabled_challenges, get_config, get_profile_config}; /// Render challenge manifest templates and apply to cluster -pub async fn deploy_challenges(profile_name: &str) -> Result<()> { +pub async fn deploy_challenges(profile_name: &str, build_results: &[BuildResult]) -> Result<()> { let profile = get_profile_config(profile_name)?; + let enabled_challenges = enabled_challenges(profile_name)?; todo!() } /// Upload files to frontend asset bucket /// Returns urls of upload files. -pub async fn upload_assets(profile_name: &str) -> Result> { +pub async fn upload_assets( + profile_name: &str, + build_results: &[BuildResult], +) -> Result> { let profile = get_profile_config(profile_name)?; let enabled_challenges = enabled_challenges(profile_name)?; let bucket = bucket_client(&profile.s3)?; - for chal in enabled_challenges {} - todo!() // TODO: should uploaded URLs be a (generated) part of the challenge config @@ -31,7 +36,9 @@ pub async fn upload_assets(profile_name: &str) -> Result> { } /// Sync deployed challenges with rCTF frontend -pub async fn update_frontend(profile_name: &str) -> Result<()> { +pub async fn update_frontend(profile_name: &str, build_results: &[BuildResult]) -> Result<()> { let profile = get_profile_config(profile_name)?; + let enabled_challenges = enabled_challenges(profile_name)?; + todo!() } From 4f0545ddd50fd20e440e7315256f9fa6a31affa3 Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Thu, 30 Jan 2025 17:21:20 -0800 Subject: [PATCH 09/11] represent chal provide variants as explicit enum types Signed-off-by: Robert Detjens --- src/builder/artifacts.rs | 58 +++++++++++------ 
src/builder/mod.rs | 98 ++++++++++++++-------------- src/configparser/challenge.rs | 110 ++++++++++++++++++-------------- src/tests/parsing/challenges.rs | 56 +++++++++------- 4 files changed, 183 insertions(+), 139 deletions(-) diff --git a/src/builder/artifacts.rs b/src/builder/artifacts.rs index 64530b1..67daddc 100644 --- a/src/builder/artifacts.rs +++ b/src/builder/artifacts.rs @@ -10,37 +10,57 @@ use tempfile::tempdir_in; use zip; use crate::builder::docker; +use crate::clients::docker; use crate::configparser::challenge::{ChallengeConfig, ProvideConfig}; -/// extract assets from given container name and provide config to challenge directory, return file path(s) extracted +/// extract assets from provide config and possible container to challenge directory, return file path(s) extracted #[tokio::main(flavor = "current_thread")] // make this a sync function pub async fn extract_asset( chal: &ChallengeConfig, provide: &ProvideConfig, - container: &docker::ContainerInfo, + // pod_containers: ) -> Result> { - debug!("extracting assets from container {}", &container.name); - // This needs to handle three cases: - // - single or multiple files without renaming (no as: field) - // - single file with rename (one item with as:) - // - multiple files as archive (multiple items with as:) + // This needs to handle three cases * 2 sources: + // - single or multiple files without renaming (no as: field) + // - single file with rename (one item with as:) + // - multiple files as archive (multiple items with as:) + // and whether the file is coming from + // - the repo + // - or a container // TODO: since this puts artifacts in the repo source folder, this should // try to not overwrite any existing files. 
- match &provide.as_file { - // no renaming, copy out all as-is - None => extract_files(chal, container, &provide.include).await, - // (as is keyword, so add underscore) - Some(as_) => { - if provide.include.len() == 1 { - // single file, rename - extract_rename(chal, container, &provide.include[0], as_).await - } else { - // multiple files, zip as archive - extract_archive(chal, container, &provide.include, as_).await - } + // debug!("extracting assets from container {}", &container.name); + + let docker = docker().await?; + + match provide { + // No action necessary, return path as-is + ProvideConfig::FromRepo { files } => Ok(files.clone()), + ProvideConfig::FromRepoRename { from, to } => { + std::fs::rename(from, to)?; + Ok(vec![to.clone()]) } + ProvideConfig::FromRepoArchive { + files, + archive_name, + } => { + zip_files(archive_name, files)?; + Ok(vec![archive_name.clone()]) + } + + ProvideConfig::FromContainer { container, files } => extract_files(chal, container, files), + ProvideConfig::FromContainerRename { + container, + from, + to, + } => extract_rename(chal, container, from, to), + ProvideConfig::FromContainerArchive { + container, + files, + archive_name, + } => extract_archive(chal, container, files, archive_name), } } diff --git a/src/builder/mod.rs b/src/builder/mod.rs index 83bd44c..0a0809c 100644 --- a/src/builder/mod.rs +++ b/src/builder/mod.rs @@ -123,55 +123,55 @@ fn build_challenge( if extract_artifacts { info!("extracting build artifacts for chal {:?}", chal.directory); - let (provide_container, provide_static): (Vec<_>, Vec<_>) = - chal.provide.iter().partition(|p| p.from.is_some()); - - let extracted_files = provide_container - .iter() - // associate container `Provide` entries with their corresponding container image - .map(|provide| { - ( - provide, - chal.container_tag_for_pod(profile_name, provide.from.as_ref().unwrap()), - ) - }) - // extract each container provide entry - .map(|(p, tag)| { - let tag = tag?; - - let name = format!( - 
"asset-container-{}-{}", - chal.directory.to_string_lossy().replace("/", "-"), - p.from.clone().unwrap() - ); - let container = docker::create_container(&tag, &name)?; - - let asset_result = - artifacts::extract_asset(chal, p, &container).with_context(|| { - format!( - "failed to extract build artifacts for chal {:?} container {:?}", - chal.directory, - p.from.clone().unwrap() - ) - }); - - // clean up container even if it failed - docker::remove_container(container)?; - - asset_result - }) - .flatten_ok() - .collect::>>()?; - - // handle potentially zipping up local files as well - let local_files = provide_static.iter().map(|provide| { - match provide.as_file.as_ref() { - // no archiving needed, pass files as-is - None => Ok(provide.include.clone()), - // need to archive multiple files into zip - Some(as_) => artifacts::zip_files(as_, provide.include.as_ref()).map(|z| vec![z]), - } - }); + // let (provide_container, provide_static): (Vec<_>, Vec<_>) = + // chal.provide.iter().partition(|p| p.from.is_some()); + + // let extracted_files = provide_container + // .iter() + // // associate container `Provide` entries with their corresponding container image + // .map(|provide| { + // ( + // provide, + // chal.container_tag_for_pod(profile_name, provide.from.as_ref().unwrap()), + // ) + // }) + // // extract each container provide entry + // .map(|(p, tag)| { + // let tag = tag?; + + // let name = format!( + // "asset-container-{}-{}", + // chal.directory.to_string_lossy().replace("/", "-"), + // p.from.clone().unwrap() + // ); + // let container = docker::create_container(&tag, &name)?; + + // let asset_result = + // artifacts::extract_asset(chal, p, &container).with_context(|| { + // format!( + // "failed to extract build artifacts for chal {:?} container {:?}", + // chal.directory, + // p.from.clone().unwrap() + // ) + // }); + + // // clean up container even if it failed + // docker::remove_container(container)?; + + // asset_result + // }) + // .flatten_ok() + // 
.collect::>>()?; + + // // handle potentially zipping up local files as well + // let local_files = provide_static.iter().map(|provide| { + // match provide.as_file.as_ref() { + // // no archiving needed, pass files as-is + // None => Ok(provide.include.clone()), + // // need to archive multiple files into zip + // Some(as_) => artifacts::zip_files(as_, provide.include.as_ref()).map(|z| vec![z]), + // } + // }); info!("extracted artifacts: {:?}", built.assets); } diff --git a/src/configparser/challenge.rs b/src/configparser/challenge.rs index 472c1d0..e482e08 100644 --- a/src/configparser/challenge.rs +++ b/src/configparser/challenge.rs @@ -173,63 +173,75 @@ fn default_difficulty() -> i64 { #[fully_pub] enum FlagType { RawString(String), - File(FilePath), - Text(FileText), - Regex(FileRegex), - Verifier(FileVerifier), + File { file: PathBuf }, + Text { text: String }, + Regex { regex: String }, + Verifier { verifier: String }, } +// Parse each distinct kind of Provide action as a separate enum variant #[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(untagged, deny_unknown_fields)] #[fully_pub] -struct FilePath { - file: PathBuf, -} - -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[fully_pub] -struct FileText { - text: String, -} - -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[fully_pub] -struct FileRegex { - regex: String, -} - -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[fully_pub] -struct FileVerifier { - verifier: String, -} - -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[fully_pub] -struct ProvideConfig { - /// The challenge container image where the file should be fetched from, - /// based on the names in `pods`. If omitted, the default value is the repo - /// challenge directory. - #[serde(default)] - from: Option, - - /// Rename or create zip file from included files. If only one file is - /// included, it is renamed to this value. 
If multiple files are included, - /// they are zipped into an archive with this filename. If this is omitted, - /// each file(s) are listed individually with no renaming. - #[serde(default, rename = "as")] - as_file: Option, - - /// List of files to read from the repo or container. If reading from the - /// repo source files, only relative paths are supported. - include: Vec, +enum ProvideConfig { + /// Upload file(s) as-is. + /// Single or multiple files with no as: or from: + /// Default if only a string is given. + FromRepo { + #[serde(rename = "include")] + files: Vec, + }, + /// Rename single file before uploading. + /// Single file with as: field without from: + FromRepoRename { + #[serde(rename = "include")] + from: PathBuf, + #[serde(rename = "as")] + to: PathBuf, + }, + /// Upload multiple files in zip archive + /// Multiple files with as: field without from: + FromRepoArchive { + #[serde(rename = "include")] + files: Vec, + #[serde(rename = "as")] + archive_name: PathBuf, + }, + + /// Upload file(s) from container as-is. + /// Single or multiple files with no as: + FromContainer { + #[serde(rename = "from")] + container: String, + #[serde(rename = "include")] + files: Vec, + }, + /// Rename single file from container before uploading. 
+ /// Single file with as: field + FromContainerRename { + #[serde(rename = "from")] + container: String, + #[serde(rename = "include")] + from: PathBuf, + #[serde(rename = "as")] + to: PathBuf, + }, + /// Upload multiple files from container in zip archive + /// Multiple files with as: field + FromContainerArchive { + #[serde(rename = "from")] + container: String, + #[serde(rename = "include")] + files: Vec, + #[serde(rename = "as")] + archive_name: PathBuf, + }, } impl FromStr for ProvideConfig { type Err = Void; fn from_str(s: &str) -> std::result::Result { - Ok(ProvideConfig { - from: None, - as_file: None, - include: vec![PathBuf::from(s)], + Ok(ProvideConfig::FromRepo { + files: vec![PathBuf::from(s)], }) } } diff --git a/src/tests/parsing/challenges.rs b/src/tests/parsing/challenges.rs index 585f3e6..e83796e 100644 --- a/src/tests/parsing/challenges.rs +++ b/src/tests/parsing/challenges.rs @@ -88,9 +88,9 @@ fn challenge_two_levels() { category: "foo".to_string(), directory: PathBuf::from("foo/test"), - flag: FlagType::Text(FileText { + flag: FlagType::Text { text: "test{it-works}".to_string() - }), + }, provide: vec![], pods: vec![], @@ -212,10 +212,14 @@ fn challenge_provide() { provide: - foo.txt + - include: - bar.txt - baz.txt + - as: oranges + include: apples + - as: stuff.zip include: - ducks @@ -225,6 +229,11 @@ fn challenge_provide() { include: - /foo/bar + - from: container + as: pears + include: + - /usr/lib/peaches + - from: container as: shells.zip include: @@ -238,30 +247,33 @@ fn challenge_provide() { assert_eq!( chals[0].provide, vec![ - ProvideConfig { - from: None, - as_file: None, - include: vec!["foo.txt".into()] + ProvideConfig::FromRepo { + files: vec!["foo.txt".into()] + }, + ProvideConfig::FromRepo { + files: vec!["bar.txt".into(), "baz.txt".into()] + }, + ProvideConfig::FromRepoRename { + from: "apples".into(), + to: "oranges".into() }, - ProvideConfig { - from: None, - as_file: None, - include: vec!["bar.txt".into(), 
"baz.txt".into()] + ProvideConfig::FromRepoArchive { + files: vec!["ducks".into(), "beavers".into()], + archive_name: "stuff.zip".into() }, - ProvideConfig { - from: None, - as_file: Some("stuff.zip".into()), - include: vec!["ducks".into(), "beavers".into()] + ProvideConfig::FromContainer { + container: "container".to_string(), + files: vec!["/foo/bar".into()] }, - ProvideConfig { - from: Some("container".into()), - as_file: None, - include: vec!["/foo/bar".into()] + ProvideConfig::FromContainerRename { + container: "container".to_string(), + from: "/usr/lib/peaches".into(), + to: "pears".into(), }, - ProvideConfig { - from: Some("container".to_string()), - as_file: Some("shells.zip".into()), - include: vec!["/usr/bin/bash".into(), "/usr/bin/zsh".into()] + ProvideConfig::FromContainerArchive { + container: "container".to_string(), + files: vec!["/usr/bin/bash".into(), "/usr/bin/zsh".into()], + archive_name: "shells.zip".into(), } ], ); From 38292e4af1fd7dc99448826c0f3c86865b98ce5f Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Fri, 31 Jan 2025 19:28:54 -0800 Subject: [PATCH 10/11] Update build artifact extraction for new enum types Signed-off-by: Robert Detjens --- src/access_handlers/docker.rs | 1 + src/builder/artifacts.rs | 95 ++++++++++++++++++++++++++++----- src/builder/docker.rs | 7 +-- src/builder/mod.rs | 67 ++++++----------------- src/configparser/challenge.rs | 1 + src/tests/parsing/challenges.rs | 3 +- tests/repo/rcds.yaml | 4 +- 7 files changed, 109 insertions(+), 69 deletions(-) diff --git a/src/access_handlers/docker.rs b/src/access_handlers/docker.rs index 5c17463..4de2a59 100644 --- a/src/access_handlers/docker.rs +++ b/src/access_handlers/docker.rs @@ -23,6 +23,7 @@ pub async fn check(profile_name: &str) -> Result<()> { let registry_config = &get_config()?.registry; let client = docker().await?; + // build test image string // registry.example.com/somerepo/testimage:pleaseignore let test_image = format!("{}/credstestimage", 
registry_config.domain); diff --git a/src/builder/artifacts.rs b/src/builder/artifacts.rs index 67daddc..256c3a9 100644 --- a/src/builder/artifacts.rs +++ b/src/builder/artifacts.rs @@ -18,7 +18,7 @@ use crate::configparser::challenge::{ChallengeConfig, ProvideConfig}; pub async fn extract_asset( chal: &ChallengeConfig, provide: &ProvideConfig, - // pod_containers: + profile_name: &str, ) -> Result> { // This needs to handle three cases * 2 sources: // - single or multiple files without renaming (no as: field) @@ -31,36 +31,104 @@ pub async fn extract_asset( // TODO: since this puts artifacts in the repo source folder, this should // try to not overwrite any existing files. - // debug!("extracting assets from container {}", &container.name); + debug!( + "extracting assets for challenge {:?} provide {:?}", + chal.directory, &provide + ); let docker = docker().await?; match provide { + // Repo file paths are relative to the challenge directory, so prepend chal dir + // No action necessary, return path as-is - ProvideConfig::FromRepo { files } => Ok(files.clone()), + ProvideConfig::FromRepo { files } => { + Ok(files.iter().map(|f| chal.directory.join(f)).collect_vec()) + } ProvideConfig::FromRepoRename { from, to } => { - std::fs::rename(from, to)?; + std::fs::copy(chal.directory.join(from), chal.directory.join(to)) + .with_context(|| format!("could not copy repo file {from:?} to {to:?}"))?; Ok(vec![to.clone()]) } ProvideConfig::FromRepoArchive { files, archive_name, } => { - zip_files(archive_name, files)?; + zip_files( + &chal.directory.join(archive_name), + &files.iter().map(|f| chal.directory.join(f)).collect_vec(), + ) + .with_context(|| format!("could not create archive {archive_name:?}"))?; Ok(vec![archive_name.clone()]) } - ProvideConfig::FromContainer { container, files } => extract_files(chal, container, files), + ProvideConfig::FromContainer { + container: container_name, + files, + } => { + let tag = chal.container_tag_for_pod(profile_name, 
container_name)?; + + let name = format!( + "asset-container-{}-{}", + chal.directory.to_string_lossy().replace("/", "-"), + container_name + ); + let container = docker::create_container(&tag, &name).await?; + + let files = extract_files(chal, &container, files).await; + + docker::remove_container(container).await?; + + files + } + .with_context(|| format!("could not copy files {files:?} from container {container_name}")), + ProvideConfig::FromContainerRename { - container, + container: container_name, from, to, - } => extract_rename(chal, container, from, to), + } => { + let tag = chal.container_tag_for_pod(profile_name, container_name)?; + + let name = format!( + "asset-container-{}-{}", + chal.directory.to_string_lossy().replace("/", "-"), + container_name + ); + let container = docker::create_container(&tag, &name).await?; + + let files = extract_rename(chal, &container, from, &chal.directory.join(to)).await; + + docker::remove_container(container).await?; + + files + } + .with_context(|| format!("could not copy file {from:?} from container {container_name}")), + ProvideConfig::FromContainerArchive { - container, + container: container_name, files, archive_name, - } => extract_archive(chal, container, files, archive_name), + } => { + let tag = chal.container_tag_for_pod(profile_name, container_name)?; + + let name = format!( + "asset-container-{}-{}", + chal.directory.to_string_lossy().replace("/", "-"), + container_name + ); + let container = docker::create_container(&tag, &name).await?; + + let files = + extract_archive(chal, &container, files, &chal.directory.join(archive_name)).await; + + docker::remove_container(container).await?; + + files + } + .with_context(|| { + format!("could not create archive {archive_name:?} from container {container_name}") + }), } } @@ -114,14 +182,17 @@ async fn extract_archive( ); // copy all listed files to tempdir - let tempdir = tempdir_in(".")?; + let tempdir = tempfile::Builder::new() + .prefix(".beavercds-archive-") + 
.tempdir_in(".")?; let copied_files = try_join_all(files.iter().map(|from| async { let to = tempdir.path().join(from.file_name().unwrap()); docker::copy_file(container, from, &to).await })) .await?; - zip_files(&chal.directory.join(archive_name), &copied_files)?; + // archive_name already has the chal dir prepended + zip_files(archive_name, &copied_files)?; Ok(vec![chal.directory.join(archive_name)]) } diff --git a/src/builder/docker.rs b/src/builder/docker.rs index c0ef563..fb5a58d 100644 --- a/src/builder/docker.rs +++ b/src/builder/docker.rs @@ -122,7 +122,6 @@ pub async fn push_image(image_tag: &str, creds: &UserPass) -> Result { Ok(tag.to_string()) } -#[tokio::main(flavor = "current_thread")] // make this a sync function pub async fn create_container(image_tag: &str, name: &str) -> Result { debug!("creating container {name:?} from image {image_tag:?}"); let client = docker().await?; @@ -143,7 +142,6 @@ pub async fn create_container(image_tag: &str, name: &str) -> Result Result<()> { debug!("removing container {}", &container.name); let client = docker().await?; @@ -182,7 +180,10 @@ pub async fn copy_file(container: &ContainerInfo, from: &Path, to: &Path) -> Res }); // collect byte stream chunks into full file - let mut temptar = Builder::new().suffix(".tar").tempfile_in(".")?; + let mut temptar = Builder::new() + .prefix(".beavercds-") + .suffix(".tar") + .tempfile_in(".")?; while let Some(chunk) = dl_stream.next().await { temptar.as_file_mut().write_all(&chunk?)?; } diff --git a/src/builder/mod.rs b/src/builder/mod.rs index 0a0809c..7333c23 100644 --- a/src/builder/mod.rs +++ b/src/builder/mod.rs @@ -3,7 +3,8 @@ use anyhow::{anyhow, Context, Error, Result}; use bollard::image::BuildImageOptions; -use futures::stream::Iter; +use futures::future::try_join_all; +use futures::stream::{FuturesOrdered, Iter}; use itertools::Itertools; use simplelog::*; use std::default; @@ -123,55 +124,21 @@ fn build_challenge( if extract_artifacts { info!("extracting build 
artifacts for chal {:?}", chal.directory); - // let (provide_container, provide_static): (Vec<_>, Vec<_>) = - // chal.provide.iter().partition(|p| p.from.is_some()); - - // let extracted_files = provide_container - // .iter() - // // associate container `Provide` entries with their corresponding container image - // .map(|provide| { - // ( - // provide, - // chal.container_tag_for_pod(profile_name, provide.from.as_ref().unwrap()), - // ) - // }) - // // extract each container provide entry - // .map(|(p, tag)| { - // let tag = tag?; - - // let name = format!( - // "asset-container-{}-{}", - // chal.directory.to_string_lossy().replace("/", "-"), - // p.from.clone().unwrap() - // ); - // let container = docker::create_container(&tag, &name)?; - - // let asset_result = - // artifacts::extract_asset(chal, p, &container).with_context(|| { - // format!( - // "failed to extract build artifacts for chal {:?} container {:?}", - // chal.directory, - // p.from.clone().unwrap() - // ) - // }); - - // // clean up container even if it failed - // docker::remove_container(container)?; - - // asset_result - // }) - // .flatten_ok() - // .collect::>>()?; - - // // handle potentially zipping up local files as well - // let local_files = provide_static.iter().map(|provide| { - // match provide.as_file.as_ref() { - // // no archiving needed, pass files as-is - // None => Ok(provide.include.clone()), - // // need to archive multiple files into zip - // Some(as_) => artifacts::zip_files(as_, provide.include.as_ref()).map(|z| vec![z]), - // } - // }); + // extract each challenge provide entry + // this handles both local files and from build containers + let extracted_files = chal + .provide + .iter() + .map(|p| { + artifacts::extract_asset(chal, p, profile_name).with_context(|| { + format!( + "failed to extract build artifacts for chal {:?}", + chal.directory, + ) + }) + }) + .flatten_ok() + .collect::>>()?; info!("extracted artifacts: {:?}", built.assets); } diff --git 
a/src/configparser/challenge.rs b/src/configparser/challenge.rs index e482e08..3c5d2f7 100644 --- a/src/configparser/challenge.rs +++ b/src/configparser/challenge.rs @@ -180,6 +180,7 @@ enum FlagType { } // Parse each distinct kind of Provide action as a separate enum variant +// TODO: enforce relative/absolute paths for repo/container Provide's (`validator` crate?) #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(untagged, deny_unknown_fields)] #[fully_pub] diff --git a/src/tests/parsing/challenges.rs b/src/tests/parsing/challenges.rs index e83796e..2ad84c9 100644 --- a/src/tests/parsing/challenges.rs +++ b/src/tests/parsing/challenges.rs @@ -231,8 +231,7 @@ fn challenge_provide() { - from: container as: pears - include: - - /usr/lib/peaches + include: /usr/lib/peaches - from: container as: shells.zip diff --git a/tests/repo/rcds.yaml b/tests/repo/rcds.yaml index 94d5da8..315af51 100644 --- a/tests/repo/rcds.yaml +++ b/tests/repo/rcds.yaml @@ -22,9 +22,9 @@ points: deploy: # control challenge deployment status explicitly per environment/profile testing: - misc/garf: true + # misc/garf: true pwn/notsh: true - web/bar: true + # web/bar: true profiles: # configure per-environment credentials etc From 587eb3658a5b1b0e161e676aed9373a0a4fe8869 Mon Sep 17 00:00:00 2001 From: Robert Detjens Date: Sat, 1 Feb 2025 13:38:01 -0800 Subject: [PATCH 11/11] associate build result info with its corresponding challenge Signed-off-by: Robert Detjens --- src/builder/mod.rs | 6 +++--- src/deploy/mod.rs | 14 ++++++++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/builder/mod.rs b/src/builder/mod.rs index 7333c23..94ef933 100644 --- a/src/builder/mod.rs +++ b/src/builder/mod.rs @@ -51,10 +51,10 @@ pub fn build_challenges( profile_name: &str, push: bool, extract_artifacts: bool, -) -> Result> { +) -> Result> { enabled_challenges(profile_name)? 
- .iter() - .map(|chal| build_challenge(profile_name, chal, push, extract_artifacts)) + .into_iter() + .map(|chal| build_challenge(profile_name, chal, push, extract_artifacts).map(|r| (chal, r))) .collect::>() } diff --git a/src/deploy/mod.rs b/src/deploy/mod.rs index 26735d6..391406d 100644 --- a/src/deploy/mod.rs +++ b/src/deploy/mod.rs @@ -8,10 +8,13 @@ use crate::builder::BuildResult; use crate::clients::{bucket_client, kube_client}; use crate::cluster_setup as setup; use crate::configparser::config::ProfileConfig; -use crate::configparser::{enabled_challenges, get_config, get_profile_config}; +use crate::configparser::{enabled_challenges, get_config, get_profile_config, ChallengeConfig}; /// Render challenge manifest templates and apply to cluster -pub async fn deploy_challenges(profile_name: &str, build_results: &[BuildResult]) -> Result<()> { +pub async fn deploy_challenges( + profile_name: &str, + build_results: &[(&ChallengeConfig, BuildResult)], +) -> Result<()> { let profile = get_profile_config(profile_name)?; let enabled_challenges = enabled_challenges(profile_name)?; @@ -22,7 +25,7 @@ pub async fn deploy_challenges(profile_name: &str, build_results: &[BuildResult] /// Returns urls of upload files. pub async fn upload_assets( profile_name: &str, - build_results: &[BuildResult], + build_results: &[(&ChallengeConfig, BuildResult)], ) -> Result> { let profile = get_profile_config(profile_name)?; let enabled_challenges = enabled_challenges(profile_name)?; @@ -36,7 +39,10 @@ pub async fn upload_assets( } /// Sync deployed challenges with rCTF frontend -pub async fn update_frontend(profile_name: &str, build_results: &[BuildResult]) -> Result<()> { +pub async fn update_frontend( + profile_name: &str, + build_results: &[(&ChallengeConfig, BuildResult)], +) -> Result<()> { let profile = get_profile_config(profile_name)?; let enabled_challenges = enabled_challenges(profile_name)?;