diff --git a/src/access_handlers/docker.rs b/src/access_handlers/docker.rs
index 5c17463..4de2a59 100644
--- a/src/access_handlers/docker.rs
+++ b/src/access_handlers/docker.rs
@@ -23,6 +23,7 @@ pub async fn check(profile_name: &str) -> Result<()> {
     let registry_config = &get_config()?.registry;
     let client = docker().await?;
 
+    // build test image string
     // registry.example.com/somerepo/testimage:pleaseignore
     let test_image = format!("{}/credstestimage", registry_config.domain);
 
diff --git a/src/access_handlers/s3.rs b/src/access_handlers/s3.rs
index f1ed323..c50888f 100644
--- a/src/access_handlers/s3.rs
+++ b/src/access_handlers/s3.rs
@@ -3,6 +3,7 @@ use s3;
 use simplelog::*;
 use tokio;
 
+use crate::clients::{bucket_client, bucket_client_anonymous};
 use crate::configparser::{
     config::{ProfileConfig, S3Config},
     get_config, get_profile_config,
@@ -60,37 +61,3 @@ pub async fn check(profile_name: &str) -> Result<()> {
 
     Ok(())
 }
-
-/// create bucket client for passed profile config
-pub fn bucket_client(config: &S3Config) -> Result<Box<s3::Bucket>> {
-    trace!("creating bucket client");
-    // TODO: once_cell this so it reuses the same bucket?
-    let region = s3::Region::Custom {
-        region: config.region.clone(),
-        endpoint: config.endpoint.clone(),
-    };
-    let creds = s3::creds::Credentials::new(
-        Some(&config.access_key),
-        Some(&config.secret_key),
-        None,
-        None,
-        None,
-    )?;
-    let bucket = s3::Bucket::new(&config.bucket_name, region, creds)?.with_path_style();
-
-    Ok(bucket)
-}
-
-/// create public/anonymous bucket client for passed profile config
-pub fn bucket_client_anonymous(config: &S3Config) -> Result<Box<s3::Bucket>> {
-    trace!("creating anon bucket client");
-    // TODO: once_cell this so it reuses the same bucket?
-    let region = s3::Region::Custom {
-        region: config.region.clone(),
-        endpoint: config.endpoint.clone(),
-    };
-    let creds = s3::creds::Credentials::anonymous()?;
-    let bucket = s3::Bucket::new(&config.bucket_name, region, creds)?.with_path_style();
-
-    Ok(bucket)
-}
diff --git a/src/builder/artifacts.rs b/src/builder/artifacts.rs
index d114730..256c3a9 100644
--- a/src/builder/artifacts.rs
+++ b/src/builder/artifacts.rs
@@ -10,37 +10,125 @@ use tempfile::tempdir_in;
 use zip;
 
 use crate::builder::docker;
+use crate::clients::docker;
 use crate::configparser::challenge::{ChallengeConfig, ProvideConfig};
 
-/// extract assets from given container name and provide config to challenge directory, return file path(s) extracted
+/// extract assets from provide config (and possibly a container) to the challenge directory, returning extracted file path(s)
 #[tokio::main(flavor = "current_thread")] // make this a sync function
 pub async fn extract_asset(
     chal: &ChallengeConfig,
     provide: &ProvideConfig,
-    container: &docker::ContainerInfo,
+    profile_name: &str,
 ) -> Result<Vec<PathBuf>> {
-    debug!("extracting assets from container {}", &container.name);
-    // This needs to handle three cases:
-    // - single or multiple files without renaming (no as: field)
-    // - single file with rename (one item with as:)
-    // - multiple files as archive (multiple items with as:)
+    // This needs to handle three cases * 2 sources:
+    // - single or multiple files without renaming (no as: field)
+    // - single file with rename (one item with as:)
+    // - multiple files as archive (multiple items with as:)
+    // and whether the file is coming from
+    // - the repo
+    // - or a container
 
     // TODO: since this puts artifacts in the repo source folder, this should
     // try to not overwrite any existing files.
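+    // For reference, the shapes handled here look like this in challenge.yaml
+    // (a hedged example mirroring src/tests/parsing/challenges.rs; file names
+    // are hypothetical):
+    //
+    //   provide:
+    //     - foo.txt                    # FromRepo
+    //     - as: oranges                # FromRepoRename
+    //       include: apples
+    //     - as: stuff.zip              # FromRepoArchive
+    //       include: [ducks, beavers]
+    //     - from: container            # FromContainer
+    //       include: [/foo/bar]
+    //     - from: container            # FromContainerRename
+    //       as: pears
+    //       include: /usr/lib/peaches
+    //     - from: container            # FromContainerArchive
+    //       as: shells.zip
+    //       include: [/usr/bin/bash, /usr/bin/zsh]
+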
-    match &provide.as_file {
-        // no renaming, copy out all as-is
-        None => extract_files(chal, container, &provide.include).await,
-        // (as is keyword, so add underscore)
-        Some(as_) => {
-            if provide.include.len() == 1 {
-                // single file, rename
-                extract_rename(chal, container, &provide.include[0], as_).await
-            } else {
-                // multiple files, zip as archive
-                extract_archive(chal, container, &provide.include, as_).await
-            }
-        }
+    debug!(
+        "extracting assets for challenge {:?} provide {:?}",
+        chal.directory, &provide
+    );
+
+    let docker = docker().await?;
+
+    match provide {
+        // Repo file paths are relative to the challenge directory, so prepend chal dir
+
+        // No action necessary, return path as-is
+        ProvideConfig::FromRepo { files } => {
+            Ok(files.iter().map(|f| chal.directory.join(f)).collect_vec())
+        }
+        ProvideConfig::FromRepoRename { from, to } => {
+            std::fs::copy(chal.directory.join(from), chal.directory.join(to))
+                .with_context(|| format!("could not copy repo file {from:?} to {to:?}"))?;
+            Ok(vec![chal.directory.join(to)])
+        }
+        ProvideConfig::FromRepoArchive {
+            files,
+            archive_name,
+        } => {
+            zip_files(
+                &chal.directory.join(archive_name),
+                &files.iter().map(|f| chal.directory.join(f)).collect_vec(),
+            )
+            .with_context(|| format!("could not create archive {archive_name:?}"))?;
+            Ok(vec![chal.directory.join(archive_name)])
+        }
+
+        ProvideConfig::FromContainer {
+            container: container_name,
+            files,
+        } => {
+            let tag = chal.container_tag_for_pod(profile_name, container_name)?;
+
+            let name = format!(
+                "asset-container-{}-{}",
+                chal.directory.to_string_lossy().replace("/", "-"),
+                container_name
+            );
+            let container = docker::create_container(&tag, &name).await?;
+
+            let files = extract_files(chal, &container, files).await;
+
+            // clean up container even if the extract failed
+            docker::remove_container(container).await?;
+
+            files
+        }
+        .with_context(|| format!("could not copy files {files:?} from container {container_name}")),
+
+        ProvideConfig::FromContainerRename {
+            container: container_name,
+            from,
+            to,
+        } => {
+            let tag = chal.container_tag_for_pod(profile_name, container_name)?;
+
+            let name = format!(
+                "asset-container-{}-{}",
+                chal.directory.to_string_lossy().replace("/", "-"),
+                container_name
+            );
+            let container = docker::create_container(&tag, &name).await?;
+
+            let files = extract_rename(chal, &container, from, &chal.directory.join(to)).await;
+
+            docker::remove_container(container).await?;
+
+            files
+        }
+        .with_context(|| format!("could not copy file {from:?} from container {container_name}")),
+
+        ProvideConfig::FromContainerArchive {
+            container: container_name,
+            files,
+            archive_name,
+        } => {
+            let tag = chal.container_tag_for_pod(profile_name, container_name)?;
+
+            let name = format!(
+                "asset-container-{}-{}",
+                chal.directory.to_string_lossy().replace("/", "-"),
+                container_name
+            );
+            let container = docker::create_container(&tag, &name).await?;
+
+            let files =
+                extract_archive(chal, &container, files, &chal.directory.join(archive_name)).await;
+
+            docker::remove_container(container).await?;
+
+            files
+        }
+        .with_context(|| {
+            format!("could not create archive {archive_name:?} from container {container_name}")
+        }),
     }
 }
 
@@ -48,7 +136,7 @@ pub async fn extract_asset(
 async fn extract_files(
     chal: &ChallengeConfig,
     container: &docker::ContainerInfo,
-    files: &Vec<String>,
+    files: &[PathBuf],
 ) -> Result<Vec<PathBuf>> {
     debug!(
         "extracting {} files without renaming: {:?}",
         files.len(),
         files
     );
 
-    try_join_all(files.iter().map(|f| {
-        let from = PathBuf::from(f);
+    try_join_all(files.iter().map(|from| async {
         // use basename of source file as target name
         let to = chal.directory.join(from.file_name().unwrap());
-
-        docker::copy_file(container, from, to)
+        docker::copy_file(container, from, &to).await
     }))
     .await
 }
 
@@ -70,13 +156,12 @@ async fn extract_files(
 async fn extract_rename(
     chal: &ChallengeConfig,
     container: &docker::ContainerInfo,
-    file: &str,
-    new_name: &str,
+    file: &Path,
+    new_name: &Path,
 ) -> Result<Vec<PathBuf>> {
     debug!("extracting file {:?} renamed to {:?}", file, new_name);
 
-    let new_file =
-        docker::copy_file(container, PathBuf::from(file), PathBuf::from(new_name)).await?;
+    let new_file = docker::copy_file(container, file, new_name).await?;
 
     Ok(vec![new_file])
 }
 
@@ -85,8 +170,9 @@ async fn extract_rename(
 async fn extract_archive(
     chal: &ChallengeConfig,
     container: &docker::ContainerInfo,
-    files: &Vec<String>,
-    archive_name: &str,
+    files: &[PathBuf],
+    archive_name: &Path,
 ) -> Result<Vec<PathBuf>> {
     debug!(
         "extracting {} files {:?} into archive {:?}",
         files.len(),
         files,
         archive_name
     );
 
     // copy all listed files to tempdir
-    let tempdir = tempdir_in(".")?;
-    let copied_files = try_join_all(files.iter().map(|f| {
-        let from = PathBuf::from(f);
+    let tempdir = tempfile::Builder::new()
+        .prefix(".beavercds-archive-")
+        .tempdir_in(".")?;
+    let copied_files = try_join_all(files.iter().map(|from| async {
         let to = tempdir.path().join(from.file_name().unwrap());
-
-        docker::copy_file(container, from, to)
+        docker::copy_file(container, from, &to).await
     }))
     .await?;
 
-    // write them all to new zip
-    let zipfile = File::create(chal.directory.join(archive_name))?;
+    // archive_name already has the chal dir prepended by the caller
+    zip_files(archive_name, &copied_files)?;
+
+    Ok(vec![archive_name.to_path_buf()])
+}
+
+/// Add multiple local `files` to a zipfile at `archive_name`.
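+///
+/// A hedged usage sketch (paths are hypothetical):
+/// `zip_files(Path::new("handout.zip"), &[PathBuf::from("flag.txt")])?;`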
+pub fn zip_files(archive_name: &Path, files: &[PathBuf]) -> Result<PathBuf> {
+    debug!("creating zip at {:?}", archive_name);
+    let zipfile = File::create(archive_name)?;
     let mut z = zip::ZipWriter::new(zipfile);
     let opts = zip::write::SimpleFileOptions::default();
 
     let mut buf = vec![];
-    for path in copied_files.into_iter() {
-        trace!("adding {:?} to zip", &path);
-        File::open(&path)?.read_to_end(&mut buf)?;
-        z.start_file(path.to_string_lossy(), opts)?;
+    for path in files.iter() {
+        trace!("adding {:?} to zip", path);
+        // TODO: dont read entire file into memory
+        File::open(path)?.read_to_end(&mut buf)?;
+        // TODO: should this always do basename? some chals might need specific
+        // file structure but including dirs should work fine
+        z.start_file(path.file_name().unwrap().to_string_lossy(), opts)?;
         z.write_all(&buf)?;
         buf.clear();
     }
-    z.finish();
+    z.finish()?;
 
-    Ok(vec![chal.directory.join(archive_name)])
+    Ok(archive_name.to_path_buf())
 }
diff --git a/src/builder/docker.rs b/src/builder/docker.rs
index 0c9c807..fb5a58d 100644
--- a/src/builder/docker.rs
+++ b/src/builder/docker.rs
@@ -122,7 +122,6 @@ pub async fn push_image(image_tag: &str, creds: &UserPass) -> Result<String> {
     Ok(tag.to_string())
 }
 
-#[tokio::main(flavor = "current_thread")] // make this a sync function
 pub async fn create_container(image_tag: &str, name: &str) -> Result<ContainerInfo> {
     debug!("creating container {name:?} from image {image_tag:?}");
     let client = docker().await?;
@@ -143,7 +142,6 @@ pub async fn create_container(image_tag: &str, name: &str) -> Result<ContainerInfo> {
 
-#[tokio::main(flavor = "current_thread")] // make this a sync function
 pub async fn remove_container(container: ContainerInfo) -> Result<()> {
     debug!("removing container {}", &container.name);
     let client = docker().await?;
@@ -157,7 +155,7 @@ pub async fn remove_container(container: ContainerInfo) -> Result<()> {
     Ok(())
 }
 
-pub async fn copy_file(container: &ContainerInfo, from: PathBuf, to: PathBuf) -> Result<PathBuf> {
+pub async fn copy_file(container: &ContainerInfo, from: &Path, to: &Path) -> Result<PathBuf> {
     trace!("copying {}:{from:?} to {to:?}", container.name);
     let client = docker().await?;
 
@@ -182,7 +180,10 @@ pub async fn copy_file(container: &ContainerInfo, from: PathBuf, to: PathBuf) -> Result<PathBuf> {
     });
 
     // collect byte stream chunks into full file
-    let mut temptar = Builder::new().suffix(".tar").tempfile_in(".")?;
+    let mut temptar = Builder::new()
+        .prefix(".beavercds-")
+        .suffix(".tar")
+        .tempfile_in(".")?;
     while let Some(chunk) = dl_stream.next().await {
         temptar.as_file_mut().write_all(&chunk?)?;
     }
@@ -197,7 +198,7 @@ pub async fn copy_file(container: &ContainerInfo, from: PathBuf, to: PathBuf) -> Result<PathBuf> {
     if let Some(mut entry_r) = tar.entries()?.next() {
         let mut entry = entry_r?;
         trace!("got entry: {:?}", entry.path());
-        let mut target = File::create(&to)?;
+        let mut target = File::create(to)?;
         io::copy(&mut entry, &mut target)?;
     } else {
         bail!(
diff --git a/src/builder/mod.rs b/src/builder/mod.rs
index 7801b1a..94ef933 100644
--- a/src/builder/mod.rs
+++ b/src/builder/mod.rs
@@ -3,23 +3,22 @@
 use anyhow::{anyhow, Context, Error, Result};
 use bollard::image::BuildImageOptions;
-use futures::stream::Iter;
+use futures::future::try_join_all;
+use futures::stream::{FuturesOrdered, Iter};
 use itertools::Itertools;
 use simplelog::*;
 use std::default;
 use std::fmt::Pointer;
 use std::iter::zip;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 
 use crate::configparser::challenge::{
     BuildObject, ChallengeConfig, ImageSource::*, Pod, ProvideConfig,
 };
 use crate::configparser::{enabled_challenges, get_config};
 
-pub mod docker;
-
 pub mod artifacts;
-use artifacts::extract_asset;
+pub mod docker;
 
 // define tag format as reusable macro
 macro_rules! image_tag_str {
     () => {
         "{registry}/{challenge}-{container}:{profile}"
     };
 }
+pub(super) use image_tag_str;
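+// With hypothetical values for each field, the tag macro expands to
+// something like: "registry.example.com/mychal-main:testing"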
+
+/// Information about all of a challenge's build artifacts.
+#[derive(Debug)]
+pub struct BuildResult {
+    /// Container image tags for all containers in the challenge.
+    /// Will be empty if the challenge has no containers.
+    tags: Vec<TagWithSource>,
+    /// Path on disk to local assets (both built and static).
+    /// Will be empty if the challenge has no file assets.
+    assets: Vec<PathBuf>,
+}
+
+/// Tag string with added context of where it came from (built locally or an upstream image)
+#[derive(Debug)]
+pub enum TagWithSource {
+    Upstream(String),
+    Built(String),
+}
 
-/// Build all enabled challenges for the given profile. Returns tags built
+/// Build all enabled challenges for the given profile. Returns the build
+/// results for each challenge.
 pub fn build_challenges(
     profile_name: &str,
     push: bool,
     extract_artifacts: bool,
-) -> Result<Vec<String>> {
+) -> Result<Vec<(&ChallengeConfig, BuildResult)>> {
     enabled_challenges(profile_name)?
-        .iter()
-        .map(|chal| build_challenge(profile_name, chal, push, extract_artifacts))
-        .flatten_ok()
+        .into_iter()
+        .map(|chal| build_challenge(profile_name, chal, push, extract_artifacts).map(|r| (chal, r)))
         .collect::<Result<_>>()
 }
 
@@ -47,44 +64,55 @@
 fn build_challenge(
     profile_name: &str,
     chal: &ChallengeConfig,
     push: bool,
     extract_artifacts: bool,
-) -> Result<Vec<String>> {
+) -> Result<BuildResult> {
     debug!("building images for chal {:?}", chal.directory);
     let config = get_config()?;
 
-    let built_tags: Vec<_> = chal
+    let mut built = BuildResult {
+        tags: vec![],
+        assets: vec![],
+    };
+
+    built.tags = chal
         .pods
         .iter()
-        .filter_map(|p| match &p.image_source {
-            Image(_) => None,
-            Build(b) => {
-                let tag = format!(
-                    image_tag_str!(),
-                    registry = config.registry.domain,
-                    challenge = chal.name,
-                    container = p.name,
-                    profile = profile_name
-                );
-                Some(
-                    docker::build_image(&chal.directory, b, &tag).with_context(|| {
-                        format!(
-                            "error building image {} for chal {}",
-                            p.name,
-                            chal.directory.to_string_lossy()
-                        )
-                    }),
-                )
+        .map(|p| match &p.image_source {
+            Image(tag) => Ok(TagWithSource::Upstream(tag.to_string())),
+            // build any pods that need building
+            Build(build) => {
+                let tag = chal.container_tag_for_pod(profile_name, &p.name)?;
+
+                let res = docker::build_image(&chal.directory, build, &tag).with_context(|| {
+                    format!(
+                        "error building image {} for chal {}",
+                        p.name,
+                        chal.directory.to_string_lossy()
+                    )
+                });
+                // map result tag string into enum
+                res.map(TagWithSource::Built)
             }
         })
         .collect::<Result<_>>()?;
 
     if push {
+        // only need to push tags we actually built
+        let tags_to_push = built
+            .tags
+            .iter()
+            .filter_map(|t| match t {
+                TagWithSource::Built(t) => Some(t),
+                TagWithSource::Upstream(_) => None,
+            })
+            .collect_vec();
+
         debug!(
             "pushing {} tags for chal {:?}",
-            built_tags.len(),
+            tags_to_push.len(),
             chal.directory
         );
 
-        built_tags
+        tags_to_push
             .iter()
             .map(|tag| {
                 docker::push_image(tag, &config.registry.build)
@@ -96,53 +124,24 @@ fn build_challenge(
     if extract_artifacts {
         info!("extracting build artifacts for chal {:?}", chal.directory);
 
-        // find the matching tag for Provide entries that have a `from:` source
-        let image_assoc = chal
+        // extract each challenge provide entry
+        // this handles both local files and from build containers
+        let extracted_files = chal
             .provide
             .iter()
-            .filter_map(|p| {
-                p.from.as_ref().map(|f| {
-                    (
-                        p,
-                        format!(
-                            image_tag_str!(),
-                            registry = config.registry.domain,
-                            challenge = chal.name,
-                            container = f,
-                            profile = profile_name
-                        ),
-                    )
-                })
-            })
-            .collect_vec();
-
-        let assets = image_assoc
-            .into_iter()
-            .map(|(p, tag)| {
-                let name = format!(
-                    "asset-container-{}-{}",
-                    chal.directory.to_string_lossy().replace("/", "-"),
-                    p.from.clone().unwrap()
-                );
-                let container = docker::create_container(&tag, &name)?;
-
-                let asset_result = extract_asset(chal, p, &container).with_context(|| {
+            .map(|p| {
+                artifacts::extract_asset(chal, p, profile_name).with_context(|| {
                     format!(
-                        "failed to extract build artifacts for chal {:?} container {:?}",
+                        "failed to extract build artifacts for chal {:?}",
                         chal.directory,
-                        p.from.clone().unwrap()
                     )
-                });
-
-                // clean up container even if it failed
-                docker::remove_container(container)?;
-
-                asset_result
+                })
             })
             .flatten_ok()
             .collect::<Result<Vec<_>>>()?;
 
-        info!("extracted artifacts: {:?}", assets);
+        built.assets = extracted_files;
+        info!("extracted artifacts: {:?}", built.assets);
     }
 
-    Ok(built_tags)
+
+    Ok(built)
 }
diff --git a/src/clients.rs b/src/clients.rs
index 2b26b45..88230a1 100644
--- a/src/clients.rs
+++ b/src/clients.rs
@@ -9,6 +9,7 @@ use kube::{
     core::ResourceExt,
     discovery::{ApiCapabilities, ApiResource, Discovery, Scope},
 };
+use s3;
 use simplelog::*;
 
 use crate::configparser::config;
@@ -49,6 +50,44 @@ pub async fn engine_type() -> EngineType {
     }
 }
 
+//
+// S3 stuff
+//
+
+/// create bucket client for passed profile config
+pub fn bucket_client(config: &config::S3Config) -> Result<Box<s3::Bucket>> {
+    trace!("creating bucket client");
+    // TODO: once_cell this so it reuses the same bucket?
+    let region = s3::Region::Custom {
+        region: config.region.clone(),
+        endpoint: config.endpoint.clone(),
+    };
+    let creds = s3::creds::Credentials::new(
+        Some(&config.access_key),
+        Some(&config.secret_key),
+        None,
+        None,
+        None,
+    )?;
+    let bucket = s3::Bucket::new(&config.bucket_name, region, creds)?.with_path_style();
+
+    Ok(bucket)
+}
+
+/// create public/anonymous bucket client for passed profile config
+pub fn bucket_client_anonymous(config: &config::S3Config) -> Result<Box<s3::Bucket>> {
+    trace!("creating anon bucket client");
+    // TODO: once_cell this so it reuses the same bucket?
+    let region = s3::Region::Custom {
+        region: config.region.clone(),
+        endpoint: config.endpoint.clone(),
+    };
+    let creds = s3::creds::Credentials::anonymous()?;
+    let bucket = s3::Bucket::new(&config.bucket_name, region, creds)?.with_path_style();
+
+    Ok(bucket)
+}
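+
+// Hedged usage sketch (assumes rust-s3's async `put_object`; the path and
+// bytes here are hypothetical):
+//   let bucket = bucket_client(&profile.s3)?;
+//   bucket.put_object("/handout.zip", &bytes).await?;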
+
 //
 // Kubernetes stuff
 //
diff --git a/src/cluster_setup/mod.rs b/src/cluster_setup/mod.rs
index c5c2795..cb9a19e 100644
--- a/src/cluster_setup/mod.rs
+++ b/src/cluster_setup/mod.rs
@@ -27,6 +27,9 @@ use crate::configparser::{config, get_config, get_profile_config};
 // Some components can or must be deployed and configured ahead of time, like
 // the ingress controller, cert-manager, and external-dns
 
+// install these charts into this namespace
+pub const INGRESS_NAMESPACE: &str = "ingress";
+
 pub async fn install_ingress(profile: &config::ProfileConfig) -> Result<()> {
     info!("deploying ingress-nginx chart...");
 
@@ -38,7 +41,7 @@ pub async fn install_ingress(profile: &config::ProfileConfig) -> Result<()> {
         "ingress-nginx",
         Some("https://kubernetes.github.io/ingress-nginx"),
         "ingress-nginx",
-        "ingress",
+        INGRESS_NAMESPACE,
         VALUES,
     )
     .context("failed to install ingress-nginx helm chart")
@@ -55,7 +58,7 @@ pub async fn install_certmanager(profile: &config::ProfileConfig) -> Result<()> {
         "cert-manager",
         Some("https://charts.jetstack.io"),
         "cert-manager",
-        "ingress",
+        INGRESS_NAMESPACE,
         VALUES,
     )?;
 
@@ -87,7 +90,7 @@ pub async fn install_extdns(profile: &config::ProfileConfig) -> Result<()> {
         "oci://registry-1.docker.io/bitnamicharts/external-dns",
         None,
         "external-dns",
-        "ingress",
+        INGRESS_NAMESPACE,
         &values,
     )
 }
diff --git a/src/commands/build.rs b/src/commands/build.rs
index b6e89d7..d4ce2ad 100644
--- a/src/commands/build.rs
+++ b/src/commands/build.rs
@@ -8,8 +8,8 @@ use crate::configparser::{get_config, get_profile_config};
 pub fn run(profile_name: &str, push: &bool, extract: &bool) {
     info!("building images...");
 
-    let tags = match build_challenges(profile_name, *push, *extract) {
-        Ok(tags) => tags,
+    let results = match build_challenges(profile_name, *push, *extract) {
+        Ok(results) => results,
         Err(e) => {
             error!("{e:?}");
             exit(1)
diff --git a/src/commands/deploy.rs b/src/commands/deploy.rs
index 890f095..34af71f 100644
--- a/src/commands/deploy.rs
+++ b/src/commands/deploy.rs
@@ -1,3 +1,166 @@
-pub fn run(_profile: &str, _no_build: &bool, _dry_run: &bool) {
-    println!("running deploy!");
+use anyhow::{anyhow, bail, Context, Error, Result};
+use itertools::Itertools;
+use k8s_openapi::api::core::v1::Secret;
+use kube::api::ListParams;
+use simplelog::*;
+use std::env::current_exe;
+use std::process::exit;
+
+use crate::clients::kube_client;
+use crate::cluster_setup;
+use crate::configparser::config::ProfileConfig;
+use crate::configparser::{get_config, get_profile_config};
+
+use crate::builder::build_challenges;
+use crate::deploy::{deploy_challenges, update_frontend, upload_assets};
+
+#[tokio::main(flavor = "current_thread")] // make this a sync function
+pub async fn run(profile_name: &str, no_build: &bool, _dry_run: &bool) {
+    let profile = get_profile_config(profile_name).unwrap();
+
+    // has the cluster been setup?
+    if let Err(e) = check_setup(profile).await {
+        error!("{e:?}");
+        exit(1);
+    }
+
+    // build before deploying
+    if *no_build {
+        warn!("");
+        warn!("Not building before deploying! are you sure this is a good idea?");
+        warn!("");
+    }
+
+    // TODO: this still builds even when no_build is set; honor the flag here
+    info!("building challenges...");
+    let build_results = match build_challenges(profile_name, true, true) {
+        Ok(result) => result,
+        Err(e) => {
+            error!("{e:?}");
+            exit(1);
+        }
+    };
+
+    // deploy needs to:
+    //   A) render kubernetes manifests
+    //      - namespace, deployment, service, ingress
+    //      - upgrade ingress config with new listen ports
+    //
+    //   B) upload asset files to bucket
+    //
+    //   C) update frontend with new state of challenges
+
+    // A)
+    info!("deploying challenges...");
+    if let Err(e) = deploy_challenges(profile_name, &build_results).await {
+        error!("{e:?}");
+        exit(1);
+    }
+
+    // B)
+    info!("uploading challenge assets...");
+    if let Err(e) = upload_assets(profile_name, &build_results).await {
+        error!("{e:?}");
+        exit(1);
+    }
+
+    // C)
+    info!("updating frontend...");
+    if let Err(e) = update_frontend(profile_name, &build_results).await {
+        error!("{e:?}");
+        exit(1);
+    }
+}
+
+/// check to make sure that the needed ingress charts are deployed and running
+async fn check_setup(profile: &ProfileConfig) -> Result<()> {
+    let kube = kube_client(profile).await?;
+    let secrets: kube::Api<Secret> = kube::Api::namespaced(kube, cluster_setup::INGRESS_NAMESPACE);
+
+    let all_releases = secrets
+        .list_metadata(&ListParams::default().labels("owner=helm"))
+        .await?;
+
+    // pull helm release version from secret label
+    macro_rules! helm_version {
+        ($s:ident) => {
+            $s.get("version")
+                .unwrap_or(&"".to_string())
+                .parse::<i32>()
+                .unwrap_or(0)
+        };
+    }
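+
+    // For context, Helm 3 stores each release revision as a Secret (type
+    // helm.sh/release.v1) whose labels look roughly like this hedged example:
+    //   labels:
+    //     owner: helm
+    //     name: ingress-nginx
+    //     status: deployed
+    //     version: "3"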
+
+    let expected_charts = ["ingress-nginx", "cert-manager", "external-dns"];
+    let latest_releases = expected_charts
+        .iter()
+        .map(|chart| {
+            // pick latest release
+            all_releases
+                .iter()
+                .map(|r| r.metadata.labels.as_ref().unwrap())
+                .filter(|rl| rl.get("name") == Some(&chart.to_string()))
+                .max_by(|a, b| helm_version!(a).cmp(&helm_version!(b)))
+        })
+        .collect_vec();
+
+    enum ChartFailure {
+        Missing(String),
+        DeploymentFailed(String),
+    }
+
+    // make sure all releases are present and deployed successfully
+    let failures = latest_releases
+        .iter()
+        .zip(expected_charts)
+        .filter_map(|(r, c)| {
+            // is label status=deployed ?
+            if r.is_none() {
+                return Some(ChartFailure::Missing(c.to_string()));
+            }
+
+            if r.unwrap().get("status") == Some(&"deployed".to_string()) {
+                // all is good
+                None
+            } else {
+                Some(ChartFailure::DeploymentFailed(c.to_string()))
+            }
+        })
+        .collect_vec();
+
+    if !failures.is_empty() {
+        // if any errors are present, collect/reduce them all into one error via
+        // anyhow context() calls.
+        //
+        // TODO: this should probably be returning Vec<Error> instead of a
+        // single Error chain. should this be in run() to present errors there
+        // instead of chaining and returning one combined Error here?
+        #[allow(clippy::manual_try_fold)] // need to build the Result ourselves
+        failures
+            .iter()
+            .fold(Err(anyhow!("")), |e, reason| match reason {
+                ChartFailure::Missing(c) => e.with_context(|| {
+                    format!(
+                        "chart {}/{c} is not deployed",
+                        cluster_setup::INGRESS_NAMESPACE
+                    )
+                }),
+                ChartFailure::DeploymentFailed(c) => e.with_context(|| {
+                    format!(
+                        "chart {}/{c} is in a failed state",
+                        cluster_setup::INGRESS_NAMESPACE
+                    )
+                }),
+            })
+            .with_context(|| {
+                format!(
+                    "cluster has not been set up with needed charts (run `{} cluster-setup`)",
+                    current_exe()
+                        .unwrap()
+                        .file_name()
+                        .unwrap_or_default()
+                        .to_string_lossy()
+                )
+            })
+    } else {
+        Ok(())
+    }
 }
diff --git a/src/commands/mod.rs b/src/commands/mod.rs
index 4b39abf..62beb26 100644
--- a/src/commands/mod.rs
+++ b/src/commands/mod.rs
@@ -3,3 +3,6 @@ pub mod check_access;
 pub mod cluster_setup;
 pub mod deploy;
 pub mod validate;
+
+// These modules should not do much; they act mostly as thunks that handle
+// displaying the outputs/errors of the real functions.
diff --git a/src/configparser/challenge.rs b/src/configparser/challenge.rs
index 193544c..3c5d2f7 100644
--- a/src/configparser/challenge.rs
+++ b/src/configparser/challenge.rs
@@ -12,8 +12,10 @@ use std::path::{Path, PathBuf};
 use std::str::FromStr;
 use void::Void;
 
+use crate::builder::image_tag_str;
 use crate::configparser::config::Resource;
 use crate::configparser::field_coersion::string_or_struct;
+use crate::configparser::get_config;
 
 pub fn parse_all() -> Result<Vec<ChallengeConfig>, Vec<Error>> {
     // find all challenge.yaml files
@@ -138,6 +140,29 @@ pub struct ChallengeConfig {
     #[serde(default)]
     pods: Vec<Pod>, // optional if no containers used
 }
+impl ChallengeConfig {
+    /// Return the container image tag for the pod: either the upstream image, or
+    /// the tag to be built if the image is built from source.
+    pub fn container_tag_for_pod(&self, profile_name: &str, pod_name: &str) -> Result<String> {
+        let config = get_config()?;
+        let pod = self
+            .pods
+            .iter()
+            .find(|p| p.name == pod_name)
+            .ok_or_else(|| anyhow!("pod {} not found in challenge", pod_name))?;
+
+        match &pod.image_source {
+            ImageSource::Image(t) => Ok(t.to_string()),
+            ImageSource::Build(_) => Ok(format!(
+                image_tag_str!(),
+                registry = config.registry.domain,
+                challenge = self.name,
+                container = pod.name,
+                profile = profile_name
+            )),
+        }
+    }
+}
 
 fn default_difficulty() -> i64 {
     1
@@ -148,63 +173,76 @@ fn default_difficulty() -> i64 {
 #[fully_pub]
 enum FlagType {
     RawString(String),
-    File(FilePath),
-    Text(FileText),
-    Regex(FileRegex),
-    Verifier(FileVerifier),
+    File { file: PathBuf },
+    Text { text: String },
+    Regex { regex: String },
+    Verifier { verifier: String },
 }
 
+// Parse each distinct kind of Provide action as a separate enum variant
+// TODO: enforce relative/absolute paths for repo/container Provide's (`validator` crate?)
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
+#[serde(untagged, deny_unknown_fields)]
 #[fully_pub]
-struct FilePath {
-    file: String,
-}
-
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
-#[fully_pub]
-struct FileText {
-    text: String,
-}
-
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
-#[fully_pub]
-struct FileRegex {
-    regex: String,
-}
-
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
-#[fully_pub]
-struct FileVerifier {
-    verifier: String,
-}
-
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
-#[fully_pub]
-struct ProvideConfig {
-    /// The challenge container image where the file should be fetched from,
-    /// based on the names in `pods`. If omitted, the default value is the repo
-    /// challenge directory.
-    #[serde(default)]
-    from: Option<String>,
-
-    /// Rename or create zip file from included files. If only one file is
-    /// included, it is renamed to this value. If multiple files are included,
-    /// they are zipped into an archive with this filename. If this is omitted,
-    /// each file(s) are listed individually with no renaming.
-    #[serde(default, rename = "as")]
-    as_file: Option<String>,
-
-    /// List of files to read from the repo or container. If reading from the
-    /// repo source files, only relative paths are supported.
-    include: Vec<String>,
+enum ProvideConfig {
+    /// Upload file(s) as-is.
+    /// Single or multiple files with no as: or from:
+    /// Default if only a string is given.
+    FromRepo {
+        #[serde(rename = "include")]
+        files: Vec<PathBuf>,
+    },
+    /// Rename single file before uploading.
+    /// Single file with as: field without from:
+    FromRepoRename {
+        #[serde(rename = "include")]
+        from: PathBuf,
+        #[serde(rename = "as")]
+        to: PathBuf,
+    },
+    /// Upload multiple files in zip archive
+    /// Multiple files with as: field without from:
+    FromRepoArchive {
+        #[serde(rename = "include")]
+        files: Vec<PathBuf>,
+        #[serde(rename = "as")]
+        archive_name: PathBuf,
+    },
+
+    /// Upload file(s) from container as-is.
+    /// Single or multiple files with no as:
+    FromContainer {
+        #[serde(rename = "from")]
+        container: String,
+        #[serde(rename = "include")]
+        files: Vec<PathBuf>,
+    },
+    /// Rename single file from container before uploading.
+    /// Single file with as: field
+    FromContainerRename {
+        #[serde(rename = "from")]
+        container: String,
+        #[serde(rename = "include")]
+        from: PathBuf,
+        #[serde(rename = "as")]
+        to: PathBuf,
+    },
+    /// Upload multiple files from container in zip archive
+    /// Multiple files with as: field
+    FromContainerArchive {
+        #[serde(rename = "from")]
+        container: String,
+        #[serde(rename = "include")]
+        files: Vec<PathBuf>,
+        #[serde(rename = "as")]
+        archive_name: PathBuf,
+    },
 }
 
 impl FromStr for ProvideConfig {
     type Err = Void;
     fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
-        Ok(ProvideConfig {
-            from: None,
-            as_file: None,
-            include: vec![s.to_string()],
+        Ok(ProvideConfig::FromRepo {
+            files: vec![PathBuf::from(s)],
         })
     }
 }
diff --git a/src/deploy/mod.rs b/src/deploy/mod.rs
new file mode 100644
index 0000000..391406d
--- /dev/null
+++ b/src/deploy/mod.rs
@@ -0,0 +1,50 @@
+use std::path::PathBuf;
+
+use anyhow::{anyhow, bail, Context, Error, Result};
+use itertools::Itertools;
+use simplelog::*;
+
+use crate::builder::BuildResult;
+use crate::clients::{bucket_client, kube_client};
+use crate::cluster_setup as setup;
+use crate::configparser::config::ProfileConfig;
+use crate::configparser::{enabled_challenges, get_config, get_profile_config, ChallengeConfig};
+
+/// Render challenge manifest templates and apply to cluster
+pub async fn deploy_challenges(
+    profile_name: &str,
+    build_results: &[(&ChallengeConfig, BuildResult)],
+) -> Result<()> {
+    let profile = get_profile_config(profile_name)?;
+    let enabled_challenges = enabled_challenges(profile_name)?;
+
+    todo!()
+}
+
+/// Upload files to frontend asset bucket.
+/// Returns URLs of the uploaded files.
+pub async fn upload_assets(
+    profile_name: &str,
+    build_results: &[(&ChallengeConfig, BuildResult)],
+) -> Result<Vec<String>> {
+    let profile = get_profile_config(profile_name)?;
+    let enabled_challenges = enabled_challenges(profile_name)?;
+
+    let bucket = bucket_client(&profile.s3)?;
+
+    todo!()
+
+    // TODO: should uploaded URLs be a (generated) part of the challenge config
+    // struct?
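+
+    // A hedged sketch of the eventual upload loop, assuming rust-s3's
+    // `put_object` and access to the BuildResult assets list (both
+    // unverified here):
+    //
+    //   for (chal, result) in build_results {
+    //       for asset in &result.assets {
+    //           let name = asset.file_name().unwrap().to_string_lossy();
+    //           let key = format!("assets/{}/{}", chal.name, name);
+    //           bucket.put_object(&key, &std::fs::read(asset)?).await?;
+    //       }
+    //   }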
+}
+
+/// Sync deployed challenges with rCTF frontend
+pub async fn update_frontend(
+    profile_name: &str,
+    build_results: &[(&ChallengeConfig, BuildResult)],
+) -> Result<()> {
+    let profile = get_profile_config(profile_name)?;
+    let enabled_challenges = enabled_challenges(profile_name)?;
+
+    todo!()
+}
diff --git a/src/lib.rs b/src/lib.rs
index 3178fd2..9ac8b4f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -8,6 +8,7 @@ pub mod clients;
 pub mod cluster_setup;
 pub mod commands;
 pub mod configparser;
+pub mod deploy;
 
 #[cfg(test)]
 mod tests;
diff --git a/src/tests/parsing/challenges.rs b/src/tests/parsing/challenges.rs
index 04b8f8b..2ad84c9 100644
--- a/src/tests/parsing/challenges.rs
+++ b/src/tests/parsing/challenges.rs
@@ -88,9 +88,9 @@ fn challenge_two_levels() {
             category: "foo".to_string(),
             directory: PathBuf::from("foo/test"),
 
-            flag: FlagType::Text(FileText {
+            flag: FlagType::Text {
                 text: "test{it-works}".to_string()
-            }),
+            },
 
             provide: vec![],
             pods: vec![],
@@ -212,10 +212,14 @@ fn challenge_provide() {
         provide:
           - foo.txt
 
+          - include:
               - bar.txt
               - baz.txt
 
+          - as: oranges
+            include: apples
+
           - as: stuff.zip
             include:
               - ducks
@@ -225,6 +229,10 @@ fn challenge_provide() {
             include:
               - /foo/bar
 
+          - from: container
+            as: pears
+            include: /usr/lib/peaches
+
           - from: container
             as: shells.zip
             include:
@@ -238,30 +246,33 @@ fn challenge_provide() {
     assert_eq!(
         chals[0].provide,
         vec![
-            ProvideConfig {
-                from: None,
-                as_file: None,
-                include: vec!["foo.txt".to_string()]
+            ProvideConfig::FromRepo {
+                files: vec!["foo.txt".into()]
+            },
+            ProvideConfig::FromRepo {
+                files: vec!["bar.txt".into(), "baz.txt".into()]
+            },
+            ProvideConfig::FromRepoRename {
+                from: "apples".into(),
+                to: "oranges".into()
             },
-            ProvideConfig {
-                from: None,
-                as_file: None,
-                include: vec!["bar.txt".to_string(), "baz.txt".to_string()]
+            ProvideConfig::FromRepoArchive {
+                files: vec!["ducks".into(), "beavers".into()],
+                archive_name: "stuff.zip".into()
             },
-            ProvideConfig {
-                from: None,
-                as_file: Some("stuff.zip".to_string()),
-                include: vec!["ducks".to_string(), "beavers".to_string()]
+            ProvideConfig::FromContainer {
+                container: "container".to_string(),
+                files: vec!["/foo/bar".into()]
             },
-            ProvideConfig {
-                from: Some("container".to_string()),
-                as_file: None,
-                include: vec!["/foo/bar".to_string()]
+            ProvideConfig::FromContainerRename {
+                container: "container".to_string(),
+                from: "/usr/lib/peaches".into(),
+                to: "pears".into(),
             },
-            ProvideConfig {
-                from: Some("container".to_string()),
-                as_file: Some("shells.zip".to_string()),
-                include: vec!["/usr/bin/bash".to_string(), "/usr/bin/zsh".to_string()]
+            ProvideConfig::FromContainerArchive {
+                container: "container".to_string(),
+                files: vec!["/usr/bin/bash".into(), "/usr/bin/zsh".into()],
+                archive_name: "shells.zip".into(),
             }
         ],
     );
diff --git a/tests/repo/rcds.yaml b/tests/repo/rcds.yaml
index 94d5da8..315af51 100644
--- a/tests/repo/rcds.yaml
+++ b/tests/repo/rcds.yaml
@@ -22,9 +22,9 @@ points:
 
 deploy:
   # control challenge deployment status explicitly per environment/profile
   testing:
-    misc/garf: true
+    # misc/garf: true
     pwn/notsh: true
-    web/bar: true
+    # web/bar: true
 
 profiles:
   # configure per-environment credentials etc