From 91d15248096857791d96afa1e7af94926de535bc Mon Sep 17 00:00:00 2001 From: MasterPtato Date: Mon, 24 Jun 2024 19:50:19 +0000 Subject: [PATCH] feat(bolt): build svcs as docker containers locally --- .dockerignore | 4 +- infra/tf/k8s_cluster_k3d/main.tf | 12 ++ infra/tf/k8s_cluster_k3d/output.tf | 7 + lib/bolt/config/src/ns.rs | 12 +- lib/bolt/core/src/context/project.rs | 37 ++++- lib/bolt/core/src/context/service.rs | 62 ++++---- lib/bolt/core/src/dep/cargo/cli.rs | 163 +++++++++++----------- lib/bolt/core/src/dep/k8s/gen.rs | 10 +- lib/bolt/core/src/dep/terraform/output.rs | 10 ++ lib/bolt/core/src/tasks/up.rs | 16 ++- scripts/forward/k8s_dashboard.sh | 14 ++ 11 files changed, 218 insertions(+), 129 deletions(-) create mode 100755 scripts/forward/k8s_dashboard.sh diff --git a/.dockerignore b/.dockerignore index 379491ca2..d83bad898 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,8 +4,8 @@ # Dockerfiles to. # Git -# **/.git/ -# **/.gitignore +**/.git/ +**/.gitignore **/.DS_Store **/symbolCache.db diff --git a/infra/tf/k8s_cluster_k3d/main.tf b/infra/tf/k8s_cluster_k3d/main.tf index 2ccd3241b..1c6eb8f8d 100644 --- a/infra/tf/k8s_cluster_k3d/main.tf +++ b/infra/tf/k8s_cluster_k3d/main.tf @@ -7,6 +7,11 @@ terraform { } } +locals { + repo_host = "svc" + repo_port = 5001 +} + resource "k3d_cluster" "main" { name = "rivet-${var.namespace}" @@ -80,6 +85,13 @@ resource "k3d_cluster" "main" { node_filters = ["server:0"] } + registries { + create { + name = "svc" + host_port = local.repo_port + } + } + k3s { extra_args { arg = "--disable=traefik" diff --git a/infra/tf/k8s_cluster_k3d/output.tf b/infra/tf/k8s_cluster_k3d/output.tf index 7fde52f77..3bce75352 100644 --- a/infra/tf/k8s_cluster_k3d/output.tf +++ b/infra/tf/k8s_cluster_k3d/output.tf @@ -2,3 +2,10 @@ output "traefik_external_ip" { value = var.public_ip } +output "repo_host" { + value = local.repo_host +} + +output "repo_port" { + value = local.repo_port +} diff --git a/lib/bolt/config/src/ns.rs b/lib/bolt/config/src/ns.rs index 326e6e9a4..2e3dcbea3 100644 --- a/lib/bolt/config/src/ns.rs +++ b/lib/bolt/config/src/ns.rs @@ -306,6 +306,7 @@ pub struct Docker { /// /// See [here](https://docs.docker.com/docker-hub/download-rate-limit) for /// more information on Docker Hub's rate limits. + #[serde(default)] pub authenticate_all_docker_hub_pulls: bool, /// Docker repository to upload builds to. Must end in a slash. #[serde(default = "default_docker_repo")] @@ -341,14 +342,21 @@ pub struct Kubernetes { #[derive(Serialize, Deserialize, Clone, Debug)] pub enum KubernetesProvider { #[serde(rename = "k3d")] - K3d {}, + K3d { + /// Tells bolt to use the K3d managed registry for svc builds. This will override ns.docker.repository + /// for image uploads. 
+		#[serde(default)]
+		use_local_repo: bool,
+	},
 	#[serde(rename = "aws_eks")]
 	AwsEks {},
 }
 
 impl Default for KubernetesProvider {
 	fn default() -> Self {
-		Self::K3d {}
+		Self::K3d {
+			use_local_repo: false,
+		}
 	}
 }
diff --git a/lib/bolt/core/src/context/project.rs b/lib/bolt/core/src/context/project.rs
index eaf4e282c..ef659c968 100644
--- a/lib/bolt/core/src/context/project.rs
+++ b/lib/bolt/core/src/context/project.rs
@@ -11,7 +11,7 @@ use sha2::{Digest, Sha256};
 use tokio::{fs, sync::Mutex};
 
 use super::{RunContext, ServiceContext};
-use crate::{config, context, utils::command_helper::CommandHelper};
+use crate::{config, context, dep::terraform, utils::command_helper::CommandHelper};
 
 pub type ProjectContext = Arc<ProjectContextData>;
 
@@ -343,6 +343,11 @@ impl ProjectContextData {
 			"cannot enable billing without emailing"
 		);
 	}
+
+	assert!(
+		self.ns().docker.repository.ends_with('/'),
+		"docker repository must end with slash"
+	);
 }
 
 // Traverses from FS root to CWD, returns first directory with Bolt.toml
@@ -1017,6 +1022,36 @@ impl ProjectContextData {
 	}
 }
 
+impl ProjectContextData {
+	/// Gets the correct repo to push svc builds to/pull from.
+	pub async fn docker_repos(self: &Arc<Self>) -> (String, String) {
+		match self.ns().kubernetes.provider {
+			config::ns::KubernetesProvider::K3d { use_local_repo } if use_local_repo => {
+				let output = terraform::output::read_k8s_cluster_k3d(self).await;
+				let local_repo = format!("localhost:{}/", *output.repo_port);
+				let internal_repo = format!("{}:{}/", *output.repo_host, *output.repo_port);
+
+				(local_repo, internal_repo)
+			}
+			_ => (
+				self.ns().docker.repository.clone(),
+				self.ns().docker.repository.clone(),
+			),
+		}
+	}
+
+	/// Whether or not to build svc images locally vs inside of docker.
+	pub fn build_svcs_locally(&self) -> bool {
+		match self.ns().kubernetes.provider {
+			config::ns::KubernetesProvider::K3d { use_local_repo } if use_local_repo => false,
+			_ => matches!(
+				&self.ns().cluster.kind,
+				config::ns::ClusterKind::SingleNode { .. }
+			),
+		}
+	}
+}
+
 impl ProjectContextData {
 	pub fn leader_count(&self) -> usize {
 		match &self.ns().cluster.kind {
diff --git a/lib/bolt/core/src/context/service.rs b/lib/bolt/core/src/context/service.rs
index fc0700925..b55455412 100644
--- a/lib/bolt/core/src/context/service.rs
+++ b/lib/bolt/core/src/context/service.rs
@@ -384,48 +384,52 @@ pub enum ServiceBuildPlan {
 	ExistingUploadedBuild { image_tag: String },
 
 	/// Build the service and upload to Docker.
-	BuildAndUpload { image_tag: String },
+	BuildAndUpload {
+		/// Push location (local repo)
+		push_image_tag: String,
+		/// Pull location (inside of k8s)
+		pull_image_tag: String,
+	},
 }
 
 impl ServiceContextData {
 	/// Determines if this service needs to be recompiled.
 	pub async fn build_plan(&self, build_context: &BuildContext) -> Result<ServiceBuildPlan> {
+		let project_ctx = self.project().await;
+
 		// Check if build exists on docker.io
-		let pub_image_tag = self.docker_image_tag(Some("docker.io/rivetgg/")).await?;
+		let pub_image_tag = self.docker_image_tag(&project_ctx, "docker.io/rivetgg/")?;
 		if docker::cli::container_exists(&pub_image_tag).await {
 			return Ok(ServiceBuildPlan::ExistingUploadedBuild {
 				image_tag: pub_image_tag,
 			});
 		}
 
-		// Check if build exists in custom repo
-		let image_tag = self.docker_image_tag(None).await?;
+		// Check if build exists in the configured repo
+		let image_tag = self.docker_image_tag(&project_ctx, &project_ctx.ns().docker.repository)?;
 		if docker::cli::container_exists(&image_tag).await {
 			return Ok(ServiceBuildPlan::ExistingUploadedBuild { image_tag });
 		}
 
-		let project_ctx = self.project().await;
+		if project_ctx.build_svcs_locally() {
+			// Derive the build path
+			let optimization = match &build_context {
+				BuildContext::Bin { optimization } => optimization,
+				BuildContext::Test { .. } => &BuildOptimization::Debug,
+			};
+			let output_path = self.rust_bin_path(optimization).await;
 
-		match &project_ctx.ns().cluster.kind {
-			// Build locally
-			config::ns::ClusterKind::SingleNode { .. } => {
-				// Derive the build path
-				let optimization = match &build_context {
-					BuildContext::Bin { optimization } => optimization,
-					BuildContext::Test { .. } => &BuildOptimization::Debug,
-				};
-				let output_path = self.rust_bin_path(optimization).await;
+			// Rust libs always attempt to rebuild (handled by cargo)
+			Ok(ServiceBuildPlan::BuildLocally {
+				exec_path: output_path,
+			})
+		} else {
+			let (push_repo, pull_repo) = project_ctx.docker_repos().await;
 
-				// Rust libs always attempt to rebuild (handled by cargo)
-				Ok(ServiceBuildPlan::BuildLocally {
-					exec_path: output_path,
-				})
-			}
-			// Build and upload to S3
-			config::ns::ClusterKind::Distributed { .. } => {
-				// Default to building
-				Ok(ServiceBuildPlan::BuildAndUpload { image_tag })
-			}
+			Ok(ServiceBuildPlan::BuildAndUpload {
+				push_image_tag: self.docker_image_tag(&project_ctx, &push_repo)?,
+				pull_image_tag: self.docker_image_tag(&project_ctx, &pull_repo)?,
+			})
 		}
 	}
 }
@@ -1320,12 +1324,10 @@ impl ServiceContextData {
 }
 
 impl ServiceContextData {
-	pub async fn docker_image_tag(&self, override_repo: Option<&str>) -> Result<String> {
-		let project_ctx = self.project().await;
+	pub fn docker_image_tag(&self, project_ctx: &ProjectContext, repo: &str) -> Result<String> {
+		ensure!(repo.ends_with('/'), "docker repository must end with slash");
 		let source_hash = project_ctx.source_hash();
 
-		let repo = override_repo.unwrap_or(&project_ctx.ns().docker.repository);
-		ensure!(repo.ends_with('/'), "docker repository must end with slash");
 
 		Ok(format!(
 			"{}{}:{}",
@@ -1336,7 +1338,9 @@ impl ServiceContextData {
 	}
 
 	pub async fn upload_build(&self) -> Result<()> {
-		let image_tag = self.docker_image_tag(None).await?;
+		let project_ctx = self.project().await;
+		let (push_repo, _) = project_ctx.docker_repos().await;
+		let image_tag = self.docker_image_tag(&project_ctx, &push_repo)?;
 
 		let mut cmd = Command::new("docker");
 		cmd.arg("push");
diff --git a/lib/bolt/core/src/dep/cargo/cli.rs b/lib/bolt/core/src/dep/cargo/cli.rs
index 9fd1864d3..d0c2528b5 100644
--- a/lib/bolt/core/src/dep/cargo/cli.rs
+++ b/lib/bolt/core/src/dep/cargo/cli.rs
@@ -104,43 +104,41 @@ pub async fn build<'a, T: AsRef<str>>(ctx: &ProjectContext, opts: BuildOpts<'a,
 	fs::write(&build_script_path, build_script).await?;
 
 	// Execute build command
-	match &ctx.ns().cluster.kind {
-		config::ns::ClusterKind::SingleNode { .. } => {
-			// Make build script executable
-			let mut cmd = Command::new("chmod");
-			cmd.current_dir(ctx.path());
-			cmd.arg("+x");
-			cmd.arg(build_script_path.display().to_string());
-			let status = cmd.status().await?;
-			ensure!(status.success());
-
-			// Execute
-			let mut cmd = Command::new(build_script_path.display().to_string());
-			cmd.current_dir(ctx.path());
-			let status = cmd.status().await?;
-			ensure!(status.success());
-		}
-		config::ns::ClusterKind::Distributed { .. } => {
-			let optimization = if opts.release { "release" } else { "debug" };
-			let repo = &ctx.ns().docker.repository;
-			ensure!(repo.ends_with('/'), "docker repository must end with slash");
-			let source_hash = ctx.source_hash();
-
-			// Create directory for docker files
-			let gen_path = ctx.gen_path().join("docker");
-			fs::create_dir_all(&gen_path).await?;
-
-			// Build all of the base binaries in batch to optimize build speed
-			//
-			// We could do this as a single multi-stage Docker container, but it requires
-			// re-hashing the entire project every time to check the build layers and can be
-			// faulty sometimes.
-			let build_image_tag = {
-				let image_tag = format!("{repo}build:{source_hash}");
-				let dockerfile_path = gen_path.join(format!("Dockerfile.build"));
-
-				// TODO: Use --secret to pass sccache credentials instead of the build script.
-				fs::write(
+	if ctx.build_svcs_locally() {
+		// Make build script executable
+		let mut cmd = Command::new("chmod");
+		cmd.current_dir(ctx.path());
+		cmd.arg("+x");
+		cmd.arg(build_script_path.display().to_string());
+		let status = cmd.status().await?;
+		ensure!(status.success());
+
+		// Execute
+		let mut cmd = Command::new(build_script_path.display().to_string());
+		cmd.current_dir(ctx.path());
+		let status = cmd.status().await?;
+		ensure!(status.success());
+	} else {
+		let optimization = if opts.release { "release" } else { "debug" };
+		// Get repo to push to
+		let (push_repo, _) = ctx.docker_repos().await;
+		let source_hash = ctx.source_hash();
+
+		// Create directory for docker files
+		let gen_path = ctx.gen_path().join("docker");
+		fs::create_dir_all(&gen_path).await?;
+
+		// Build all of the base binaries in batch to optimize build speed
+		//
+		// We could do this as a single multi-stage Docker container, but it requires
+		// re-hashing the entire project every time to check the build layers and can be
+		// faulty sometimes.
+		let build_image_tag = {
+			let image_tag = format!("{push_repo}build:{source_hash}");
+			let dockerfile_path = gen_path.join(format!("Dockerfile.build"));
+
+			// TODO: Use --secret to pass sccache credentials instead of the build script.
+			fs::write(
 				&dockerfile_path,
 				formatdoc!(
 					r#"
@@ -178,41 +176,41 @@ pub async fn build<'a, T: AsRef<str>>(ctx: &ProjectContext, opts: BuildOpts<'a,
 			)
 			.await?;
 
-			// Build image
-			let mut cmd = Command::new("docker");
-			cmd.current_dir(ctx.path());
-			cmd.arg("build");
-			cmd.arg("-f").arg(dockerfile_path);
-			// Prints plain console output for debugging
-			// cmd.arg("--progress=plain");
-			cmd.arg("-t").arg(&image_tag);
-			cmd.arg(".");
+			// Build image
+			let mut cmd = Command::new("docker");
+			cmd.current_dir(ctx.path());
+			cmd.arg("build");
+			cmd.arg("-f").arg(dockerfile_path);
+			// Prints plain console output for debugging
+			// cmd.arg("--progress=plain");
+			cmd.arg("-t").arg(&image_tag);
+			cmd.arg(".");
 
-			let status = cmd.status().await?;
-			ensure!(status.success());
+			let status = cmd.status().await?;
+			ensure!(status.success());
 
-			image_tag
-		};
-
-		for call in &opts.build_calls {
-			for bin in call.bins {
-				let bin = bin.as_ref();
-
-				// Resolve the symlink for the svc_scripts dir since Docker does not resolve
-				// symlinks in COPY commands
-				let infra_path = ctx.path().join("infra");
-				let infra_path_resolved = tokio::fs::read_link(&infra_path)
-					.await
-					.map_or_else(|_| infra_path.clone(), |path| ctx.path().join(path));
-				let svc_scripts_path = infra_path_resolved.join("misc").join("svc_scripts");
-				let svc_scripts_path_relative = svc_scripts_path
-					.strip_prefix(ctx.path())
-					.context("failed to strip prefix")?;
-
-				// Build the final image
-				let image_tag = format!("{repo}{bin}:{source_hash}");
-				let dockerfile_path = gen_path.join(format!("Dockerfile.{bin}"));
-				fs::write(
+			image_tag
+		};
+
+		for call in &opts.build_calls {
+			for bin in call.bins {
+				let bin = bin.as_ref();
+
+				// Resolve the symlink for the svc_scripts dir since Docker does not resolve
+				// symlinks in COPY commands
+				let infra_path = ctx.path().join("infra");
+				let infra_path_resolved = tokio::fs::read_link(&infra_path)
+					.await
+					.map_or_else(|_| infra_path.clone(), |path| ctx.path().join(path));
+				let svc_scripts_path = infra_path_resolved.join("misc").join("svc_scripts");
+				let svc_scripts_path_relative = svc_scripts_path
+					.strip_prefix(ctx.path())
+					.context("failed to strip prefix")?;
+
+				// Build the final image
+				let image_tag = format!("{push_repo}{bin}:{source_hash}");
+				let dockerfile_path = gen_path.join(format!("Dockerfile.{bin}"));
+				fs::write(
 					&dockerfile_path,
 					formatdoc!(
 						r#"
@@ -235,19 +233,18 @@ pub async fn build<'a, T: AsRef<str>>(ctx: &ProjectContext, opts: BuildOpts<'a,
 				)
 				.await?;
 
-				// Build image
-				let mut cmd = Command::new("docker");
-				cmd.current_dir(ctx.path());
-				cmd.arg("build");
-				cmd.arg("-f").arg(dockerfile_path);
-				// Prints plain console output for debugging
-				// cmd.arg("--progress=plain");
-				cmd.arg("-t").arg(image_tag);
-				cmd.arg(".");
-
-				let status = cmd.status().await?;
-				ensure!(status.success());
-			}
+				// Build image
+				let mut cmd = Command::new("docker");
+				cmd.current_dir(ctx.path());
+				cmd.arg("build");
+				cmd.arg("-f").arg(dockerfile_path);
+				// Prints plain console output for debugging
+				// cmd.arg("--progress=plain");
+				cmd.arg("-t").arg(image_tag);
+				cmd.arg(".");
+
+				let status = cmd.status().await?;
+				ensure!(status.success());
 			}
 		}
 	}
diff --git a/lib/bolt/core/src/dep/k8s/gen.rs b/lib/bolt/core/src/dep/k8s/gen.rs
index bcae9ae70..e641b953e 100644
--- a/lib/bolt/core/src/dep/k8s/gen.rs
+++ b/lib/bolt/core/src/dep/k8s/gen.rs
@@ -33,10 +33,9 @@ pub struct ExecServiceContext {
 }
 
 pub enum ExecServiceDriver {
-	Docker {
-		image_tag: String,
-		force_pull: bool,
-	},
+	/// Used when building and uploading an image to Docker.
+	Docker { image_tag: String, force_pull: bool },
+	/// Used when running a locally built binary.
 	LocalBinaryArtifact {
 		/// Path to the executable relative to the project root.
 		exec_path: PathBuf,
@@ -67,7 +66,7 @@ pub async fn project(ctx: &ProjectContext) -> Result<()> {
 	{
 		// Read kubectl config
 		let config = match ctx.ns().kubernetes.provider {
-			ns::KubernetesProvider::K3d {} => block_in_place(move || {
+			ns::KubernetesProvider::K3d { .. } => block_in_place(move || {
 				cmd!("k3d", "kubeconfig", "get", ctx.k8s_cluster_name()).read()
 			})?,
 			ns::KubernetesProvider::AwsEks {} => {
@@ -325,6 +324,7 @@ pub async fn gen_svc(exec_ctx: &ExecServiceContext) -> Vec {
 		ExecServiceDriver::Docker {
 			image_tag,
 			force_pull,
+			..
 		} => (
 			image_tag.as_str(),
 			if *force_pull {
diff --git a/lib/bolt/core/src/dep/terraform/output.rs b/lib/bolt/core/src/dep/terraform/output.rs
index 23a05c57c..4cebb211b 100644
--- a/lib/bolt/core/src/dep/terraform/output.rs
+++ b/lib/bolt/core/src/dep/terraform/output.rs
@@ -59,6 +59,12 @@ pub struct KubernetesClusterAws {
 	pub eks_admin_role_arn: TerraformOutputValue<String>,
 }
 
+#[derive(Debug, Clone, Deserialize)]
+pub struct KubernetesClusterK3d {
+	pub repo_host: TerraformOutputValue<String>,
+	pub repo_port: TerraformOutputValue<u16>,
+}
+
 #[derive(Debug, Clone, Deserialize)]
 pub struct Cockroach {
 	pub host: TerraformOutputValue<String>,
@@ -95,6 +101,10 @@ pub async fn read_k8s_cluster_aws(ctx: &ProjectContext) -> KubernetesClusterAws
 	read_plan::<KubernetesClusterAws>(ctx, "k8s_cluster_aws").await
 }
 
+pub async fn read_k8s_cluster_k3d(ctx: &ProjectContext) -> KubernetesClusterK3d {
+	read_plan::<KubernetesClusterK3d>(ctx, "k8s_cluster_k3d").await
+}
+
 pub async fn read_crdb(ctx: &ProjectContext) -> Cockroach {
 	match &ctx.ns().cluster.kind {
 		config::ns::ClusterKind::SingleNode { ..
} => {
diff --git a/lib/bolt/core/src/tasks/up.rs b/lib/bolt/core/src/tasks/up.rs
index ea2c52b32..60aea76ac 100644
--- a/lib/bolt/core/src/tasks/up.rs
+++ b/lib/bolt/core/src/tasks/up.rs
@@ -21,6 +21,7 @@ use crate::{
 	dep::{
 		self, cargo,
 		k8s::gen::{ExecServiceContext, ExecServiceDriver},
+		terraform,
 	},
 	tasks,
 	utils::{self},
@@ -92,13 +93,13 @@ pub async fn up_services<T: AsRef<str>>(
 	)?;
 	utils::telemetry::capture_event(ctx, event).await?;
 
-	// Generate configs
+	// Generate configs for the entire project
 	tasks::gen::generate_project(ctx, skip_config_sync_check).await;
 
 	eprintln!();
 	rivet_term::status::progress("Preparing", format!("{} services", all_exec_svcs.len()));
 
-	// Generate service config
+	// Generate configs for individual services
 	{
 		eprintln!();
 		rivet_term::status::progress("Generating", "");
@@ -135,7 +136,7 @@ pub async fn up_services<T: AsRef<str>>(
 		}
 	}
 
-	// Fetch build plans
+	// Determine build plans for each service
 	eprintln!();
 	rivet_term::status::progress("Planning builds", "");
 	let pb = utils::progress_bar(all_exec_svcs.len());
@@ -247,7 +248,7 @@ pub async fn up_services<T: AsRef<str>>(
 		}
 	}
 
-	// Build exec ctx contexts
+	// Build services that can't be built in a batch
 	eprintln!();
 	rivet_term::status::progress("Building", "(individual)");
 	let mut exec_ctxs = Vec::new();
@@ -282,9 +283,10 @@ pub async fn up_services<T: AsRef<str>>(
 				derive_local_build_driver(svc_ctx, exec_path.clone()).await
 			}
 			ServiceBuildPlan::ExistingUploadedBuild { image_tag }
-			| ServiceBuildPlan::BuildAndUpload { image_tag } => {
-				derive_uploaded_svc_driver(svc_ctx, image_tag.clone(), false).await
-			}
+			| ServiceBuildPlan::BuildAndUpload {
+				pull_image_tag: image_tag,
+				..
+			} => derive_uploaded_svc_driver(svc_ctx, image_tag.clone(), false).await,
 		},
 	});
 
diff --git a/scripts/forward/k8s_dashboard.sh b/scripts/forward/k8s_dashboard.sh
new file mode 100755
index 000000000..db907318d
--- /dev/null
+++ b/scripts/forward/k8s_dashboard.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -euf
+
+export KUBECONFIG="$(bolt output project-root)/gen/k8s/kubeconfig/$(bolt output namespace).yml"
+
+echo
+echo "Token:"
+kubectl -n kubernetes-dashboard create token admin-user
+
+echo
+echo "URL:"
+echo "http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:https/proxy/#/pod?namespace=_all"
+
+kubectl proxy
\ No newline at end of file
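
Usage sketch (not part of the diff): a minimal namespace config enabling the
new flag. Only use_local_repo itself is defined by this patch; the
[kubernetes.provider.k3d] table path is an assumption derived from the
#[serde(rename = "k3d")] attribute on KubernetesProvider:

    # Hypothetical namespace config (TOML): the table path is assumed;
    # only the use_local_repo key is introduced by this patch.
    [kubernetes.provider.k3d]
    use_local_repo = true

With the flag set, ProjectContextData::docker_repos resolves the push repo to
localhost:5001/ and the in-cluster pull repo to svc:5001/, matching the
repo_host/repo_port locals added in infra/tf/k8s_cluster_k3d/main.tf.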