diff --git a/.dockerignore b/.dockerignore
deleted file mode 100644
index 379491ca2..000000000
--- a/.dockerignore
+++ /dev/null
@@ -1,43 +0,0 @@
-# Make sure to update .gitignore appropriately
-#
-# Make sure to keep the `gen` folder because this is where we generate our
-# Dockerfiles to.
-
-# Git
-# **/.git/
-# **/.gitignore
-
-**/.DS_Store
-**/symbolCache.db
-
-# Vagrant
-.vagrant-remote
-.vagrant-digitalocean-token
-**/.vagrant/
-
-# Docker
-**/Dockerfile
-
-# Bolt
-**/tests/
-.bolt-meta.json
-.volumes/
-Bolt.local.toml
-secrets/
-
-# Rivet
-infra/
-!infra/misc/svc_scripts/
-!infra/default-builds/
-oss/infra/
-!oss/infra/misc/svc_scripts/
-!oss/infra/default-builds/
-
-# Rust
-**/target/
-**/*.rustfmt
-
-# JavaScript
-**/.cache/
-**/dist/
-**/node_modules/
diff --git a/infra/tf/k8s_cluster_k3d/main.tf b/infra/tf/k8s_cluster_k3d/main.tf
index 2ccd3241b..1c6eb8f8d 100644
--- a/infra/tf/k8s_cluster_k3d/main.tf
+++ b/infra/tf/k8s_cluster_k3d/main.tf
@@ -7,6 +7,11 @@ terraform {
 	}
 }
 
+locals {
+	repo_host = "svc"
+	repo_port = 5001
+}
+
 resource "k3d_cluster" "main" {
 	name = "rivet-${var.namespace}"
 
@@ -80,6 +85,13 @@ resource "k3d_cluster" "main" {
 		node_filters = ["server:0"]
 	}
 
+	registries {
+		create {
+			name = "svc"
+			host_port = local.repo_port
+		}
+	}
+
 	k3s {
 		extra_args {
 			arg = "--disable=traefik"
diff --git a/infra/tf/k8s_cluster_k3d/output.tf b/infra/tf/k8s_cluster_k3d/output.tf
index 7fde52f77..3bce75352 100644
--- a/infra/tf/k8s_cluster_k3d/output.tf
+++ b/infra/tf/k8s_cluster_k3d/output.tf
@@ -2,3 +2,10 @@ output "traefik_external_ip" {
 	value = var.public_ip
 }
 
+output "repo_host" {
+	value = local.repo_host
+}
+
+output "repo_port" {
+	value = local.repo_port
+}
diff --git a/lib/bolt/config/src/ns.rs b/lib/bolt/config/src/ns.rs
index 326e6e9a4..2e3dcbea3 100644
--- a/lib/bolt/config/src/ns.rs
+++ b/lib/bolt/config/src/ns.rs
@@ -306,6 +306,7 @@ pub struct Docker {
 	///
 	/// See [here](https://docs.docker.com/docker-hub/download-rate-limit) for
 	/// more information on Docker Hub's rate limits.
+	#[serde(default)]
 	pub authenticate_all_docker_hub_pulls: bool,
 	/// Docker repository to upload builds to. Must end in a slash.
 	#[serde(default = "default_docker_repo")]
@@ -341,14 +342,21 @@ pub struct Kubernetes {
 #[derive(Serialize, Deserialize, Clone, Debug)]
 pub enum KubernetesProvider {
 	#[serde(rename = "k3d")]
-	K3d {},
+	K3d {
+		/// Tells bolt to use the K3d managed registry for svc builds. This will override ns.docker.repository
+		/// for image uploads.
+		#[serde(default)]
+		use_local_repo: bool,
+	},
 	#[serde(rename = "aws_eks")]
 	AwsEks {},
 }
 
 impl Default for KubernetesProvider {
 	fn default() -> Self {
-		Self::K3d {}
+		Self::K3d {
+			use_local_repo: false,
+		}
 	}
 }
 
diff --git a/lib/bolt/core/src/context/project.rs b/lib/bolt/core/src/context/project.rs
index eaf4e282c..ef659c968 100644
--- a/lib/bolt/core/src/context/project.rs
+++ b/lib/bolt/core/src/context/project.rs
@@ -11,7 +11,7 @@ use sha2::{Digest, Sha256};
 use tokio::{fs, sync::Mutex};
 
 use super::{RunContext, ServiceContext};
-use crate::{config, context, utils::command_helper::CommandHelper};
+use crate::{config, context, dep::terraform, utils::command_helper::CommandHelper};
 
 pub type ProjectContext = Arc<ProjectContextData>;
 
@@ -343,6 +343,11 @@ impl ProjectContextData {
 				"cannot enable billing without emailing"
 			);
 		}
+
+		assert!(
+			self.ns().docker.repository.ends_with('/'),
+			"docker repository must end with slash"
+		);
 	}
 
 	// Traverses from FS root to CWD, returns first directory with Bolt.toml
@@ -1017,6 +1022,36 @@ impl ProjectContextData {
 	}
 }
 
+impl ProjectContextData {
+	/// Gets the correct repo to push svc builds to/pull from.
+	pub async fn docker_repos(self: &Arc<Self>) -> (String, String) {
+		match self.ns().kubernetes.provider {
+			config::ns::KubernetesProvider::K3d { use_local_repo } if use_local_repo => {
+				let output = terraform::output::read_k8s_cluster_k3d(self).await;
+				let local_repo = format!("localhost:{}/", *output.repo_port);
+				let internal_repo = format!("{}:{}/", *output.repo_host, *output.repo_port);
+
+				(local_repo, internal_repo)
+			}
+			_ => (
+				self.ns().docker.repository.clone(),
+				self.ns().docker.repository.clone(),
+			),
+		}
+	}
+
+	/// Whether or not to build svc images locally vs inside of docker.
+	pub fn build_svcs_locally(&self) -> bool {
+		match self.ns().kubernetes.provider {
+			config::ns::KubernetesProvider::K3d { use_local_repo } if use_local_repo => false,
+			_ => matches!(
+				&self.ns().cluster.kind,
+				config::ns::ClusterKind::SingleNode { .. }
+			),
+		}
+	}
+}
+
 impl ProjectContextData {
 	pub fn leader_count(&self) -> usize {
 		match &self.ns().cluster.kind {
diff --git a/lib/bolt/core/src/context/service.rs b/lib/bolt/core/src/context/service.rs
index fc0700925..b55455412 100644
--- a/lib/bolt/core/src/context/service.rs
+++ b/lib/bolt/core/src/context/service.rs
@@ -384,48 +384,52 @@ pub enum ServiceBuildPlan {
 	ExistingUploadedBuild { image_tag: String },
 
 	/// Build the service and upload to Docker.
-	BuildAndUpload { image_tag: String },
+	BuildAndUpload {
+		/// Push location (local repo)
+		push_image_tag: String,
+		/// Pull location (inside of k8s)
+		pull_image_tag: String,
+	},
 }
 
 impl ServiceContextData {
 	/// Determines if this service needs to be recompiled.
 	pub async fn build_plan(&self, build_context: &BuildContext) -> Result<ServiceBuildPlan> {
+		let project_ctx = self.project().await;
+
 		// Check if build exists on docker.io
-		let pub_image_tag = self.docker_image_tag(Some("docker.io/rivetgg/")).await?;
+		let pub_image_tag = self.docker_image_tag(&project_ctx, "docker.io/rivetgg/")?;
 		if docker::cli::container_exists(&pub_image_tag).await {
 			return Ok(ServiceBuildPlan::ExistingUploadedBuild {
 				image_tag: pub_image_tag,
 			});
 		}
 
-		// Check if build exists in custom repo
-		let image_tag = self.docker_image_tag(None).await?;
+		// Check if build exists in config repo
+		let image_tag = self.docker_image_tag(&project_ctx, &project_ctx.ns().docker.repository)?;
 		if docker::cli::container_exists(&image_tag).await {
 			return Ok(ServiceBuildPlan::ExistingUploadedBuild { image_tag });
 		}
 
-		let project_ctx = self.project().await;
+		if project_ctx.build_svcs_locally() {
+			// Derive the build path
+			let optimization = match &build_context {
+				BuildContext::Bin { optimization } => optimization,
+				BuildContext::Test { .. } => &BuildOptimization::Debug,
+			};
+			let output_path = self.rust_bin_path(optimization).await;
 
-		match &project_ctx.ns().cluster.kind {
-			// Build locally
-			config::ns::ClusterKind::SingleNode { .. } => {
-				// Derive the build path
-				let optimization = match &build_context {
-					BuildContext::Bin { optimization } => optimization,
-					BuildContext::Test { .. } => &BuildOptimization::Debug,
-				};
-				let output_path = self.rust_bin_path(optimization).await;
+			// Rust libs always attempt to rebuild (handled by cargo)
+			Ok(ServiceBuildPlan::BuildLocally {
+				exec_path: output_path,
+			})
+		} else {
+			let (push_repo, pull_repo) = project_ctx.docker_repos().await;
 
-				// Rust libs always attempt to rebuild (handled by cargo)
-				Ok(ServiceBuildPlan::BuildLocally {
-					exec_path: output_path,
-				})
-			}
-			// Build and upload to S3
-			config::ns::ClusterKind::Distributed { .. } => {
-				// Default to building
-				Ok(ServiceBuildPlan::BuildAndUpload { image_tag })
-			}
+			Ok(ServiceBuildPlan::BuildAndUpload {
+				push_image_tag: self.docker_image_tag(&project_ctx, &push_repo)?,
+				pull_image_tag: self.docker_image_tag(&project_ctx, &pull_repo)?,
+			})
 		}
 	}
 }
@@ -1320,12 +1324,10 @@ impl ServiceContextData {
 }
 
 impl ServiceContextData {
-	pub async fn docker_image_tag(&self, override_repo: Option<&str>) -> Result<String> {
-		let project_ctx = self.project().await;
+	pub fn docker_image_tag(&self, project_ctx: &ProjectContext, repo: &str) -> Result<String> {
+		ensure!(repo.ends_with('/'), "docker repository must end with slash");
 		let source_hash = project_ctx.source_hash();
 
-		let repo = override_repo.unwrap_or(&project_ctx.ns().docker.repository);
-		ensure!(repo.ends_with('/'), "docker repository must end with slash");
 		Ok(format!(
 			"{}{}:{}",
@@ -1336,7 +1338,9 @@ impl ServiceContextData {
 	}
 
 	pub async fn upload_build(&self) -> Result<()> {
-		let image_tag = self.docker_image_tag(None).await?;
+		let project_ctx = self.project().await;
+		let (push_repo, _) = project_ctx.docker_repos().await;
+		let image_tag = self.docker_image_tag(&project_ctx, &push_repo)?;
 
 		let mut cmd = Command::new("docker");
 		cmd.arg("push");
diff --git a/lib/bolt/core/src/dep/cargo/cli.rs b/lib/bolt/core/src/dep/cargo/cli.rs
index 9fd1864d3..590105257 100644
--- a/lib/bolt/core/src/dep/cargo/cli.rs
+++ b/lib/bolt/core/src/dep/cargo/cli.rs
@@ -1,13 +1,38 @@
 use std::path::{Path, PathBuf};
 
 use anyhow::{ensure, Context, Result};
-use indoc::formatdoc;
+use indoc::{formatdoc, indoc};
 use regex::Regex;
 use serde_json::json;
 use tokio::{fs, io::AsyncReadExt, process::Command, task::block_in_place};
 
 use crate::{config, context::ProjectContext};
 
+const DOCKERIGNORE: &str = indoc!(
+	r#"
+	*
+	# Rivet
+	!Bolt.toml
+	!gen/docker
+	!gen/build_script.sh
+	sdks/runtime
+	oss/sdks/runtime
+	!lib
+	!oss/lib
+	!svc
+	svc/**/*.md
+	!oss/svc
+	oss/svc/**/*.md
+	!proto
+	!oss/proto
+	!sdks/full/rust/src
+	!sdks/full/rust/Cargo.toml
+	!oss/sdks/full/rust/src
+	!oss/sdks/full/rust/Cargo.toml
+	!oss/errors
+	"#
+);
+
 pub struct BuildOpts<'a, T: AsRef<str>> {
 	pub build_calls: Vec<BuildCall<'a, T>>,
 	/// Builds for release mode.
@@ -104,150 +129,170 @@ pub async fn build<'a, T: AsRef<str>>(ctx: &ProjectContext, opts: BuildOpts<'a,
 	fs::write(&build_script_path, build_script).await?;
 
 	// Execute build command
-	match &ctx.ns().cluster.kind {
-		config::ns::ClusterKind::SingleNode { .. } => {
-			// Make build script executable
-			let mut cmd = Command::new("chmod");
+	if ctx.build_svcs_locally() {
+		// Make build script executable
+		let mut cmd = Command::new("chmod");
+		cmd.current_dir(ctx.path());
+		cmd.arg("+x");
+		cmd.arg(build_script_path.display().to_string());
+		let status = cmd.status().await?;
+		ensure!(status.success());
+
+		// Execute
+		let mut cmd = Command::new(build_script_path.display().to_string());
+		cmd.current_dir(ctx.path());
+		let status = cmd.status().await?;
+		ensure!(status.success());
+	} else {
+		let optimization = if opts.release { "release" } else { "debug" };
+		// Get repo to push to
+		let (push_repo, _) = ctx.docker_repos().await;
+		let source_hash = ctx.source_hash();
+
+		// Create directory for docker files
+		let gen_path = ctx.gen_path().join("docker");
+		fs::create_dir_all(&gen_path).await?;
+
+		// Build all of the base binaries in batch to optimize build speed
+		//
+		// We could do this as a single multi-stage Docker container, but it requires
+		// re-hashing the entire project every time to check the build layers and can be
+		// faulty sometimes.
+		let build_image_tag = {
+			let image_tag = format!("{push_repo}build:{source_hash}");
+			let dockerfile_path = gen_path.join(format!("Dockerfile.build"));
+
+			// TODO: Use --secret to pass sccache credentials instead of the build script.
+			fs::write(
+				&dockerfile_path,
+				formatdoc!(
+					r#"
+					# syntax=docker/dockerfile:1.2
+
+					FROM rust:1.77.2-slim AS rust
+
+					RUN apt-get update && apt-get install -y protobuf-compiler pkg-config libssl-dev g++ git
+
+					RUN apt-get install --yes libpq-dev wget
+					RUN wget https://github.com/mozilla/sccache/releases/download/v0.2.15/sccache-v0.2.15-x86_64-unknown-linux-musl.tar.gz \
+						&& tar xzf sccache-v0.2.15-x86_64-unknown-linux-musl.tar.gz \
+						&& mv sccache-v0.2.15-x86_64-unknown-linux-musl/sccache /usr/local/bin/sccache \
+						&& chmod +x /usr/local/bin/sccache
+
+					WORKDIR /usr/rivet
+
+					COPY . .
+					COPY {build_script_path} build_script.sh
+
+					RUN chmod +x ./build_script.sh
+					RUN \
+						--mount=type=cache,target=/usr/local/cargo/git \
+						--mount=type=cache,target=/usr/local/cargo/registry \
+						--mount=type=cache,target=/usr/rivet/target \
+						--mount=type=cache,target=/usr/rivet/oss/target \
+						sh -c ./build_script.sh && mkdir /usr/bin/rivet && find target/{optimization} -maxdepth 1 -type f ! -name "*.*" -exec mv {{}} /usr/bin/rivet/ \;
+
+					# Copy all binaries from target directory into an empty image (it is not included in
+					# the output because of cache mount)
+
+					# Create an empty image and copy binaries to it (this is to minimize the size of the image)
+					FROM scratch
+					COPY --from=rust /usr/bin/rivet/ /
+					"#,
+					build_script_path = build_script_path_relative.display(),
+				),
+			)
+			.await?;
+
+			// Check if we need to include default builds in the build context
+			let has_default_builds = opts
+				.build_calls
+				.iter()
+				.flat_map(|call| call.bins)
+				.any(|bin| bin.as_ref() == "build-default-create");
+
+			let dockerignore_path = gen_path.join("Dockerfile.build.dockerignore");
+			fs::write(
+				&dockerignore_path,
+				if has_default_builds {
+					format!("{DOCKERIGNORE}\n!infra/default-builds/\n!oss/infra/default-builds/")
+				} else {
+					DOCKERIGNORE.to_string()
+				},
+			)
+			.await?;
+
+			// Build image
+			let mut cmd = Command::new("docker");
+			cmd.env("DOCKER_BUILDKIT", "1");
 			cmd.current_dir(ctx.path());
-			cmd.arg("+x");
-			cmd.arg(build_script_path.display().to_string());
-			let status = cmd.status().await?;
-			ensure!(status.success());
+			cmd.arg("build");
+			cmd.arg("-f").arg(dockerfile_path);
+			// Prints plain console output for debugging
+			// cmd.arg("--progress=plain");
+			cmd.arg("-t").arg(&image_tag);
+			cmd.arg(".");
 
-			// Execute
-			let mut cmd = Command::new(build_script_path.display().to_string());
-			cmd.current_dir(ctx.path());
 			let status = cmd.status().await?;
 			ensure!(status.success());
-		}
-		config::ns::ClusterKind::Distributed { .. } => {
-			let optimization = if opts.release { "release" } else { "debug" };
-			let repo = &ctx.ns().docker.repository;
-			ensure!(repo.ends_with('/'), "docker repository must end with slash");
-			let source_hash = ctx.source_hash();
-
-			// Create directory for docker files
-			let gen_path = ctx.gen_path().join("docker");
-			fs::create_dir_all(&gen_path).await?;
-
-			// Build all of the base binaries in batch to optimize build speed
-			//
-			// We could do this as a single multi-stage Docker container, but it requires
-			// re-hashing the entire project every time to check the build layers and can be
-			// faulty sometimes.
-			let build_image_tag = {
-				let image_tag = format!("{repo}build:{source_hash}");
-				let dockerfile_path = gen_path.join(format!("Dockerfile.build"));
-
-				// TODO: Use --secret to pass sccache credentials instead of the build script.
+
+			image_tag
+		};
+
+		for call in &opts.build_calls {
+			for bin in call.bins {
+				let bin = bin.as_ref();
+
+				// Resolve the symlink for the svc_scripts dir since Docker does not resolve
+				// symlinks in COPY commands
+				let infra_path = ctx.path().join("infra");
+				let infra_path_resolved = tokio::fs::read_link(&infra_path)
+					.await
+					.map_or_else(|_| infra_path.clone(), |path| ctx.path().join(path));
+				let svc_scripts_path = infra_path_resolved.join("misc").join("svc_scripts");
+				let svc_scripts_path_relative = svc_scripts_path
+					.strip_prefix(ctx.path())
+					.context("failed to strip prefix")?;
+
+				// Build the final image
+				let image_tag = format!("{push_repo}{bin}:{source_hash}");
+				let dockerfile_path = gen_path.join(format!("Dockerfile.{bin}"));
 				fs::write(
 					&dockerfile_path,
 					formatdoc!(
 						r#"
-						# syntax=docker/dockerfile:1.2
-
-						FROM rust:1.77.2-slim
-
-						RUN apt-get update && apt-get install -y protobuf-compiler pkg-config libssl-dev g++ git
-
-						RUN apt-get install --yes libpq-dev wget
-						RUN wget https://github.com/mozilla/sccache/releases/download/v0.2.15/sccache-v0.2.15-x86_64-unknown-linux-musl.tar.gz \
-							&& tar xzf sccache-v0.2.15-x86_64-unknown-linux-musl.tar.gz \
-							&& mv sccache-v0.2.15-x86_64-unknown-linux-musl/sccache /usr/local/bin/sccache \
-							&& chmod +x /usr/local/bin/sccache
-
-						WORKDIR /usr/rivet
-
-						COPY . .
-						COPY {build_script_path} build_script.sh
-
-						RUN chmod +x ./build_script.sh
-						RUN \
-							--mount=type=cache,target=/usr/rivet/target \
-							--mount=type=cache,target=/usr/rivet/oss/target \
-							sh -c ./build_script.sh
-
-						# Copy all binaries from target directory (it is not included in the output because of cache mount)
-						RUN \
-							--mount=type=cache,target=/usr/rivet/target \
-							--mount=type=cache,target=/usr/rivet/oss/target \
-							find target/{optimization} -maxdepth 1 -type f ! -name "*.*" -exec cp {{}} /usr/bin/ \;
+						FROM {build_image_tag} AS build
+
+						FROM debian:12.1-slim AS run
+
+						# Update ca-certificates. Install curl for health checks.
+						RUN DEBIAN_FRONTEND=noninteractive apt-get update -y && apt-get install -y --no-install-recommends ca-certificates openssl curl
+
+						# Copy supporting scripts
+						COPY {svc_scripts_path}/health_check.sh {svc_scripts_path}/install_ca.sh /usr/bin/
+						RUN chmod +x /usr/bin/health_check.sh /usr/bin/install_ca.sh
+
+						# Copy final binary
+						COPY --from=build {bin} /usr/bin/{bin}
 						"#,
-						build_script_path = build_script_path_relative.display(),
+						svc_scripts_path = svc_scripts_path_relative.display(),
 					),
 				)
 				.await?;
 
 				// Build image
 				let mut cmd = Command::new("docker");
+				cmd.env("DOCKER_BUILDKIT", "1");
 				cmd.current_dir(ctx.path());
 				cmd.arg("build");
 				cmd.arg("-f").arg(dockerfile_path);
 				// Prints plain console output for debugging
 				// cmd.arg("--progress=plain");
-				cmd.arg("-t").arg(&image_tag);
+				cmd.arg("-t").arg(image_tag);
 				cmd.arg(".");
 
 				let status = cmd.status().await?;
 				ensure!(status.success());
-
-			image_tag
-		};
-
-		for call in &opts.build_calls {
-			for bin in call.bins {
-				let bin = bin.as_ref();
-
-				// Resolve the symlink for the svc_scripts dir since Docker does not resolve
-				// symlinks in COPY commands
-				let infra_path = ctx.path().join("infra");
-				let infra_path_resolved = tokio::fs::read_link(&infra_path)
-					.await
-					.map_or_else(|_| infra_path.clone(), |path| ctx.path().join(path));
-				let svc_scripts_path = infra_path_resolved.join("misc").join("svc_scripts");
-				let svc_scripts_path_relative = svc_scripts_path
-					.strip_prefix(ctx.path())
-					.context("failed to strip prefix")?;
-
-				// Build the final image
-				let image_tag = format!("{repo}{bin}:{source_hash}");
-				let dockerfile_path = gen_path.join(format!("Dockerfile.{bin}"));
-				fs::write(
-					&dockerfile_path,
-					formatdoc!(
-						r#"
-						FROM {build_image_tag} AS build
-
-						FROM debian:12.1-slim AS run
-
-						# Update ca-certificates. Install curl for health checks.
-						RUN DEBIAN_FRONTEND=noninteractive apt-get update -y && apt-get install -y --no-install-recommends ca-certificates openssl curl
-
-						# Copy supporting scripts
-						COPY {svc_scripts_path}/health_check.sh {svc_scripts_path}/install_ca.sh /usr/bin/
-						RUN chmod +x /usr/bin/health_check.sh /usr/bin/install_ca.sh
-
-						# Copy final binary
-						COPY --from=build /usr/bin/{bin} /usr/bin/{bin}
-						"#,
-						svc_scripts_path = svc_scripts_path_relative.display(),
-					),
-				)
-				.await?;
-
-				// Build image
-				let mut cmd = Command::new("docker");
-				cmd.current_dir(ctx.path());
-				cmd.arg("build");
-				cmd.arg("-f").arg(dockerfile_path);
-				// Prints plain console output for debugging
-				// cmd.arg("--progress=plain");
-				cmd.arg("-t").arg(image_tag);
-				cmd.arg(".");
-
-				let status = cmd.status().await?;
-				ensure!(status.success());
-			}
 		}
 	}
 }
diff --git a/lib/bolt/core/src/dep/k8s/gen.rs b/lib/bolt/core/src/dep/k8s/gen.rs
index bcae9ae70..e641b953e 100644
--- a/lib/bolt/core/src/dep/k8s/gen.rs
+++ b/lib/bolt/core/src/dep/k8s/gen.rs
@@ -33,10 +33,9 @@ pub struct ExecServiceContext {
 }
 
 pub enum ExecServiceDriver {
-	Docker {
-		image_tag: String,
-		force_pull: bool,
-	},
+	/// Used when building and uploading an image to Docker.
+	Docker { image_tag: String, force_pull: bool },
+	/// Used when running a built binary locally.
 	LocalBinaryArtifact {
 		/// Path to the executable relative to the project root.
 		exec_path: PathBuf,
@@ -67,7 +66,7 @@ pub async fn project(ctx: &ProjectContext) -> Result<()> {
 	{
 		// Read kubectl config
 		let config = match ctx.ns().kubernetes.provider {
-			ns::KubernetesProvider::K3d {} => block_in_place(move || {
+			ns::KubernetesProvider::K3d { .. } => block_in_place(move || {
 				cmd!("k3d", "kubeconfig", "get", ctx.k8s_cluster_name()).read()
 			})?,
 			ns::KubernetesProvider::AwsEks {} => {
@@ -325,6 +324,7 @@ pub async fn gen_svc(exec_ctx: &ExecServiceContext) -> Vec {
 		ExecServiceDriver::Docker {
 			image_tag,
 			force_pull,
+			..
 		} => (
 			image_tag.as_str(),
 			if *force_pull {
diff --git a/lib/bolt/core/src/dep/terraform/output.rs b/lib/bolt/core/src/dep/terraform/output.rs
index 23a05c57c..4cebb211b 100644
--- a/lib/bolt/core/src/dep/terraform/output.rs
+++ b/lib/bolt/core/src/dep/terraform/output.rs
@@ -59,6 +59,12 @@ pub struct KubernetesClusterAws {
 	pub eks_admin_role_arn: TerraformOutputValue,
 }
 
+#[derive(Debug, Clone, Deserialize)]
+pub struct KubernetesClusterK3d {
+	pub repo_host: TerraformOutputValue,
+	pub repo_port: TerraformOutputValue,
+}
+
 #[derive(Debug, Clone, Deserialize)]
 pub struct Cockroach {
 	pub host: TerraformOutputValue,
@@ -95,6 +101,10 @@ pub async fn read_k8s_cluster_aws(ctx: &ProjectContext) -> KubernetesClusterAws
 	read_plan::<KubernetesClusterAws>(ctx, "k8s_cluster_aws").await
 }
 
+pub async fn read_k8s_cluster_k3d(ctx: &ProjectContext) -> KubernetesClusterK3d {
+	read_plan::<KubernetesClusterK3d>(ctx, "k8s_cluster_k3d").await
+}
+
 pub async fn read_crdb(ctx: &ProjectContext) -> Cockroach {
 	match &ctx.ns().cluster.kind {
 		config::ns::ClusterKind::SingleNode { .. } => {
diff --git a/lib/bolt/core/src/tasks/up.rs b/lib/bolt/core/src/tasks/up.rs
index ea2c52b32..60aea76ac 100644
--- a/lib/bolt/core/src/tasks/up.rs
+++ b/lib/bolt/core/src/tasks/up.rs
@@ -21,6 +21,7 @@ use crate::{
 	dep::{
 		self, cargo,
 		k8s::gen::{ExecServiceContext, ExecServiceDriver},
+		terraform,
 	},
 	tasks,
 	utils::{self},
@@ -92,13 +93,13 @@ pub async fn up_services>(
 	)?;
 	utils::telemetry::capture_event(ctx, event).await?;
 
-	// Generate configs
+	// Generate configs for the entire project
 	tasks::gen::generate_project(ctx, skip_config_sync_check).await;
 
 	eprintln!();
 	rivet_term::status::progress("Preparing", format!("{} services", all_exec_svcs.len()));
 
-	// Generate service config
+	// Generate configs for individual services
 	{
 		eprintln!();
 		rivet_term::status::progress("Generating", "");
@@ -135,7 +136,7 @@ pub async fn up_services>(
 		}
 	}
 
-	// Fetch build plans
+	// Determine build plans for each service
 	eprintln!();
 	rivet_term::status::progress("Planning builds", "");
 	let pb = utils::progress_bar(all_exec_svcs.len());
@@ -247,7 +248,7 @@ pub async fn up_services>(
 		}
 	}
 
-	// Build exec ctx contexts
+	// Build services that can't be built in a batch
 	eprintln!();
 	rivet_term::status::progress("Building", "(individual)");
 	let mut exec_ctxs = Vec::new();
@@ -282,9 +283,10 @@ pub async fn up_services>(
 				derive_local_build_driver(svc_ctx, exec_path.clone()).await
 			}
 			ServiceBuildPlan::ExistingUploadedBuild { image_tag }
-			| ServiceBuildPlan::BuildAndUpload { image_tag } => {
-				derive_uploaded_svc_driver(svc_ctx, image_tag.clone(), false).await
-			}
+			| ServiceBuildPlan::BuildAndUpload {
+				pull_image_tag: image_tag,
+				..
+			} => derive_uploaded_svc_driver(svc_ctx, image_tag.clone(), false).await,
 		},
 	});
diff --git a/scripts/forward/k8s_dashboard.sh b/scripts/forward/k8s_dashboard.sh
new file mode 100755
index 000000000..db907318d
--- /dev/null
+++ b/scripts/forward/k8s_dashboard.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -euf
+
+export KUBECONFIG="$(bolt output project-root)/gen/k8s/kubeconfig/$(bolt output namespace).yml"
+
+echo
+echo "Token:"
+kubectl -n kubernetes-dashboard create token admin-user
+
+echo
+echo "Url:"
+echo "http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:https/proxy/#/pod?namespace=_all"
+
+kubectl proxy
\ No newline at end of file
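
Usage note: to opt a namespace into the k3d-managed registry added above, a config enables the new `use_local_repo` flag on the k3d provider. A minimal sketch, assuming the namespace TOML mirrors the externally tagged serde layout in ns.rs (surrounding keys omitted, exact file location not shown in this diff):

# hypothetical namespace config snippet; layout assumed from the ns.rs serde derives
[kubernetes.provider.k3d]
use_local_repo = true

With this set, docker_repos() returns localhost:<repo_port>/ for pushes from the host and <repo_host>:<repo_port>/ for pulls inside the cluster, and build_svcs_locally() forces image builds even on single-node clusters.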