From c82a5dfb5e8af0268d632b45d07814db15526cc6 Mon Sep 17 00:00:00 2001 From: Nathan Flurry Date: Thu, 3 Jul 2025 19:05:53 +0000 Subject: [PATCH] fix(pegboard): include namespace in actor log query --- docker/dev-full/vector-server/vector.yaml | 2 +- docker/monolith/vector-server/vector.yaml | 2 +- packages/core/api/actor/src/route/logs.rs | 2 + .../infra/client/container-runner/Cargo.toml | 2 +- .../container-runner/src/log_shipper.rs | 7 +- .../infra/client/container-runner/src/main.rs | 3 + .../infra/client/manager/src/actor/mod.rs | 15 +- packages/edge/infra/client/manager/src/ctx.rs | 10 +- ...03191728_drop_actor_logs_metadata.down.sql | 0 ...0703191728_drop_actor_logs_metadata.up.sql | 1 + .../20250703194503_actor_logs3.down.sql | 0 .../20250703194503_actor_logs3.up.sql | 19 ++ ...03195340_fix_actor_logs3_metadata.down.sql | 0 ...0703195340_fix_actor_logs3_metadata.up.sql | 36 ++++ .../pegboard/src/ops/actor/log/export.rs | 109 ------------ .../pegboard/src/ops/actor/log/mod.rs | 1 - .../pegboard/src/ops/actor/log/read.rs | 28 +-- .../docs/cloud/self-hosting/client-spec.json | 33 ++++ .../docs/cloud/self-hosting/server-spec.json | 11 +- site/src/content/docs/toolchain-spec.json | 164 ++++++++++++++++-- 20 files changed, 286 insertions(+), 159 deletions(-) create mode 100644 packages/edge/services/pegboard/db/actor-log/migrations/20250703191728_drop_actor_logs_metadata.down.sql create mode 100644 packages/edge/services/pegboard/db/actor-log/migrations/20250703191728_drop_actor_logs_metadata.up.sql create mode 100644 packages/edge/services/pegboard/db/actor-log/migrations/20250703194503_actor_logs3.down.sql create mode 100644 packages/edge/services/pegboard/db/actor-log/migrations/20250703194503_actor_logs3.up.sql create mode 100644 packages/edge/services/pegboard/db/actor-log/migrations/20250703195340_fix_actor_logs3_metadata.down.sql create mode 100644 packages/edge/services/pegboard/db/actor-log/migrations/20250703195340_fix_actor_logs3_metadata.up.sql delete 
mode 100644 packages/edge/services/pegboard/src/ops/actor/log/export.rs diff --git a/docker/dev-full/vector-server/vector.yaml b/docker/dev-full/vector-server/vector.yaml index 9a1261580e..6011391ce0 100644 --- a/docker/dev-full/vector-server/vector.yaml +++ b/docker/dev-full/vector-server/vector.yaml @@ -111,7 +111,7 @@ sinks: compression: gzip database: db_pegboard_actor_log endpoint: http://clickhouse:8123 - table: actor_logs2 + table: actor_logs3 auth: strategy: basic user: vector diff --git a/docker/monolith/vector-server/vector.yaml b/docker/monolith/vector-server/vector.yaml index bbf13bd17d..ab93ad6041 100644 --- a/docker/monolith/vector-server/vector.yaml +++ b/docker/monolith/vector-server/vector.yaml @@ -82,7 +82,7 @@ sinks: compression: gzip endpoint: http://clickhouse:9300 database: db_pegboard_actor_log - table: actor_logs2 + table: actor_logs3 auth: strategy: basic user: vector diff --git a/packages/core/api/actor/src/route/logs.rs b/packages/core/api/actor/src/route/logs.rs index 3416bf85b2..e2c1287642 100644 --- a/packages/core/api/actor/src/route/logs.rs +++ b/packages/core/api/actor/src/route/logs.rs @@ -96,6 +96,7 @@ pub async fn get_logs( // frequently and should not return a significant amount of logs. 
let logs_res = ctx .op(pegboard::ops::actor::log::read::Input { + env_id, actor_ids: actor_ids_clone.clone(), stream_types: stream_types_clone.clone(), count: 64, @@ -136,6 +137,7 @@ pub async fn get_logs( // Read most recent logs ctx.op(pegboard::ops::actor::log::read::Input { + env_id, actor_ids: actor_ids.clone(), stream_types: stream_types.clone(), count: 256, diff --git a/packages/edge/infra/client/container-runner/Cargo.toml b/packages/edge/infra/client/container-runner/Cargo.toml index 1a1efe3f13..6af22bf81f 100644 --- a/packages/edge/infra/client/container-runner/Cargo.toml +++ b/packages/edge/infra/client/container-runner/Cargo.toml @@ -16,8 +16,8 @@ rivet-logs.workspace = true serde = { version = "1.0.195", features = ["derive"] } serde_json = "1.0.111" signal-hook = "0.3.17" +uuid = { version = "1.6.1", features = ["v4"] } [dev-dependencies] portpicker = "0.1.1" tempfile = "3.9.0" -uuid = { version = "1.6.1", features = ["v4"] } diff --git a/packages/edge/infra/client/container-runner/src/log_shipper.rs b/packages/edge/infra/client/container-runner/src/log_shipper.rs index 5fd1e7c416..32354594a2 100644 --- a/packages/edge/infra/client/container-runner/src/log_shipper.rs +++ b/packages/edge/infra/client/container-runner/src/log_shipper.rs @@ -3,6 +3,7 @@ use std::{io::Write, net::TcpStream, sync::mpsc, thread::JoinHandle}; use anyhow::*; use serde::Serialize; use serde_json; +use uuid::Uuid; #[derive(Copy, Clone, Debug)] #[repr(u8)] @@ -37,6 +38,8 @@ pub struct LogShipper { pub vector_socket_addr: String, pub actor_id: String, + + pub env_id: Uuid, } impl LogShipper { @@ -91,7 +94,7 @@ impl LogShipper { while let Result::Ok(message) = self.msg_rx.recv() { let vector_message = VectorMessage::Actors { actor_id: self.actor_id.as_str(), - task: "main", // Backwards compatibility with logs + env_id: self.env_id, stream_type: message.stream_type as u8, ts: message.ts, message: message.message.as_str(), @@ -114,7 +117,7 @@ enum VectorMessage<'a> { #[serde(rename 
= "actors")] Actors { actor_id: &'a str, - task: &'a str, + env_id: Uuid, stream_type: u8, ts: u64, message: &'a str, diff --git a/packages/edge/infra/client/container-runner/src/main.rs b/packages/edge/infra/client/container-runner/src/main.rs index 81346fdc03..f99fdabcb3 100644 --- a/packages/edge/infra/client/container-runner/src/main.rs +++ b/packages/edge/infra/client/container-runner/src/main.rs @@ -2,6 +2,7 @@ use std::{fs, path::Path, sync::mpsc, time::Duration}; use anyhow::*; use utils::var; +use uuid::Uuid; mod container; mod log_shipper; @@ -36,6 +37,7 @@ fn main() -> Result<()> { .transpose() .context("failed to parse vector socket addr")?; let actor_id = var("ACTOR_ID")?; + let env_id = Uuid::parse_str(&var("ENVIRONMENT_ID")?)?; let (shutdown_tx, shutdown_rx) = mpsc::sync_channel(1); @@ -48,6 +50,7 @@ fn main() -> Result<()> { msg_rx, vector_socket_addr, actor_id, + env_id, }; let log_shipper_thread = log_shipper.spawn(); (Some(msg_tx), Some(log_shipper_thread)) diff --git a/packages/edge/infra/client/manager/src/actor/mod.rs b/packages/edge/infra/client/manager/src/actor/mod.rs index 03e7265306..bc389ba56e 100644 --- a/packages/edge/infra/client/manager/src/actor/mod.rs +++ b/packages/edge/infra/client/manager/src/actor/mod.rs @@ -29,17 +29,24 @@ pub struct Actor { actor_id: Uuid, generation: u32, config: protocol::ActorConfig, + metadata: protocol::ActorMetadata, runner: Mutex>, exited: Mutex, } impl Actor { - pub fn new(actor_id: Uuid, generation: u32, config: protocol::ActorConfig) -> Arc { + pub fn new( + actor_id: Uuid, + generation: u32, + config: protocol::ActorConfig, + metadata: protocol::ActorMetadata, + ) -> Arc { Arc::new(Actor { actor_id, generation, config, + metadata, runner: Mutex::new(None), exited: Mutex::new(false), @@ -50,12 +57,14 @@ impl Actor { actor_id: Uuid, generation: u32, config: protocol::ActorConfig, + metadata: protocol::ActorMetadata, runner: runner::Handle, ) -> Arc { Arc::new(Actor { actor_id, generation, config, + 
metadata, runner: Mutex::new(Some(runner)), exited: Mutex::new(false), @@ -209,6 +218,10 @@ impl Actor { .to_string(), ), ("ACTOR_ID", self.actor_id.to_string()), + ( + "ENVIRONMENT_ID", + self.metadata.environment.env_id.to_string(), + ), ]; if let Some(vector) = &ctx.config().vector { runner_env.push(("VECTOR_SOCKET_ADDR", vector.address.to_string())); diff --git a/packages/edge/infra/client/manager/src/ctx.rs b/packages/edge/infra/client/manager/src/ctx.rs index b8d7b3ff39..ed550695f6 100644 --- a/packages/edge/infra/client/manager/src/ctx.rs +++ b/packages/edge/infra/client/manager/src/ctx.rs @@ -421,6 +421,8 @@ impl Ctx { generation, config, } => { + let metadata = config.metadata.deserialize()?; + let mut actors = self.actors.write().await; if actors.contains_key(&(actor_id, generation)) { @@ -430,7 +432,7 @@ impl Ctx { "actor with this actor id + generation already exists, ignoring start command", ); } else { - let actor = Actor::new(actor_id, generation, *config); + let actor = Actor::new(actor_id, generation, *config, metadata); // Insert actor actors.insert((actor_id, generation), actor); @@ -718,6 +720,7 @@ impl Ctx { let config = serde_json::from_slice::(&row.config)?; let generation = row.generation.try_into()?; + let metadata = config.metadata.deserialize()?; match &isolate_runner { Some(isolate_runner) if pid == isolate_runner.pid().as_raw() => {} @@ -736,7 +739,7 @@ impl Ctx { } // Clean up actor. We run `cleanup_setup` instead of `cleanup` because `cleanup` publishes events. 
- let actor = Actor::new(row.actor_id, generation, config); + let actor = Actor::new(row.actor_id, generation, config, metadata); actor.cleanup_setup(self).await; } @@ -878,6 +881,7 @@ impl Ctx { let config = serde_json::from_slice::(&row.config)?; let generation = row.generation.try_into()?; + let metadata = config.metadata.deserialize()?; let runner = match &isolate_runner { // We have to clone the existing isolate runner handle instead of creating a new one so it @@ -901,7 +905,7 @@ impl Ctx { }, }; - let actor = Actor::with_runner(row.actor_id, generation, config, runner); + let actor = Actor::with_runner(row.actor_id, generation, config, metadata, runner); let actor = actors_guard .entry((row.actor_id, generation)) .or_insert(actor); diff --git a/packages/edge/services/pegboard/db/actor-log/migrations/20250703191728_drop_actor_logs_metadata.down.sql b/packages/edge/services/pegboard/db/actor-log/migrations/20250703191728_drop_actor_logs_metadata.down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/edge/services/pegboard/db/actor-log/migrations/20250703191728_drop_actor_logs_metadata.up.sql b/packages/edge/services/pegboard/db/actor-log/migrations/20250703191728_drop_actor_logs_metadata.up.sql new file mode 100644 index 0000000000..8e7c5f0cd9 --- /dev/null +++ b/packages/edge/services/pegboard/db/actor-log/migrations/20250703191728_drop_actor_logs_metadata.up.sql @@ -0,0 +1 @@ +DROP VIEW IF EXISTS actor_logs2_with_metadata; diff --git a/packages/edge/services/pegboard/db/actor-log/migrations/20250703194503_actor_logs3.down.sql b/packages/edge/services/pegboard/db/actor-log/migrations/20250703194503_actor_logs3.down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/edge/services/pegboard/db/actor-log/migrations/20250703194503_actor_logs3.up.sql b/packages/edge/services/pegboard/db/actor-log/migrations/20250703194503_actor_logs3.up.sql new file mode 100644 index 0000000000..7b3bf2726c --- /dev/null +++ 
b/packages/edge/services/pegboard/db/actor-log/migrations/20250703194503_actor_logs3.up.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS actor_logs3 ( + namespace LowCardinality(String), + actor_id String, + env_id UUID, + ts DateTime64 (9), + stream_type UInt8, -- pegboard::types::LogsStreamType + message String +) ENGINE = ReplicatedMergeTree () +PARTITION BY + toStartOfHour (ts) +ORDER BY ( + namespace, + env_id, + actor_id, + toUnixTimestamp (ts), + stream_type +) +TTL toDate (ts + toIntervalDay(14)) +SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1; diff --git a/packages/edge/services/pegboard/db/actor-log/migrations/20250703195340_fix_actor_logs3_metadata.down.sql b/packages/edge/services/pegboard/db/actor-log/migrations/20250703195340_fix_actor_logs3_metadata.down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/packages/edge/services/pegboard/db/actor-log/migrations/20250703195340_fix_actor_logs3_metadata.up.sql b/packages/edge/services/pegboard/db/actor-log/migrations/20250703195340_fix_actor_logs3_metadata.up.sql new file mode 100644 index 0000000000..6ad6971821 --- /dev/null +++ b/packages/edge/services/pegboard/db/actor-log/migrations/20250703195340_fix_actor_logs3_metadata.up.sql @@ -0,0 +1,36 @@ +CREATE MATERIALIZED VIEW IF NOT EXISTS actor_logs3_with_metadata +( + namespace LowCardinality(String), + actor_id String, + ts DateTime64(9), + stream_type UInt8, -- pegboard::types::LogsStreamType + message String, + project_id UUID, + env_id UUID, + datacenter_id UUID, + tags Map(String, String), + build_id UUID, + client_id UUID, + durable Bool +) +ENGINE = ReplicatedMergeTree() +PARTITION BY (env_id, toStartOfHour(ts)) +ORDER BY (env_id, toUnixTimestamp(ts), actor_id, stream_type) +TTL toDate(ts + toIntervalDay(14)) +SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1 +AS SELECT + l.namespace, + l.actor_id, + l.ts, + l.stream_type, + l.message, + a.project_id, + a.env_id, + a.datacenter_id, + a.tags, + a.build_id, + 
a.client_id, + a.durable +FROM actor_logs3 l +LEFT JOIN db_pegboard_analytics.actors a ON l.namespace = a.namespace AND l.env_id = a.env_id AND l.actor_id = a.actor_id; + diff --git a/packages/edge/services/pegboard/src/ops/actor/log/export.rs b/packages/edge/services/pegboard/src/ops/actor/log/export.rs deleted file mode 100644 index cebb0fe3c4..0000000000 --- a/packages/edge/services/pegboard/src/ops/actor/log/export.rs +++ /dev/null @@ -1,109 +0,0 @@ -use chirp_workflow::prelude::*; -use rivet_operation::prelude::proto::backend; - -use crate::types::LogsStreamType; - -#[derive(Debug)] -pub struct Input { - pub actor_id: Uuid, - pub stream_type: LogsStreamType, -} - -#[derive(Debug)] -pub struct Output { - pub upload_id: Uuid, -} - -#[derive(clickhouse::Row, serde::Deserialize)] -pub struct LogEntry { - pub message: Vec, -} - -#[operation] -pub async fn pegboard_actor_log_read(ctx: &OperationCtx, input: &Input) -> GlobalResult { - let file_name = match input.stream_type { - LogsStreamType::StdOut => "stdout.txt", - LogsStreamType::StdErr => "stderr.txt", - }; - - let mut entries_cursor = ctx - .clickhouse() - .await? - .query(indoc!( - " - SELECT message - FROM db_pegboard_actor_log.actor_logs - WHERE - actor_id = ? AND - stream_type = ? - ORDER BY ts ASC - - UNION ALL - - SELECT message - FROM db_pegboard_actor_log.actor_logs2 - WHERE - actor_id = ? AND - stream_type = ? - ORDER BY ts ASC - " - )) - .bind(input.actor_id) - .bind(input.stream_type as i8) - .bind(input.actor_id.to_string()) - .bind(input.stream_type as i8) - .fetch::()?; - - let mut lines = 0; - let mut buf = Vec::new(); - while let Some(mut entry) = entries_cursor.next().await? 
{ - buf.append(&mut entry.message); - buf.push(b'\n'); - lines += 1; - } - - tracing::info!(?lines, bytes = ?buf.len(), "read all logs"); - - // Upload log - let mime = "text/plain"; - let content_length = buf.len(); - let upload_res = op!([ctx] upload_prepare { - bucket: "bucket-actor-log-export".into(), - files: vec![ - backend::upload::PrepareFile { - path: file_name.into(), - mime: Some(mime.into()), - content_length: content_length as u64, - ..Default::default() - }, - ], - }) - .await?; - - let presigned_req = unwrap!(upload_res.presigned_requests.first()); - let res = reqwest::Client::new() - .put(&presigned_req.url) - .body(buf) - .header(reqwest::header::CONTENT_TYPE, mime) - .header(reqwest::header::CONTENT_LENGTH, content_length) - .send() - .await?; - if res.status().is_success() { - tracing::info!("uploaded successfully"); - } else { - let status = res.status(); - let text = res.text().await; - tracing::error!(?status, ?text, "failed to upload"); - bail!("failed to upload"); - } - - op!([ctx] upload_complete { - upload_id: upload_res.upload_id, - bucket: Some("bucket-pegboard-log-export".into()), - }) - .await?; - - Ok(Output { - upload_id: unwrap!(upload_res.upload_id).as_uuid(), - }) -} diff --git a/packages/edge/services/pegboard/src/ops/actor/log/mod.rs b/packages/edge/services/pegboard/src/ops/actor/log/mod.rs index eaafeae343..2cab8848e7 100644 --- a/packages/edge/services/pegboard/src/ops/actor/log/mod.rs +++ b/packages/edge/services/pegboard/src/ops/actor/log/mod.rs @@ -1,2 +1 @@ -pub mod export; pub mod read; diff --git a/packages/edge/services/pegboard/src/ops/actor/log/read.rs b/packages/edge/services/pegboard/src/ops/actor/log/read.rs index c67e73b5de..e4a3918d41 100644 --- a/packages/edge/services/pegboard/src/ops/actor/log/read.rs +++ b/packages/edge/services/pegboard/src/ops/actor/log/read.rs @@ -4,6 +4,7 @@ use crate::types::LogsStreamType; #[derive(Debug)] pub struct Input { + pub env_id: Uuid, pub actor_ids: Vec, pub stream_types: 
Vec, pub count: i64, @@ -92,26 +93,13 @@ pub async fn pegboard_actor_log_read(ctx: &OperationCtx, input: &Input) -> Globa ts, message, stream_type, - actor_id_str - FROM ( - SELECT - ts, - message, - stream_type, - toString(actor_id) as actor_id_str - FROM - db_pegboard_actor_log.actor_logs - UNION ALL - SELECT - ts, - message, - stream_type, - actor_id as actor_id_str - FROM - db_pegboard_actor_log.actor_logs2 - ) + actor_id as actor_id_str + FROM + db_pegboard_actor_log.actor_logs3 WHERE - actor_id_str IN ? + namespace = ? + AND env_id = ? + AND actor_id_str IN ? AND stream_type IN ? -- Apply timestamp filtering based on query type AND ( @@ -152,6 +140,8 @@ pub async fn pegboard_actor_log_read(ctx: &OperationCtx, input: &Input) -> Globa // Build query with all parameters and safety restrictions let query_builder = clickhouse .query(&query) + .bind(&ctx.config().server()?.rivet.namespace) + .bind(input.env_id) .bind(&actor_id_strings) .bind(stream_type_values) // Query type parameters diff --git a/site/src/content/docs/cloud/self-hosting/client-spec.json b/site/src/content/docs/cloud/self-hosting/client-spec.json index 32c2482d9e..dbaa2fb394 100644 --- a/site/src/content/docs/cloud/self-hosting/client-spec.json +++ b/site/src/content/docs/cloud/self-hosting/client-spec.json @@ -156,9 +156,31 @@ }, "additionalProperties": false }, + "HostEntry": { + "type": "object", + "required": [ + "hostname", + "ip" + ], + "properties": { + "hostname": { + "type": "string" + }, + "ip": { + "type": "string" + } + }, + "additionalProperties": false + }, "Images": { "type": "object", "properties": { + "max_cache_size": { + "description": "Bytes. 
Defaults to 64 GiB.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, "pull_addresses": { "$ref": "#/definitions/Addresses" } @@ -260,6 +282,13 @@ "container_runner_binary_path": { "type": "string" }, + "custom_hosts": { + "description": "Custom host entries to append to /etc/hosts in actor containers.", + "type": "array", + "items": { + "$ref": "#/definitions/HostEntry" + } + }, "flavor": { "$ref": "#/definitions/ClientFlavor" }, @@ -275,6 +304,10 @@ "use_mounts": { "description": "Whether or not to use a mount for actor file systems.", "type": "boolean" + }, + "use_resource_constraints": { + "description": "Whether or not to use resource constraints on containers.\n\nYou should enable this if you see this error in development:\n\n``` cannot enter cgroupv2 \"/sys/fs/cgroup/test\" with domain controllers -- it is in an invalid state ```", + "type": "boolean" } }, "additionalProperties": false diff --git a/site/src/content/docs/cloud/self-hosting/server-spec.json b/site/src/content/docs/cloud/self-hosting/server-spec.json index 280b880e40..d3dc074c24 100644 --- a/site/src/content/docs/cloud/self-hosting/server-spec.json +++ b/site/src/content/docs/cloud/self-hosting/server-spec.json @@ -190,10 +190,11 @@ "driver": "redis" }, "tunnel": { - "public_host": "127.0.0.1:8003" + "public_host": "127.0.0.1:6423" }, "ui": { "enable": null, + "proxy_origin": null, "public_origin": null, "public_origin_regex": null }, @@ -1671,7 +1672,7 @@ }, "tunnel": { "default": { - "public_host": "127.0.0.1:8003" + "public_host": "127.0.0.1:6423" }, "allOf": [ { @@ -1682,6 +1683,7 @@ "ui": { "default": { "enable": null, + "proxy_origin": null, "public_origin": null, "public_origin_regex": null }, @@ -1927,6 +1929,11 @@ "description": "Enables serving the UI automatically.\n\nIf disabled, the UI can be hosted separately.", "type": "boolean" }, + "proxy_origin": { + "description": "Origin to proxy UI requests to. 
This should be the server serving the actual files for the frontend.\n\nThis is frequently either Vite for a development setup or Nginx for a simple setup.", + "type": "string", + "format": "uri" + }, "public_origin": { "description": "The origin URL for the UI.", "type": "string", diff --git a/site/src/content/docs/toolchain-spec.json b/site/src/content/docs/toolchain-spec.json index 0c3c735ea5..8093e3dabb 100644 --- a/site/src/content/docs/toolchain-spec.json +++ b/site/src/content/docs/toolchain-spec.json @@ -23,6 +23,14 @@ "additionalProperties": { "$ref": "#/definitions/Function" } + }, + "rivetkit": { + "default": null, + "allOf": [ + { + "$ref": "#/definitions/RivetKit" + } + ] } }, "additionalProperties": false, @@ -49,18 +57,18 @@ "Build": { "type": "object", "properties": { - "build_args": { + "buildArgs": { "description": "Build arguments to pass to the build.", "type": "object", "additionalProperties": { "type": "string" } }, - "build_path": { + "buildPath": { "description": "Directory to build the Docker image from.", "type": "string" }, - "build_target": { + "buildTarget": { "description": "Build target to upload.", "type": "string" }, @@ -95,11 +103,11 @@ "unstable": { "default": { "minify": null, - "analyze_result": null, - "esbuild_log_level": null, + "analyzeResult": null, + "esbuildLogLevel": null, "compression": null, - "dump_build": null, - "no_bundler": null + "dumpBuild": null, + "noBundler": null }, "allOf": [ { @@ -124,6 +132,13 @@ "enum": [ "native" ] + }, + { + "description": "Use Rivet Cloud for building.", + "type": "string", + "enum": [ + "remote" + ] } ] }, @@ -133,14 +148,14 @@ "description": "Legacy option. Docker image archive output from `docker save`. Slower lobby start times.", "type": "string", "enum": [ - "docker_image" + "dockerImage" ] }, { "description": "OCI bundle archive derived from a generated Docker image. 
Optimized for fast lobby start times.", "type": "string", "enum": [ - "oci_bundle" + "ociBundle" ] } ] @@ -176,7 +191,7 @@ "properties": { "networking": { "default": { - "internal_port": null + "internalPort": null }, "allOf": [ { @@ -195,10 +210,20 @@ } ] }, - "route_subpaths": { + "routeSubpaths": { "type": "boolean" }, - "strip_prefix": { + "runtime": { + "default": { + "environment": null + }, + "allOf": [ + { + "$ref": "#/definitions/FunctionRuntime" + } + ] + }, + "stripPrefix": { "default": null, "type": "boolean" }, @@ -213,13 +238,24 @@ "FunctionNetworking": { "type": "object", "properties": { - "internal_port": { + "internalPort": { "type": "integer", "format": "uint16", "minimum": 0.0 } } }, + "FunctionRuntime": { + "type": "object", + "properties": { + "environment": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, "Resources": { "type": "object", "required": [ @@ -239,13 +275,103 @@ } } }, + "RivetKit": { + "type": "object", + "required": [ + "registry", + "server" + ], + "properties": { + "buildArgs": { + "description": "Build arguments to pass to the build.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "buildPath": { + "description": "Directory to build the Docker image from.", + "type": "string" + }, + "buildTarget": { + "description": "Build target to upload.", + "type": "string" + }, + "dockerfile": { + "description": "Dockerfile to build.", + "type": "string" + }, + "image": { + "description": "Existing image tag to upload.", + "type": "string" + }, + "networking": { + "default": { + "internalPort": null + }, + "allOf": [ + { + "$ref": "#/definitions/FunctionNetworking" + } + ] + }, + "path": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "resources": { + "default": null, + "allOf": [ + { + "$ref": "#/definitions/Resources" + } + ] + }, + "routeSubpaths": { + "type": "boolean" + }, + "runtime": { + "default": { + "environment": null + }, + "allOf": [ + { + 
"$ref": "#/definitions/FunctionRuntime" + } + ] + }, + "server": { + "type": "string" + }, + "stripPrefix": { + "default": null, + "type": "boolean" + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "unstable": { + "description": "Unstable features.", + "allOf": [ + { + "$ref": "#/definitions/Unstable" + } + ] + } + } + }, "Unstable": { "type": "object", "properties": { - "allow_root": { + "allowRoot": { "type": "boolean" }, - "build_method": { + "buildMethod": { "$ref": "#/definitions/BuildMethod" }, "bundle": { @@ -260,22 +386,22 @@ "Unstable2": { "type": "object", "properties": { - "analyze_result": { + "analyzeResult": { "type": "boolean" }, "compression": { "$ref": "#/definitions/Compression" }, - "dump_build": { + "dumpBuild": { "type": "boolean" }, - "esbuild_log_level": { + "esbuildLogLevel": { "type": "string" }, "minify": { "type": "boolean" }, - "no_bundler": { + "noBundler": { "type": "boolean" } },