From 31d4a685034e28e10edf1b0bd5a6d8c37aa3fbe4 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Fri, 16 Feb 2024 16:46:43 -0300 Subject: [PATCH 01/23] remove old .env.example file, add get_block script for testing --- .env.example | 20 ------------- Cargo.lock | 10 +++++++ Cargo.toml | 2 +- .../packages/kns_indexer/get_block/Cargo.toml | 17 +++++++++++ .../packages/kns_indexer/get_block/src/lib.rs | 29 +++++++++++++++++++ kinode/packages/kns_indexer/pkg/scripts.json | 13 +++++++++ 6 files changed, 70 insertions(+), 21 deletions(-) delete mode 100644 .env.example create mode 100644 kinode/packages/kns_indexer/get_block/Cargo.toml create mode 100644 kinode/packages/kns_indexer/get_block/src/lib.rs create mode 100644 kinode/packages/kns_indexer/pkg/scripts.json diff --git a/.env.example b/.env.example deleted file mode 100644 index 0bf65bf99..000000000 --- a/.env.example +++ /dev/null @@ -1,20 +0,0 @@ -### node filesystem config, adjust according to your needs. -### note, will run with default values if no .env is found. - -### for example, if you use remote s3, you might want to set chunk_size to 5mb, and adjust flush_to_cold_interval - -### Default values automatically set (s3 defaults to None) - -# MEM_BUFFER_LIMIT=5242880 # 5mb -# READ_CACHE_LIMIT=5242880 # 5mb -# CHUNK_SIZE=262144 # 256kb -# FLUSH_TO_COLD_INTERVAL=60 # 60s -# ENCRYPTION=true # true -# CLOUD_ENABLED=false # false, controls whether new writes will be to s3 or local - -### Example s3 config -# S3_ACCESS_KEY=minioadmin -# S3_SECRET__KEY=minioadmin -# S3_REGION=eu-north-1 -# S3_BUCKET=mylittlebucket -# S3_ENDPOINT=http://localhost:9000 diff --git a/Cargo.lock b/Cargo.lock index aaf5b4869..affc5680e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1963,6 +1963,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "get_block" +version = "0.1.0" +dependencies = [ + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=3232423)", + "serde", + "serde_json", + "wit-bindgen", +] + [[package]] name = "getrandom" version = "0.2.12" diff --git a/Cargo.toml b/Cargo.toml index 5e8178580..cbd8822cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ members = [ "kinode/packages/app_store/download", "kinode/packages/app_store/install", "kinode/packages/app_store/uninstall", "kinode/packages/chess/chess", "kinode/packages/homepage/homepage", - "kinode/packages/kns_indexer/kns_indexer", + "kinode/packages/kns_indexer/kns_indexer", "kinode/packages/kns_indexer/get_block", "kinode/packages/terminal/terminal", "kinode/packages/terminal/alias", "kinode/packages/terminal/cat", "kinode/packages/terminal/echo", "kinode/packages/terminal/hi", "kinode/packages/terminal/m", "kinode/packages/terminal/top", "kinode/packages/tester/tester", "kinode/packages/tester/test_runner", diff --git a/kinode/packages/kns_indexer/get_block/Cargo.toml b/kinode/packages/kns_indexer/get_block/Cargo.toml new file mode 100644 index 000000000..c604f349e --- /dev/null +++ b/kinode/packages/kns_indexer/get_block/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "get_block" +version = "0.1.0" +edition = "2021" + + +[dependencies] +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "3232423" } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } + +[lib] +crate-type = ["cdylib"] + +[package.metadata.component] +package = "kinode:process" diff --git a/kinode/packages/kns_indexer/get_block/src/lib.rs 
b/kinode/packages/kns_indexer/get_block/src/lib.rs new file mode 100644 index 000000000..61a0e9f0b --- /dev/null +++ b/kinode/packages/kns_indexer/get_block/src/lib.rs @@ -0,0 +1,29 @@ +use kinode_process_lib::{ + await_next_request_body, call_init, eth::get_block_number, println, Address, Request, SendError, +}; + +wit_bindgen::generate!({ + path: "wit", + world: "process", + exports: { + world: Component, + }, +}); + +call_init!(init); + +fn init(our: Address) { + let Ok(_args) = await_next_request_body() else { + println!("get_block: failed to get args, aborting"); + return; + }; + + match get_block_number() { + Ok(block_number) => { + println!("latest block number: {block_number}"); + } + Err(e) => { + println!("get_block: failed to get block number: {}", e); + } + } +} diff --git a/kinode/packages/kns_indexer/pkg/scripts.json b/kinode/packages/kns_indexer/pkg/scripts.json new file mode 100644 index 000000000..8d4119cab --- /dev/null +++ b/kinode/packages/kns_indexer/pkg/scripts.json @@ -0,0 +1,13 @@ +{ + "get_block.wasm": { + "root": false, + "public": false, + "request_networking": false, + "request_capabilities": [ + "eth:distro:sys" + ], + "grant_capabilities": [ + "eth:distro:sys" + ] + } +} \ No newline at end of file From b52768d1799966692a45927dea74f92af543af79 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Sat, 17 Feb 2024 19:34:29 -0300 Subject: [PATCH 02/23] WIP: multi-provider model --- kinode/src/eth/provider.rs | 724 ++++++++++++++++++------------------- kinode/src/kernel/mod.rs | 8 +- kinode/src/main.rs | 84 ++--- kinode/src/net/types.rs | 35 +- lib/src/core.rs | 36 ++ lib/src/eth.rs | 18 +- lib/src/net.rs | 0 7 files changed, 444 insertions(+), 461 deletions(-) create mode 100644 lib/src/net.rs diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index 4f45a605f..24935fa28 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -7,389 +7,383 @@ use anyhow::Result; use dashmap::DashMap; use lib::types::core::*; use lib::types::eth::*; +use serde::{Deserialize, Serialize}; use std::str::FromStr; use std::sync::Arc; use tokio::task::JoinHandle; use url::Url; -/// Provider config. Can currently be a node or a ws provider instance. -/// Future: add chainId configs, several nodes and fallbacks. -pub enum ProviderConfig { - Node(String), - Provider(Provider), -} - /// The ETH provider runtime process is responsible for connecting to one or more ETH RPC providers -/// and using them to service indexing requests from other apps. This could also be done by a wasm +/// and using them to service indexing requests from other apps. This could also be done by a Wasm /// app, but in the future, this process will hopefully expand in scope to perform more complex /// indexing and ETH node responsibilities. pub async fn provider( our: String, - provider_node: ProviderInput, - public: bool, + configs: SavedConfigs, send_to_loop: MessageSender, mut recv_in_client: MessageReceiver, - _print_tx: PrintSender, + print_tx: PrintSender, ) -> Result<()> { let our = Arc::new(our); - // Initialize the provider conditionally based on rpc_url - // Todo: make provider support multiple transports, one direct and another passthrough. 
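The block being removed here built a single pubsub provider straight from the CLI-supplied URL. For reference, a condensed sketch of that connect flow, assuming the same alloy API surface used throughout this file (WsConnect, ClientBuilder, Provider::new_with_client); the helper name and the Provider<PubSubFrontend> annotation are assumptions, not part of the patch:

    // Sketch: one-shot WebSocket provider setup, as the removed code did it.
    async fn connect_ws_provider(rpc_url: String) -> anyhow::Result<Provider<PubSubFrontend>> {
        match url::Url::parse(&rpc_url)?.scheme() {
            "ws" | "wss" => {
                let connector = WsConnect { url: rpc_url, auth: None };
                let client = ClientBuilder::default().ws(connector).await?;
                Ok(Provider::new_with_client(client))
            }
            _ => Err(anyhow::anyhow!("only `ws://` or `wss://` URLs are supported")),
        }
    }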
- let provider_config = match provider_node { - ProviderInput::Ws(rpc_url) => { - // Validate and parse the WebSocket URL - match Url::parse(&rpc_url)?.scheme() { - "ws" | "wss" => { - let connector = WsConnect { - url: rpc_url, - auth: None, - }; - let client = ClientBuilder::default().ws(connector).await?; - ProviderConfig::Provider(Provider::new_with_client(client)) - } - _ => { - return Err(anyhow::anyhow!( - "Only `ws://` or `wss://` URLs are supported." - )) - } - } - } - ProviderInput::Node(node_id) => { - // Directly use the node ID - ProviderConfig::Node(node_id) - } - }; - - let provider_config = Arc::new(provider_config); - - // handles of longrunning subscriptions. - let connections: DashMap<(ProcessId, u64), JoinHandle>> = DashMap::new(); - let connections = Arc::new(connections); - - // add whitelist, logic in provider middleware? - while let Some(km) = recv_in_client.recv().await { - // clone Arcs - let our = our.clone(); - let send_to_loop = send_to_loop.clone(); - let provider_config = provider_config.clone(); - let connections = connections.clone(); - tokio::spawn(async move { - if let Err(e) = handle_message( - &our, - &km, - &send_to_loop, - provider_config, - connections, - public, - ) - .await - { - let _ = send_to_loop - .send(make_error_message(our.to_string(), km, e)) - .await; - }; - }); - } + // // Initialize the provider conditionally based on rpc_url + // // Todo: make provider support multiple transports, one direct and another passthrough. + // let provider_config = match provider_node { + // ProviderInput::Ws(rpc_url) => { + // // Validate and parse the WebSocket URL + // match Url::parse(&rpc_url)?.scheme() { + // "ws" | "wss" => { + // let connector = WsConnect { + // url: rpc_url, + // auth: None, + // }; + // let client = ClientBuilder::default().ws(connector).await?; + // ProviderConfig::Provider(Provider::new_with_client(client)) + // } + // _ => { + // return Err(anyhow::anyhow!( + // "Only `ws://` or `wss://` URLs are supported." + // )) + // } + // } + // } + // ProviderInput::Node(node_id) => { + // // Directly use the node ID + // ProviderConfig::Node(node_id) + // } + // }; + + // let provider_config = Arc::new(provider_config); + + // // handles of longrunning subscriptions. + // let connections: DashMap<(ProcessId, u64), JoinHandle>> = DashMap::new(); + // let connections = Arc::new(connections); + + // while let Some(km) = recv_in_client.recv().await { + // // clone Arcs + // let our = our.clone(); + // let send_to_loop = send_to_loop.clone(); + // let provider_config = provider_config.clone(); + // let connections = connections.clone(); + // tokio::spawn(async move { + // if let Err(e) = handle_message( + // &our, + // &km, + // &send_to_loop, + // provider_config, + // connections, + // public, + // ) + // .await + // { + // let _ = send_to_loop + // .send(make_error_message(our.to_string(), km, e)) + // .await; + // }; + // }); + // } Err(anyhow::anyhow!("eth: fatal: message receiver closed!")) } -async fn handle_message( - our: &str, - km: &KernelMessage, - send_to_loop: &MessageSender, - provider_config: Arc, - connections: Arc>>>, - public: bool, -) -> Result<(), EthError> { - match &km.message { - Message::Request(req) => { - match &*provider_config { - ProviderConfig::Node(node) => { - if km.source.node == our { - // we have no provider, let's send this request to someone who has one. 
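The forward below leans on the kernel's rsvp field: the original requester is stamped into rsvp before the request is relayed to the provider node, so the eventual response can be routed back without keeping local state. A minimal sketch of the same pattern, with a hypothetical helper name:

    // Sketch: wrap an incoming request for relay to `provider_node`,
    // recording the original requester in `rsvp` for the return trip.
    fn relay_to(our: &str, provider_node: &str, km: &KernelMessage, req: Request) -> KernelMessage {
        KernelMessage {
            id: km.id,
            source: Address { node: our.to_string(), process: ETH_PROCESS_ID.clone() },
            target: Address { node: provider_node.to_string(), process: ETH_PROCESS_ID.clone() },
            rsvp: Some(km.source.clone()), // response will be passed through to them
            message: Message::Request(req),
            lazy_load_blob: None,
        }
    }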
- let request = KernelMessage { - id: km.id, - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target: Address { - node: "jugodenaranja.os".to_string(), - process: ETH_PROCESS_ID.clone(), - }, - rsvp: Some(km.source.clone()), - message: Message::Request(req.clone()), - lazy_load_blob: None, - }; - - let _ = send_to_loop.send(request).await; - } else { - // either someone asking us for rpc, or we are passing through a sub event. - handle_remote_request(our, km, send_to_loop, None, connections, public) - .await? - } - } - ProviderConfig::Provider(provider) => { - if km.source.node == our { - handle_local_request(our, km, send_to_loop, &provider, connections, public) - .await? - } else { - handle_remote_request( - our, - km, - send_to_loop, - Some(provider), - connections, - public, - ) - .await? - } - } - } - } - Message::Response(_) => { - // handle passthrough responses, send to rsvp. - if km.source.process == "eth:distro:sys" { - if let Some(rsvp) = &km.rsvp { - let _ = send_to_loop - .send(KernelMessage { - id: rand::random(), - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target: rsvp.clone(), - rsvp: None, - message: km.message.clone(), - lazy_load_blob: None, - }) - .await; - } - } - } - } - Ok(()) -} - -async fn handle_local_request( - our: &str, - km: &KernelMessage, - send_to_loop: &MessageSender, - provider: &Provider, - connections: Arc>>>, - public: bool, -) -> Result<(), EthError> { - let Message::Request(req) = &km.message else { - return Err(EthError::InvalidMethod( - "eth: only accepts requests".to_string(), - )); - }; - let action = serde_json::from_slice::(&req.body).map_err(|e| { - EthError::InvalidMethod(format!("eth: failed to deserialize request: {:?}", e)) - })?; - - // we might want some of these in payloads.. sub items? 
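Requests reaching the match below arrive as serde_json-encoded EthAction bodies from local processes. From the process side (as in the get_block script from PATCH 01), issuing one looks roughly like this sketch; it uses the EthAction shape as of this patch (chain_id is only added in PATCH 03), and assumes Request::to / send_and_await_response follow the kinode_process_lib conventions of the rev pinned above:

    // Sketch: a process asking eth:distro:sys for the latest block number.
    // `eth` is the provider's Address (eth:distro:sys on our node).
    fn request_block_number(eth: Address) -> anyhow::Result<()> {
        let action = EthAction::Request {
            method: "eth_blockNumber".to_string(),
            params: serde_json::Value::Array(vec![]),
        };
        let _resp = Request::to(eth)
            .body(serde_json::to_vec(&action)?)
            .send_and_await_response(5)??;
        Ok(())
    }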
- let return_body: EthResponse = match action { - EthAction::SubscribeLogs { - sub_id, - kind, - params, - } => { - let sub_id = (km.target.process.clone(), sub_id); - - let kind = serde_json::to_value(&kind).unwrap(); - let params = serde_json::to_value(¶ms).unwrap(); - - let id = provider - .inner() - .prepare("eth_subscribe", [kind, params]) - .await - .map_err(|e| EthError::TransportError(e.to_string()))?; - - let rx = provider.inner().get_raw_subscription(id).await; - let handle = tokio::spawn(handle_subscription_stream( - our.to_string(), - sub_id.1.clone(), - rx, - km.source.clone(), - km.rsvp.clone(), - send_to_loop.clone(), - )); - - connections.insert(sub_id, handle); - EthResponse::Ok - } - EthAction::UnsubscribeLogs(sub_id) => { - let sub_id = (km.target.process.clone(), sub_id); - let handle = connections - .remove(&sub_id) - .ok_or(EthError::SubscriptionNotFound)?; - - handle.1.abort(); - EthResponse::Ok - } - EthAction::Request { method, params } => { - let method = to_static_str(&method).ok_or(EthError::InvalidMethod(method))?; - - let response: serde_json::Value = provider - .inner() - .prepare(method, params) - .await - .map_err(|e| EthError::TransportError(e.to_string()))?; - EthResponse::Response { value: response } - } - }; - if let Some(_) = req.expects_response { - let _ = send_to_loop - .send(KernelMessage { - id: km.id, - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target: km.source.clone(), - rsvp: km.rsvp.clone(), - message: Message::Response(( - Response { - inherit: false, - body: serde_json::to_vec(&return_body).unwrap(), - metadata: req.metadata.clone(), - capabilities: vec![], - }, - None, - )), - lazy_load_blob: None, - }) - .await; - } - - Ok(()) -} - -// here we are either processing another nodes request. -// or we are passing through an ethSub Request.. -async fn handle_remote_request( - our: &str, - km: &KernelMessage, - send_to_loop: &MessageSender, - provider: Option<&Provider>, - connections: Arc>>>, - public: bool, -) -> Result<(), EthError> { - let Message::Request(req) = &km.message else { - return Err(EthError::InvalidMethod( - "eth: only accepts requests".to_string(), - )); - }; - - if let Some(provider) = provider { - // we need some sort of agreement perhaps on rpc providing. - // even with an agreement, fake ethsubevents could be sent to us. - // light clients could verify blocks perhaps... 
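The subscription bookkeeping above stores one JoinHandle per (ProcessId, sub_id) key, so unsubscribing is remove-then-abort; note that DashMap::remove returns the (key, value) pair, which is why the code above calls handle.1.abort(). The pattern in isolation:

    use dashmap::DashMap;
    use tokio::task::JoinHandle;

    // Sketch: cancel a tracked subscription task.
    fn cancel(subs: &DashMap<u64, JoinHandle<()>>, sub_id: u64) -> bool {
        match subs.remove(&sub_id) {
            Some((_key, handle)) => {
                handle.abort(); // stop streaming logs for this subscription
                true
            }
            None => false,
        }
    }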
- if !public { - return Err(EthError::PermissionDenied("not on the list.".to_string())); - } - - let action = serde_json::from_slice::(&req.body).map_err(|e| { - EthError::InvalidMethod(format!("eth: failed to deserialize request: {:?}", e)) - })?; - - let return_body: EthResponse = match action { - EthAction::SubscribeLogs { - sub_id, - kind, - params, - } => { - let sub_id = (km.target.process.clone(), sub_id); - - let kind = serde_json::to_value(&kind).unwrap(); - let params = serde_json::to_value(¶ms).unwrap(); - - let id = provider - .inner() - .prepare("eth_subscribe", [kind, params]) - .await - .map_err(|e| EthError::TransportError(e.to_string()))?; - - let rx = provider.inner().get_raw_subscription(id).await; - let handle = tokio::spawn(handle_subscription_stream( - our.to_string(), - sub_id.1.clone(), - rx, - km.target.clone(), - km.rsvp.clone(), - send_to_loop.clone(), - )); - - connections.insert(sub_id, handle); - EthResponse::Ok - } - EthAction::UnsubscribeLogs(sub_id) => { - let sub_id = (km.target.process.clone(), sub_id); - let handle = connections - .remove(&sub_id) - .ok_or(EthError::SubscriptionNotFound)?; - - handle.1.abort(); - EthResponse::Ok - } - EthAction::Request { method, params } => { - let method = to_static_str(&method).ok_or(EthError::InvalidMethod(method))?; - - let response: serde_json::Value = provider - .inner() - .prepare(method, params) - .await - .map_err(|e| EthError::TransportError(e.to_string()))?; - - EthResponse::Response { value: response } - } - }; - - let response = KernelMessage { - id: km.id, - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target: km.source.clone(), - rsvp: km.rsvp.clone(), - message: Message::Response(( - Response { - inherit: false, - body: serde_json::to_vec(&return_body).unwrap(), - metadata: req.metadata.clone(), - capabilities: vec![], - }, - None, - )), - lazy_load_blob: None, - }; - - let _ = send_to_loop.send(response).await; - } else { - // We do not have a provider, this is a reply for a request made by us. - if let Ok(eth_sub) = serde_json::from_slice::(&req.body) { - // forward... - if let Some(target) = km.rsvp.clone() { - let _ = send_to_loop - .send(KernelMessage { - id: rand::random(), - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target: target, - rsvp: None, - message: Message::Request(req.clone()), - lazy_load_blob: None, - }) - .await; - } - } - } - Ok(()) -} +/// Handle an incoming message. +// async fn handle_message( +// our: &str, +// km: &KernelMessage, +// send_to_loop: &MessageSender, +// provider_config: Arc, +// connections: Arc>>>, +// public: bool, +// ) -> Result<(), EthError> { +// match &km.message { +// Message::Request(req) => { +// match &*provider_config { +// ProviderConfig::Node(node) => { +// if km.source.node == our { +// // we have no provider, let's send this request to someone who has one. +// let request = KernelMessage { +// id: km.id, +// source: Address { +// node: our.to_string(), +// process: ETH_PROCESS_ID.clone(), +// }, +// target: Address { +// node: "jugodenaranja.os".to_string(), +// process: ETH_PROCESS_ID.clone(), +// }, +// rsvp: Some(km.source.clone()), +// message: Message::Request(req.clone()), +// lazy_load_blob: None, +// }; + +// let _ = send_to_loop.send(request).await; +// } else { +// // either someone asking us for rpc, or we are passing through a sub event. +// handle_remote_request(our, km, send_to_loop, None, connections, public) +// .await? 
+// } +// } +// ProviderConfig::Provider(provider) => { +// if km.source.node == our { +// handle_local_request(our, km, send_to_loop, &provider, connections, public) +// .await? +// } else { +// handle_remote_request( +// our, +// km, +// send_to_loop, +// Some(provider), +// connections, +// public, +// ) +// .await? +// } +// } +// } +// } +// Message::Response(_) => { +// // handle passthrough responses, send to rsvp. +// if km.source.process == "eth:distro:sys" { +// if let Some(rsvp) = &km.rsvp { +// let _ = send_to_loop +// .send(KernelMessage { +// id: rand::random(), +// source: Address { +// node: our.to_string(), +// process: ETH_PROCESS_ID.clone(), +// }, +// target: rsvp.clone(), +// rsvp: None, +// message: km.message.clone(), +// lazy_load_blob: None, +// }) +// .await; +// } +// } +// } +// } +// Ok(()) +// } + +/// Handle a local request. +// async fn handle_local_request( +// our: &str, +// km: &KernelMessage, +// send_to_loop: &MessageSender, +// provider: &Provider, +// connections: Arc>>>, +// public: bool, +// ) -> Result<(), EthError> { +// let Message::Request(req) = &km.message else { +// return Err(EthError::InvalidMethod( +// "eth: only accepts requests".to_string(), +// )); +// }; +// let action = serde_json::from_slice::(&req.body).map_err(|e| { +// EthError::InvalidMethod(format!("eth: failed to deserialize request: {:?}", e)) +// })?; + +// // we might want some of these in payloads.. sub items? +// let return_body: EthResponse = match action { +// EthAction::SubscribeLogs { +// sub_id, +// kind, +// params, +// } => { +// let sub_id = (km.target.process.clone(), sub_id); + +// let kind = serde_json::to_value(&kind).unwrap(); +// let params = serde_json::to_value(¶ms).unwrap(); + +// let id = provider +// .inner() +// .prepare("eth_subscribe", [kind, params]) +// .await +// .map_err(|e| EthError::TransportError(e.to_string()))?; + +// let rx = provider.inner().get_raw_subscription(id).await; +// let handle = tokio::spawn(handle_subscription_stream( +// our.to_string(), +// sub_id.1.clone(), +// rx, +// km.source.clone(), +// km.rsvp.clone(), +// send_to_loop.clone(), +// )); + +// connections.insert(sub_id, handle); +// EthResponse::Ok +// } +// EthAction::UnsubscribeLogs(sub_id) => { +// let sub_id = (km.target.process.clone(), sub_id); +// let handle = connections +// .remove(&sub_id) +// .ok_or(EthError::SubscriptionNotFound)?; + +// handle.1.abort(); +// EthResponse::Ok +// } +// EthAction::Request { method, params } => { +// let method = to_static_str(&method).ok_or(EthError::InvalidMethod(method))?; + +// let response: serde_json::Value = provider +// .inner() +// .prepare(method, params) +// .await +// .map_err(|e| EthError::TransportError(e.to_string()))?; +// EthResponse::Response { value: response } +// } +// }; +// if let Some(_) = req.expects_response { +// let _ = send_to_loop +// .send(KernelMessage { +// id: km.id, +// source: Address { +// node: our.to_string(), +// process: ETH_PROCESS_ID.clone(), +// }, +// target: km.source.clone(), +// rsvp: km.rsvp.clone(), +// message: Message::Response(( +// Response { +// inherit: false, +// body: serde_json::to_vec(&return_body).unwrap(), +// metadata: req.metadata.clone(), +// capabilities: vec![], +// }, +// None, +// )), +// lazy_load_blob: None, +// }) +// .await; +// } + +// Ok(()) +// } + +/// here we are either processing another nodes request. +/// or we are passing through an ethSub Request.. 
+// async fn handle_remote_request( +// our: &str, +// km: &KernelMessage, +// send_to_loop: &MessageSender, +// provider: Option<&Provider>, +// connections: Arc>>>, +// public: bool, +// ) -> Result<(), EthError> { +// let Message::Request(req) = &km.message else { +// return Err(EthError::InvalidMethod( +// "eth: only accepts requests".to_string(), +// )); +// }; + +// if let Some(provider) = provider { +// // we need some sort of agreement perhaps on rpc providing. +// // even with an agreement, fake ethsubevents could be sent to us. +// // light clients could verify blocks perhaps... +// if !public { +// return Err(EthError::PermissionDenied("not on the list.".to_string())); +// } + +// let action = serde_json::from_slice::(&req.body).map_err(|e| { +// EthError::InvalidMethod(format!("eth: failed to deserialize request: {:?}", e)) +// })?; + +// let return_body: EthResponse = match action { +// EthAction::SubscribeLogs { +// sub_id, +// kind, +// params, +// } => { +// let sub_id = (km.target.process.clone(), sub_id); + +// let kind = serde_json::to_value(&kind).unwrap(); +// let params = serde_json::to_value(¶ms).unwrap(); + +// let id = provider +// .inner() +// .prepare("eth_subscribe", [kind, params]) +// .await +// .map_err(|e| EthError::TransportError(e.to_string()))?; + +// let rx = provider.inner().get_raw_subscription(id).await; +// let handle = tokio::spawn(handle_subscription_stream( +// our.to_string(), +// sub_id.1.clone(), +// rx, +// km.target.clone(), +// km.rsvp.clone(), +// send_to_loop.clone(), +// )); + +// connections.insert(sub_id, handle); +// EthResponse::Ok +// } +// EthAction::UnsubscribeLogs(sub_id) => { +// let sub_id = (km.target.process.clone(), sub_id); +// let handle = connections +// .remove(&sub_id) +// .ok_or(EthError::SubscriptionNotFound)?; + +// handle.1.abort(); +// EthResponse::Ok +// } +// EthAction::Request { method, params } => { +// let method = to_static_str(&method).ok_or(EthError::InvalidMethod(method))?; + +// let response: serde_json::Value = provider +// .inner() +// .prepare(method, params) +// .await +// .map_err(|e| EthError::TransportError(e.to_string()))?; + +// EthResponse::Response { value: response } +// } +// }; + +// let response = KernelMessage { +// id: km.id, +// source: Address { +// node: our.to_string(), +// process: ETH_PROCESS_ID.clone(), +// }, +// target: km.source.clone(), +// rsvp: km.rsvp.clone(), +// message: Message::Response(( +// Response { +// inherit: false, +// body: serde_json::to_vec(&return_body).unwrap(), +// metadata: req.metadata.clone(), +// capabilities: vec![], +// }, +// None, +// )), +// lazy_load_blob: None, +// }; + +// let _ = send_to_loop.send(response).await; +// } else { +// // We do not have a provider, this is a reply for a request made by us. +// if let Ok(eth_sub) = serde_json::from_slice::(&req.body) { +// // forward... +// if let Some(target) = km.rsvp.clone() { +// let _ = send_to_loop +// .send(KernelMessage { +// id: rand::random(), +// source: Address { +// node: our.to_string(), +// process: ETH_PROCESS_ID.clone(), +// }, +// target: target, +// rsvp: None, +// message: Message::Request(req.clone()), +// lazy_load_blob: None, +// }) +// .await; +// } +// } +// } +// Ok(()) +// } /// Executed as a long-lived task. The JoinHandle is stored in the `connections` map. 
/// This task is responsible for connecting to the ETH RPC provider and streaming logs diff --git a/kinode/src/kernel/mod.rs b/kinode/src/kernel/mod.rs index 574afd641..608af0c72 100644 --- a/kinode/src/kernel/mod.rs +++ b/kinode/src/kernel/mod.rs @@ -672,7 +672,7 @@ pub async fn kernel( home_directory_path: String, contract_address: String, runtime_extensions: Vec<(t::ProcessId, t::MessageSender, bool)>, - default_pki_entries: Vec, + default_pki_entries: Vec, ) -> Result<()> { let mut config = Config::new(); config.cache_config_load_default().unwrap(); @@ -844,10 +844,8 @@ pub async fn kernel( message: t::Message::Request(t::Request { inherit: false, expects_response: None, - body: rmp_serde::to_vec(&crate::net::NetActions::KnsBatchUpdate( - default_pki_entries, - )) - .unwrap(), + body: rmp_serde::to_vec(&t::NetActions::KnsBatchUpdate(default_pki_entries)) + .unwrap(), metadata: None, capabilities: vec![], }), diff --git a/kinode/src/main.rs b/kinode/src/main.rs index 1cbc710ff..7c04d97e0 100644 --- a/kinode/src/main.rs +++ b/kinode/src/main.rs @@ -2,17 +2,14 @@ use anyhow::Result; use clap::{arg, value_parser, Command}; -use rand::seq::SliceRandom; +use lib::types::core::*; +#[cfg(feature = "simulation-mode")] +use ring::{rand::SystemRandom, signature, signature::KeyPair}; use std::env; use std::sync::Arc; use tokio::sync::{mpsc, oneshot}; use tokio::{fs, time::timeout}; -use lib::types::core::*; - -#[cfg(feature = "simulation-mode")] -use ring::{rand::SystemRandom, signature, signature::KeyPair}; - mod eth; mod http; mod kernel; @@ -110,14 +107,7 @@ async fn main() { arg!(--testnet "If set, use Sepolia testnet") .default_value("false") .value_parser(value_parser!(bool)), - ) - .arg( - arg!(--public "If set, allow rpc passthrough") - .default_value("false") - .value_parser(value_parser!(bool)), - ) - .arg(arg!(--rpcnode "RPC node provider must be a valid address").required(false)) - .arg(arg!(--rpc "Ethereum RPC endpoint (must be wss://)").required(false)); + ); #[cfg(feature = "simulation-mode")] let app = app @@ -141,9 +131,6 @@ async fn main() { None => (8080, false), }; let on_testnet = *matches.get_one::("testnet").unwrap(); - let public = *matches.get_one::("public").unwrap(); - let rpc_url = matches.get_one::("rpc").cloned(); - let rpc_node = matches.get_one::("rpcnode").cloned(); #[cfg(not(feature = "simulation-mode"))] let is_detached = false; @@ -190,47 +177,24 @@ async fn main() { } } + if let Err(e) = fs::create_dir_all(home_directory_path).await { + panic!("failed to create home directory: {:?}", e); + } + println!("home at {}\r", home_directory_path); + // default eth providers/routers - type KnsUpdate = crate::net::KnsUpdate; - let default_pki_entries: Vec = - match fs::read_to_string(format!("{}/.default_providers", home_directory_path)).await { - Ok(contents) => serde_json::from_str(&contents).unwrap(), + let eth_provider_config: lib::eth::SavedConfigs = + match fs::read_to_string(format!("{}/.saved_providers", home_directory_path)).await { + Ok(contents) => { + println!("loaded saved providers\r"); + serde_json::from_str(&contents).unwrap() + } Err(_) => match on_testnet { true => serde_json::from_str(DEFAULT_PROVIDERS_TESTNET).unwrap(), false => serde_json::from_str(DEFAULT_PROVIDERS_MAINNET).unwrap(), }, }; - type ProviderInput = lib::eth::ProviderInput; - let eth_provider: ProviderInput; - - match (rpc_url.clone(), rpc_node) { - (Some(url), Some(_)) => { - println!("passed both node and url for rpc, using url."); - eth_provider = ProviderInput::Ws(url); - } - 
(Some(url), None) => { - eth_provider = ProviderInput::Ws(url); - } - (None, Some(ref node)) => { - println!("trying to use remote node for rpc: {}", node); - eth_provider = ProviderInput::Node(node.clone()); - } - (None, None) => { - let random_provider = default_pki_entries.choose(&mut rand::thread_rng()).unwrap(); - let default_provider = random_provider.name.clone(); - - println!("no rpc provided, using a default: {}", default_provider); - - eth_provider = ProviderInput::Node(default_provider); - } - } - - if let Err(e) = fs::create_dir_all(home_directory_path).await { - panic!("failed to create home directory: {:?}", e); - } - println!("home at {}\r", home_directory_path); - // kernel receives system messages via this channel, all other modules send messages let (kernel_message_sender, kernel_message_receiver): (MessageSender, MessageReceiver) = mpsc::channel(EVENT_LOOP_CHANNEL_CAPACITY); @@ -502,7 +466,20 @@ async fn main() { home_directory_path.clone(), contract_address.to_string(), runtime_extensions, - default_pki_entries, + // from saved eth provider config, filter for node identities which will be + // bootstrapped into the networking module, so that this node can start + // getting PKI info ("bootstrap") + eth_provider_config + .clone() + .into_iter() + .filter_map(|config| { + if let lib::eth::NodeOrRpcUrl::Node(kns_update) = config.provider { + Some(kns_update) + } else { + None + } + }) + .collect(), )); #[cfg(not(feature = "simulation-mode"))] tasks.spawn(net::networking( @@ -574,8 +551,7 @@ async fn main() { #[cfg(not(feature = "simulation-mode"))] tasks.spawn(eth::provider::provider( our.name.clone(), - eth_provider, - public, + eth_provider_config, kernel_message_sender.clone(), eth_provider_receiver, print_sender.clone(), diff --git a/kinode/src/net/types.rs b/kinode/src/net/types.rs index fd8f11571..da850cfa0 100644 --- a/kinode/src/net/types.rs +++ b/kinode/src/net/types.rs @@ -1,5 +1,6 @@ use dashmap::DashMap; use futures::stream::{SplitSink, SplitStream}; +use lib::types::core::*; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::sync::Arc; @@ -7,8 +8,6 @@ use tokio::net::TcpStream; use tokio::sync::mpsc::UnboundedSender; use tokio_tungstenite::{tungstenite, MaybeTlsStream, WebSocketStream}; -use lib::types::core::*; - /// Sent to a node when you want to connect directly to them. /// Sent in the 'e, ee, s, es' and 's, se' phases of XX noise protocol pattern. #[derive(Debug, Deserialize, Serialize)] @@ -86,35 +85,3 @@ pub struct Peer { pub routing_for: bool, pub sender: UnboundedSender, } - -/// Must be parsed from message pack vector. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum NetActions { - /// Received from a router of ours when they have a new pending passthrough for us. - /// We should respond (if we desire) by using them to initialize a routed connection - /// with the NodeId given. - ConnectionRequest(NodeId), - /// can only receive from trusted source, for now just ourselves locally, - /// in the future could get from remote provider - KnsUpdate(KnsUpdate), - KnsBatchUpdate(Vec), -} - -/// For now, only sent in response to a ConnectionRequest. 
-/// Must be parsed from message pack vector
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum NetResponses {
-    Accepted(NodeId),
-    Rejected(NodeId),
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct KnsUpdate {
-    pub name: String, // actual username / domain name
-    pub owner: String,
-    pub node: String, // hex namehash of node
-    pub public_key: String,
-    pub ip: String,
-    pub port: u16,
-    pub routers: Vec<String>,
-}
diff --git a/lib/src/core.rs b/lib/src/core.rs
index bfbd8203a..a3fc37695 100644
--- a/lib/src/core.rs
+++ b/lib/src/core.rs
@@ -1522,3 +1522,39 @@ pub enum TimerAction {
     Debug,
     SetTimer(u64),
 }
+
+//
+// networking protocol types
+//
+
+/// Must be parsed from message pack vector.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum NetActions {
+    /// Received from a router of ours when they have a new pending passthrough for us.
+    /// We should respond (if we desire) by using them to initialize a routed connection
+    /// with the NodeId given.
+    ConnectionRequest(NodeId),
+    /// can only receive from trusted source, for now just ourselves locally,
+    /// in the future could get from remote provider
+    KnsUpdate(KnsUpdate),
+    KnsBatchUpdate(Vec<KnsUpdate>),
+}
+
+/// For now, only sent in response to a ConnectionRequest.
+/// Must be parsed from message pack vector
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum NetResponses {
+    Accepted(NodeId),
+    Rejected(NodeId),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct KnsUpdate {
+    pub name: String, // actual username / domain name
+    pub owner: String,
+    pub node: String, // hex namehash of node
+    pub public_key: String,
+    pub ip: String,
+    pub port: u16,
+    pub routers: Vec<String>,
+}
diff --git a/lib/src/eth.rs b/lib/src/eth.rs
index e36564b96..58a3addc9 100644
--- a/lib/src/eth.rs
+++ b/lib/src/eth.rs
@@ -103,7 +103,19 @@ pub fn to_static_str(method: &str) -> Option<&'static str> {
     }
 }
 
-pub enum ProviderInput {
-    Ws(String),
-    Node(String),
-}
+pub type SavedConfigs = Vec<ProviderConfig>;
+
+/// Provider config. Can currently be a node or a ws provider instance.
+#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ProviderConfig { + pub chain_id: u64, + pub usable: bool, + pub trusted: bool, + pub provider: NodeOrRpcUrl, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub enum NodeOrRpcUrl { + Node(crate::core::KnsUpdate), + RpcUrl(String), } diff --git a/lib/src/net.rs b/lib/src/net.rs new file mode 100644 index 000000000..e69de29bb From 96f47a77cb296331962ceee0324dd68332911957 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Wed, 21 Feb 2024 12:31:32 -0300 Subject: [PATCH 03/23] WIP scaffolding multi-chain multi-provider --- kinode/build.rs | 5 + kinode/default_providers_mainnet.json | 79 ++-- kinode/default_providers_testnet.json | 79 ++-- kinode/src/eth/provider.rs | 508 ++++++++++++++++---------- kinode/src/main.rs | 1 + lib/src/eth.rs | 72 +++- 6 files changed, 495 insertions(+), 249 deletions(-) diff --git a/kinode/build.rs b/kinode/build.rs index 4ea37ee56..443c2959d 100644 --- a/kinode/build.rs +++ b/kinode/build.rs @@ -46,6 +46,11 @@ fn build_and_zip_package( } fn main() -> anyhow::Result<()> { + if std::env::var("SKIP_BUILD_SCRIPT").is_ok() { + println!("Skipping build script"); + return Ok(()); + } + let pwd = std::env::current_dir()?; let parent_dir = pwd.parent().unwrap(); let packages_dir = pwd.join("packages"); diff --git a/kinode/default_providers_mainnet.json b/kinode/default_providers_mainnet.json index 46f041c27..a01899ae3 100644 --- a/kinode/default_providers_mainnet.json +++ b/kinode/default_providers_mainnet.json @@ -1,29 +1,66 @@ [ { - "name": "default-router-1.os", - "owner": "", - "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", - "public_key": "0xb1b1cf23c89f651aac3e5fd4decb04aa177ab0ec8ce5f1d3877b90bb6f5779db", - "ip": "147.135.114.167", - "port": 9005, - "routers": [] + "chain_id": 1, + "trusted": false, + "provider": { + "RpcUrl": "wss://ethereum.publicnode.com" + }, + "public": false, + "allow": [], + "deny": [] }, { - "name": "default-router-2.os", - "owner": "", - "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", - "public_key": "0xab9f1a996db3a4e1dbcd31d765daedeb3af9af4f570c0968463b5be3a7d1e992", - "ip": "147.135.114.167", - "port": 9006, - "routers": [] + "chain_id": 10, + "trusted": true, + "provider": { + "Node": { + "name": "default-router-1.os", + "owner": "", + "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", + "public_key": "0xb1b1cf23c89f651aac3e5fd4decb04aa177ab0ec8ce5f1d3877b90bb6f5779db", + "ip": "147.135.114.167", + "port": 9005, + "routers": [] + } + }, + "public": false, + "allow": [], + "deny": [] }, { - "name": "default-router-3.os", - "owner": "", - "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a", - "public_key": "0x536e30785e64dd0349a697285af365b5ed7c4d300010139261cfc4dbdd5b254b", - "ip": "147.135.114.167", - "port": 9007, - "routers": [] + "chain_id": 10, + "trusted": true, + "provider": { + "Node": { + "name": "default-router-2.os", + "owner": "", + "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", + "public_key": "0xab9f1a996db3a4e1dbcd31d765daedeb3af9af4f570c0968463b5be3a7d1e992", + "ip": "147.135.114.167", + "port": 9006, + "routers": [] + } + }, + "public": false, + "allow": [], + "deny": [] + }, + { + "chain_id": 10, + "trusted": true, + "provider": { + "Node": { + "name": "default-router-3.os", + "owner": "", + "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a", + "public_key": 
"0x536e30785e64dd0349a697285af365b5ed7c4d300010139261cfc4dbdd5b254b", + "ip": "147.135.114.167", + "port": 9007, + "routers": [] + } + }, + "public": false, + "allow": [], + "deny": [] } ] \ No newline at end of file diff --git a/kinode/default_providers_testnet.json b/kinode/default_providers_testnet.json index af47dce33..c502f9591 100644 --- a/kinode/default_providers_testnet.json +++ b/kinode/default_providers_testnet.json @@ -1,29 +1,66 @@ [ { - "name": "default-router-1.os", - "owner": "", - "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", - "public_key": "0xb1b1cf23c89f651aac3e5fd4decb04aa177ab0ec8ce5f1d3877b90bb6f5779db", - "ip": "147.135.114.167", - "port": 9002, - "routers": [] + "chain_id": 1, + "trusted": false, + "provider": { + "RpcUrl": "wss://ethereum.publicnode.com" + }, + "public": false, + "allow": [], + "deny": [] }, { - "name": "default-router-2.os", - "owner": "", - "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", - "public_key": "0xab9f1a996db3a4e1dbcd31d765daedeb3af9af4f570c0968463b5be3a7d1e992", - "ip": "147.135.114.167", - "port": 9003, - "routers": [] + "chain_id": 11155111, + "trusted": true, + "provider": { + "Node": { + "name": "default-router-1.os", + "owner": "", + "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", + "public_key": "0xb1b1cf23c89f651aac3e5fd4decb04aa177ab0ec8ce5f1d3877b90bb6f5779db", + "ip": "147.135.114.167", + "port": 9002, + "routers": [] + } + }, + "public": false, + "allow": [], + "deny": [] }, { - "name": "default-router-3.os", - "owner": "", - "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a", - "public_key": "0x536e30785e64dd0349a697285af365b5ed7c4d300010139261cfc4dbdd5b254b", - "ip": "147.135.114.167", - "port": 9004, - "routers": [] + "chain_id": 11155111, + "trusted": true, + "provider": { + "Node": { + "name": "default-router-2.os", + "owner": "", + "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", + "public_key": "0xab9f1a996db3a4e1dbcd31d765daedeb3af9af4f570c0968463b5be3a7d1e992", + "ip": "147.135.114.167", + "port": 9003, + "routers": [] + } + }, + "public": false, + "allow": [], + "deny": [] + }, + { + "chain_id": 11155111, + "trusted": true, + "provider": { + "Node": { + "name": "default-router-3.os", + "owner": "", + "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a", + "public_key": "0x536e30785e64dd0349a697285af365b5ed7c4d300010139261cfc4dbdd5b254b", + "ip": "147.135.114.167", + "port": 9004, + "routers": [] + } + }, + "public": false, + "allow": [], + "deny": [] } ] \ No newline at end of file diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index 24935fa28..da89ba2fe 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -8,161 +8,289 @@ use dashmap::DashMap; use lib::types::core::*; use lib::types::eth::*; use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; use tokio::task::JoinHandle; use url::Url; +/// mapping of chain id to ordered(TODO) list of providers +type Providers = Arc>; + +struct ActiveProviders { + pub urls: Vec, + pub nodes: Vec, +} + +struct UrlProvider { + pub trusted: bool, + pub url: String, + pub pubsub: Option>, +} + +struct NodeProvider { + pub trusted: bool, + pub name: String, +} + +/// existing subscriptions held by local processes +type ActiveSubscriptions = Arc>>; + +enum ActiveSub { + Local(JoinHandle>), + Remote(String), 
// name of node providing this subscription for us +} + +impl ActiveProviders { + fn add_provider_config(&mut self, new: ProviderConfig) { + match new.provider { + NodeOrRpcUrl::Node(update) => { + self.nodes.push(NodeProvider { + trusted: new.trusted, + name: update.name, + }); + } + NodeOrRpcUrl::RpcUrl(url) => { + self.urls.push(UrlProvider { + trusted: new.trusted, + url, + pubsub: None, + }); + } + } + } + + fn remove_provider(&mut self, remove: &str) { + self.urls.retain(|x| x.url != remove); + self.nodes.retain(|x| x.name != remove); + } +} + +async fn activate_url_provider(provider: &mut UrlProvider) -> Result<()> { + match Url::parse(&provider.url)?.scheme() { + "ws" | "wss" => { + let connector = WsConnect { + url: provider.url.to_string(), + auth: None, + }; + let client = ClientBuilder::default().ws(connector).await?; + provider.pubsub = Some(Provider::new_with_client(client)); + Ok(()) + } + _ => Err(anyhow::anyhow!( + "Only `ws://` or `wss://` providers are supported." + )), + } +} + /// The ETH provider runtime process is responsible for connecting to one or more ETH RPC providers -/// and using them to service indexing requests from other apps. This could also be done by a Wasm -/// app, but in the future, this process will hopefully expand in scope to perform more complex -/// indexing and ETH node responsibilities. +/// and using them to service indexing requests from other apps. pub async fn provider( our: String, configs: SavedConfigs, send_to_loop: MessageSender, mut recv_in_client: MessageReceiver, + caps_oracle: CapMessageSender, print_tx: PrintSender, ) -> Result<()> { let our = Arc::new(our); - // // Initialize the provider conditionally based on rpc_url - // // Todo: make provider support multiple transports, one direct and another passthrough. - // let provider_config = match provider_node { - // ProviderInput::Ws(rpc_url) => { - // // Validate and parse the WebSocket URL - // match Url::parse(&rpc_url)?.scheme() { - // "ws" | "wss" => { - // let connector = WsConnect { - // url: rpc_url, - // auth: None, - // }; - // let client = ClientBuilder::default().ws(connector).await?; - // ProviderConfig::Provider(Provider::new_with_client(client)) - // } - // _ => { - // return Err(anyhow::anyhow!( - // "Only `ws://` or `wss://` URLs are supported." - // )) - // } - // } - // } - // ProviderInput::Node(node_id) => { - // // Directly use the node ID - // ProviderConfig::Node(node_id) - // } - // }; - - // let provider_config = Arc::new(provider_config); - - // // handles of longrunning subscriptions. 
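The startup loop added above buckets each SavedConfigs entry into a per-chain ActiveProviders entry. Given the default_providers files earlier in this series, loading and bucketing amounts to this sketch (serde_json simply ignores the extra public/allow/deny keys in those files, since ProviderConfig does not declare them; the file name mirrors the .saved_providers path used in main.rs):

    // Sketch: read saved configs and bucket them by chain id, as provider() does.
    let contents = std::fs::read_to_string(".saved_providers")?;
    let configs: SavedConfigs = serde_json::from_str(&contents)?;
    let providers: Providers = Arc::new(DashMap::new());
    for entry in configs {
        providers
            .entry(entry.chain_id)
            .or_insert(ActiveProviders { urls: vec![], nodes: vec![] })
            .add_provider_config(entry);
    }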
- // let connections: DashMap<(ProcessId, u64), JoinHandle>> = DashMap::new(); - // let connections = Arc::new(connections); - - // while let Some(km) = recv_in_client.recv().await { - // // clone Arcs - // let our = our.clone(); - // let send_to_loop = send_to_loop.clone(); - // let provider_config = provider_config.clone(); - // let connections = connections.clone(); - // tokio::spawn(async move { - // if let Err(e) = handle_message( - // &our, - // &km, - // &send_to_loop, - // provider_config, - // connections, - // public, - // ) - // .await - // { - // let _ = send_to_loop - // .send(make_error_message(our.to_string(), km, e)) - // .await; - // }; - // }); - // } + let mut access_settings = AccessSettings { + public: false, + allow: HashSet::new(), + deny: HashSet::new(), + }; + + // convert saved configs into data structure that we will use to route queries + let mut providers: Providers = Arc::new(DashMap::new()); + for entry in configs { + let mut ap = providers.entry(entry.chain_id).or_insert(ActiveProviders { + urls: vec![], + nodes: vec![], + }); + ap.add_provider_config(entry); + } + + // handles of longrunning subscriptions. + let mut active_subscriptions: ActiveSubscriptions = Arc::new(DashMap::new()); + + while let Some(km) = recv_in_client.recv().await { + let km_id = km.id; + let response_target = km.rsvp.as_ref().unwrap_or(&km.source).clone(); + if let Err(e) = handle_message( + &our, + &mut access_settings, + km, + &send_to_loop, + &caps_oracle, + &mut providers, + &mut active_subscriptions, + ) + .await + { + send_to_loop + .send(make_error_message(&our, km_id, response_target, e)) + .await + .expect("eth: kernel sender died!"); + }; + } Err(anyhow::anyhow!("eth: fatal: message receiver closed!")) } -/// Handle an incoming message. -// async fn handle_message( -// our: &str, -// km: &KernelMessage, -// send_to_loop: &MessageSender, -// provider_config: Arc, -// connections: Arc>>>, -// public: bool, -// ) -> Result<(), EthError> { -// match &km.message { -// Message::Request(req) => { -// match &*provider_config { -// ProviderConfig::Node(node) => { -// if km.source.node == our { -// // we have no provider, let's send this request to someone who has one. -// let request = KernelMessage { -// id: km.id, -// source: Address { -// node: our.to_string(), -// process: ETH_PROCESS_ID.clone(), -// }, -// target: Address { -// node: "jugodenaranja.os".to_string(), -// process: ETH_PROCESS_ID.clone(), -// }, -// rsvp: Some(km.source.clone()), -// message: Message::Request(req.clone()), -// lazy_load_blob: None, -// }; - -// let _ = send_to_loop.send(request).await; -// } else { -// // either someone asking us for rpc, or we are passing through a sub event. -// handle_remote_request(our, km, send_to_loop, None, connections, public) -// .await? -// } -// } -// ProviderConfig::Provider(provider) => { -// if km.source.node == our { -// handle_local_request(our, km, send_to_loop, &provider, connections, public) -// .await? -// } else { -// handle_remote_request( -// our, -// km, -// send_to_loop, -// Some(provider), -// connections, -// public, -// ) -// .await? -// } -// } -// } -// } -// Message::Response(_) => { -// // handle passthrough responses, send to rsvp. 
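The new handle_message above has no explicit opcode: it tries to deserialize the request body as each action type in turn and falls through to PermissionDenied if neither matches. The shape of that dispatch, with hypothetical narrowed handler signatures:

    // Sketch: body-driven dispatch, as in handle_message above.
    fn dispatch(body: &[u8]) -> Result<(), EthError> {
        if let Ok(action) = serde_json::from_slice::<EthAction>(body) {
            return handle_action(action); // hypothetical, stands in for handle_eth_action
        }
        if let Ok(config) = serde_json::from_slice::<EthConfigAction>(body) {
            return handle_config(config); // hypothetical, stands in for handle_eth_config_action
        }
        Err(EthError::PermissionDenied)
    }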
-// if km.source.process == "eth:distro:sys" { -// if let Some(rsvp) = &km.rsvp { -// let _ = send_to_loop -// .send(KernelMessage { -// id: rand::random(), -// source: Address { -// node: our.to_string(), -// process: ETH_PROCESS_ID.clone(), -// }, -// target: rsvp.clone(), -// rsvp: None, -// message: km.message.clone(), -// lazy_load_blob: None, -// }) -// .await; -// } -// } -// } -// } -// Ok(()) -// } +/// handle incoming requests, namely [`EthAction`] and [`EthConfigAction`]. +/// also handle responses that are passthroughs from remote provider nodes. +async fn handle_message( + our: &str, + access_settings: &mut AccessSettings, + km: KernelMessage, + send_to_loop: &MessageSender, + caps_oracle: &CapMessageSender, + providers: &mut Providers, + active_subscriptions: &mut ActiveSubscriptions, +) -> Result<(), EthError> { + match &km.message { + Message::Response(_) => handle_passthrough_response(our, send_to_loop, km).await, + Message::Request(req) => { + if let Ok(eth_action) = serde_json::from_slice(&req.body) { + // these can be from remote or local processes + return handle_eth_action( + our, + access_settings, + km, + eth_action, + providers, + active_subscriptions, + ) + .await; + } + if let Ok(eth_config_action) = serde_json::from_slice(&req.body) { + // only local node + return handle_eth_config_action( + our, + access_settings, + caps_oracle, + km, + eth_config_action, + providers, + ) + .await; + } + Err(EthError::PermissionDenied) + } + } +} + +async fn handle_passthrough_response( + our: &str, + send_to_loop: &MessageSender, + km: KernelMessage, +) -> Result<(), EthError> { + send_to_loop + .send(KernelMessage { + id: rand::random(), + source: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), + }, + target: km.rsvp.unwrap_or(km.source), + rsvp: None, + message: km.message, + lazy_load_blob: None, + }) + .await + .expect("eth: kernel sender died!"); + Ok(()) +} + +async fn handle_eth_action( + our: &str, + access_settings: &mut AccessSettings, + km: KernelMessage, + eth_action: EthAction, + providers: &mut Providers, + active_subscriptions: &mut ActiveSubscriptions, +) -> Result<(), EthError> { + // check our access settings if the request is from a remote node + if km.source.node != our { + if !access_settings.deny.contains(&km.source.node) { + if !access_settings.public { + if !access_settings.allow.contains(&km.source.node) { + return Err(EthError::PermissionDenied); + } + } + } else { + return Err(EthError::PermissionDenied); + } + } + + // for each incoming action, we need to assign a provider from our map + // based on the chain id. once we assign a provider, we can use it for + // this request. if the provider is not usable, cycle through options + // before returning an error. 
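The nested access check at the top of handle_eth_action reduces to a single predicate: the deny-list always wins, and otherwise a remote node needs either public mode or allow-list membership. Equivalently:

    // Sketch: the remote-access check above, flattened into one expression.
    fn remote_allowed(settings: &AccessSettings, node: &str) -> bool {
        !settings.deny.contains(node)
            && (settings.public || settings.allow.contains(node))
    }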
+ match eth_action { + EthAction::SubscribeLogs { + sub_id, + chain_id, + kind, + params, + } => { + todo!() + } + EthAction::UnsubscribeLogs(sub_id) => { + active_subscriptions + .entry(km.source.process) + .and_modify(|sub_map| { + sub_map.remove(&sub_id); + }); + Ok(()) + } + EthAction::Request { + chain_id, + method, + params, + } => { + todo!() + } + } +} + +async fn handle_eth_config_action( + our: &str, + access_settings: &mut AccessSettings, + caps_oracle: &CapMessageSender, + km: KernelMessage, + eth_config_action: EthConfigAction, + providers: &mut Providers, +) -> Result<(), EthError> { + if km.source.node != our { + return Err(EthError::PermissionDenied); + } + // check capabilities to ensure the sender is allowed to make this request + let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel(); + caps_oracle + .send(CapMessage::Has { + on: km.source.process.clone(), + cap: Capability { + issuer: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), + }, + params: serde_json::to_string(&serde_json::json!({ + "root": true, + })) + .unwrap(), + }, + responder: send_cap_bool, + }) + .await + .expect("eth: capability oracle died!"); + if !recv_cap_bool.await.unwrap_or(false) { + return Err(EthError::PermissionDenied); + } + + // modify our providers and access settings based on config action + todo!() +} /// Handle a local request. // async fn handle_local_request( @@ -396,79 +524,55 @@ async fn handle_subscription_stream( rsvp: Option
<Address>
, send_to_loop: MessageSender, ) -> Result<(), EthError> { - match rx.recv().await { - Err(e) => { - let error = Err(EthError::SubscriptionClosed(sub_id))?; - let _ = send_to_loop - .send(KernelMessage { - id: rand::random(), - source: Address { - node: our, - process: ETH_PROCESS_ID.clone(), - }, - target: target.clone(), - rsvp: rsvp.clone(), - message: Message::Request(Request { - inherit: false, - expects_response: None, - body: serde_json::to_vec(&EthSubResult::Err(EthSubError { - id: sub_id, - error: e.to_string(), - })) - .unwrap(), - metadata: None, - capabilities: vec![], - }), - lazy_load_blob: None, - }) - .await - .unwrap(); - } - Ok(value) => { - let event: SubscriptionResult = serde_json::from_str(value.get()).map_err(|_| { - EthError::RpcError("eth: failed to deserialize subscription result".to_string()) - })?; - send_to_loop - .send(KernelMessage { - id: rand::random(), - source: Address { - node: our, - process: ETH_PROCESS_ID.clone(), - }, - target: target.clone(), - rsvp: rsvp.clone(), - message: Message::Request(Request { - inherit: false, - expects_response: None, - body: serde_json::to_vec(&EthSubResult::Ok(EthSub { - id: sub_id, - result: event, - })) - .unwrap(), - metadata: None, - capabilities: vec![], - }), - lazy_load_blob: None, - }) - .await - .unwrap(); + loop { + match rx.recv().await { + Err(e) => { + return Err(EthError::SubscriptionClosed(sub_id)); + } + Ok(value) => { + let result: SubscriptionResult = + serde_json::from_str(value.get()).map_err(|_| { + EthError::RpcError( + "eth: failed to deserialize subscription result".to_string(), + ) + })?; + send_to_loop + .send(KernelMessage { + id: rand::random(), + source: Address { + node: our.clone(), + process: ETH_PROCESS_ID.clone(), + }, + target: target.clone(), + rsvp: rsvp.clone(), + message: Message::Request(Request { + inherit: false, + expects_response: None, + body: serde_json::to_vec(&EthSubResult::Ok(EthSub { + id: sub_id, + result, + })) + .unwrap(), + metadata: None, + capabilities: vec![], + }), + lazy_load_blob: None, + }) + .await + .unwrap(); + } } } - Err(EthError::SubscriptionClosed(sub_id)) } -fn make_error_message(our_node: String, km: KernelMessage, error: EthError) -> KernelMessage { - let source = km.rsvp.unwrap_or_else(|| Address { - node: our_node.clone(), - process: km.source.process.clone(), - }); +fn make_error_message(our: &str, id: u64, target: Address, error: EthError) -> KernelMessage { KernelMessage { - id: km.id, + id, source: Address { - node: our_node, + node: our.to_string(), process: ETH_PROCESS_ID.clone(), }, - target: source, + target, rsvp: None, message: Message::Response(( Response { diff --git a/kinode/src/main.rs b/kinode/src/main.rs index 7c04d97e0..c4337d620 100644 --- a/kinode/src/main.rs +++ b/kinode/src/main.rs @@ -554,6 +554,7 @@ async fn main() { eth_provider_config, kernel_message_sender.clone(), eth_provider_receiver, + caps_oracle_sender.clone(), print_sender.clone(), )); #[cfg(feature = "simulation-mode")] diff --git a/lib/src/eth.rs b/lib/src/eth.rs index 58a3addc9..c4f996a6c 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -1,7 +1,9 @@ use alloy_rpc_types::pubsub::{Params, SubscriptionKind, SubscriptionResult}; use serde::{Deserialize, Serialize}; +use std::collections::HashSet; -/// The Action and Request type that can be made to eth:distro:sys. +/// The Action and Request type that can be made to eth:distro:sys. Any process with messaging +/// capabilities can send this action to the eth provider. 
 ///
 /// Will be serialized and deserialized using `serde_json::to_vec` and `serde_json::from_slice`.
 #[derive(Debug, Serialize, Deserialize)]
@@ -10,6 +12,7 @@ pub enum EthAction {
     /// Logs come in as alloy_rpc_types::pubsub::SubscriptionResults
     SubscribeLogs {
         sub_id: u64,
+        chain_id: u64,
         kind: SubscriptionKind,
         params: Params,
     },
@@ -17,22 +20,25 @@ pub enum EthAction {
     UnsubscribeLogs(u64),
     /// Raw request. Used by kinode_process_lib.
     Request {
+        chain_id: u64,
         method: String,
         params: serde_json::Value,
     },
 }
 
 /// Incoming Result type for subscription updates or errors that processes will receive.
+/// Can deserialize all incoming requests from eth:distro:sys to this type.
+///
+/// Will be serialized and deserialized using `serde_json::to_vec` and `serde_json::from_slice`.
 pub type EthSubResult = Result<EthSub, EthSubError>;
 
-/// Incoming Request type for subscription updates.
+/// Incoming Request type for successful subscription updates.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct EthSub {
     pub id: u64,
     pub result: SubscriptionResult,
 }
 
-/// Incoming Request for subscription errors that processes will receive.
 /// If your subscription is closed unexpectedly, you will receive this.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct EthSubError {
@@ -61,11 +67,52 @@ pub enum EthError {
     /// Invalid method
     InvalidMethod(String),
     /// Permission denied
-    PermissionDenied(String),
+    PermissionDenied,
     /// Internal RPC error
     RpcError(String),
 }
 
+/// The action type used for configuring eth:distro:sys. Only processes which have the "root"
+/// capability from eth:distro:sys can successfully send this action.
+///
+/// NOTE: changes to config will not be persisted between boots; they must be saved in .env
+/// to be reflected between boots. TODO: can change this
+#[derive(Debug, Serialize, Deserialize)]
+pub enum EthConfigAction {
+    /// Add a new provider to the list of providers.
+    AddProvider(ProviderConfig),
+    /// Remove a provider from the list of providers.
+    /// The tuple is (chain_id, node_id/rpc_url).
+    RemoveProvider((u64, String)),
+    /// make our provider public
+    SetPublic,
+    /// make our provider not-public
+    SetPrivate,
+    /// add node to whitelist on a provider
+    AllowNode(String),
+    /// remove node from whitelist on a provider
+    UnallowNode(String),
+    /// add node to blacklist on a provider
+    DenyNode(String),
+    /// remove node from blacklist on a provider
+    UndenyNode(String),
+    /// Set the list of providers to a new list.
+    /// Replaces all existing saved provider configs.
+    SetProviders(SavedConfigs),
+    /// Get the list of providers as a [`SavedConfigs`] object.
+    GetProviders,
+}
+
+/// Response type from an [`EthConfigAction`] request.
+#[derive(Debug, Serialize, Deserialize)]
+pub enum EthConfigResponse {
+    Ok,
+    /// Response from a GetProviders request.
+    Providers(SavedConfigs),
+    /// Permission denied due to missing capability
+    PermissionDenied,
+}
+
 //
 // Internal types
 //
@@ -103,13 +150,19 @@ pub fn to_static_str(method: &str) -> Option<&'static str> {
     }
 }
 
+/// Settings for our ETH provider
+pub struct AccessSettings {
+    pub public: bool,           // whether or not other nodes can access through us
+    pub allow: HashSet<String>, // whitelist for access (only used if public == false)
+    pub deny: HashSet<String>,  // blacklist for access (always used)
+}
+
 pub type SavedConfigs = Vec<ProviderConfig>;
 
 /// Provider config. Can currently be a node or a ws provider instance.
#[derive(Clone, Debug, Deserialize, Serialize)] pub struct ProviderConfig { pub chain_id: u64, - pub usable: bool, pub trusted: bool, pub provider: NodeOrRpcUrl, } @@ -119,3 +172,12 @@ pub enum NodeOrRpcUrl { Node(crate::core::KnsUpdate), RpcUrl(String), } + +impl std::cmp::PartialEq for NodeOrRpcUrl { + fn eq(&self, other: &str) -> bool { + match self { + NodeOrRpcUrl::Node(kns) => kns.name == other, + NodeOrRpcUrl::RpcUrl(url) => url == other, + } + } +} From 6206ad50bf1ee3b6d6663f13a6051f9ed4c15442 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Wed, 21 Feb 2024 14:02:39 -0300 Subject: [PATCH 04/23] WIP --- kinode/default_providers_testnet.json | 10 +++ kinode/src/eth/provider.rs | 123 +++++++++++++++++++++++--- lib/src/eth.rs | 2 + 3 files changed, 121 insertions(+), 14 deletions(-) diff --git a/kinode/default_providers_testnet.json b/kinode/default_providers_testnet.json index c502f9591..3e46a815c 100644 --- a/kinode/default_providers_testnet.json +++ b/kinode/default_providers_testnet.json @@ -9,6 +9,16 @@ "allow": [], "deny": [] }, + { + "chain_id": 11155111, + "trusted": false, + "provider": { + "RpcUrl": "wss://eth-sepolia.g.alchemy.com/v2/a4bRKYnvC0uT2l1rzVDAvldH3OPKQnKm" + }, + "public": false, + "allow": [], + "deny": [] + }, { "chain_id": 11155111, "trusted": true, diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index da89ba2fe..d12720525 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -14,7 +14,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use url::Url; -/// mapping of chain id to ordered(TODO) list of providers +/// mapping of chain id to ordered lists of providers type Providers = Arc>; struct ActiveProviders { @@ -37,7 +37,7 @@ struct NodeProvider { type ActiveSubscriptions = Arc>>; enum ActiveSub { - Local(JoinHandle>), + Local(JoinHandle<()>), Remote(String), // name of node providing this subscription for us } @@ -93,6 +93,7 @@ pub async fn provider( caps_oracle: CapMessageSender, print_tx: PrintSender, ) -> Result<()> { + println!("provider: on\r"); let our = Arc::new(our); let mut access_settings = AccessSettings { @@ -156,6 +157,7 @@ async fn handle_message( return handle_eth_action( our, access_settings, + send_to_loop, km, eth_action, providers, @@ -205,6 +207,7 @@ async fn handle_passthrough_response( async fn handle_eth_action( our: &str, access_settings: &mut AccessSettings, + send_to_loop: &MessageSender, km: KernelMessage, eth_action: EthAction, providers: &mut Providers, @@ -231,27 +234,119 @@ async fn handle_eth_action( EthAction::SubscribeLogs { sub_id, chain_id, - kind, - params, + ref kind, + ref params, } => { - todo!() + let new_sub = ActiveSub::Local(tokio::spawn(create_new_subscription( + our.to_string(), + km.id, + km.source.clone(), + km.rsvp, + send_to_loop.clone(), + eth_action, + providers.clone(), + ))); + let mut subs = active_subscriptions + .entry(km.source.process) + .or_insert(HashMap::new()); + subs.insert(sub_id, new_sub); } EthAction::UnsubscribeLogs(sub_id) => { active_subscriptions .entry(km.source.process) .and_modify(|sub_map| { - sub_map.remove(&sub_id); + if let Some(sub) = sub_map.get_mut(&sub_id) { + match sub { + ActiveSub::Local(handle) => { + handle.abort(); + } + ActiveSub::Remote(node) => { + // TODO send to them asking to abort + } + } + } }); - Ok(()) } EthAction::Request { chain_id, method, params, } => { - todo!() + //todo } } + Ok(()) +} + +/// spawned as a task +async fn create_new_subscription( + our: String, + km_id: u64, + target: Address, + rsvp: Option
, + send_to_loop: MessageSender, + eth_action: EthAction, + providers: Providers, +) { + let EthAction::SubscribeLogs { + sub_id, + chain_id, + kind, + params, + } = eth_action + else { + return; + }; + let Some(aps) = providers.get_mut(&chain_id) else { + send_to_loop + .send(make_error_message( + &our, + sub_id, + target, + EthError::NoRpcForChain, + )) + .await + .expect("eth: kernel sender died!"); + return; + }; + // first, try any url providers we have for this chain, + // then if we have none or they all fail, go to node providers. + // finally, if no provider works, return an error. + for url_provider in &aps.urls { + if let Some(pubsub) = &url_provider.pubsub { + let kind = serde_json::to_value(&kind).unwrap(); + let params = serde_json::to_value(¶ms).unwrap(); + if let Ok(id) = pubsub + .inner() + .prepare("eth_subscribe", [kind, params]) + .await + { + let rx = pubsub.inner().get_raw_subscription(id).await; + if let Err(e) = + handle_subscription_stream(&our, sub_id, rx, &target, &rsvp, &send_to_loop) + .await + { + send_to_loop + .send(make_error_message(&our, sub_id, target, e)) + .await + .expect("eth: kernel sender died!"); + } + return; + } + } + } + for node_provider in &aps.nodes { + // todo + } + send_to_loop + .send(make_error_message( + &our, + sub_id, + target, + EthError::NoRpcForChain, + )) + .await + .expect("eth: kernel sender died!"); } async fn handle_eth_config_action( @@ -517,12 +612,12 @@ async fn handle_eth_config_action( /// This task is responsible for connecting to the ETH RPC provider and streaming logs /// for a specific subscription made by a process. async fn handle_subscription_stream( - our: String, + our: &str, sub_id: u64, mut rx: RawSubscription, - target: Address, - rsvp: Option
<Address>, - send_to_loop: MessageSender, + target: &Address, + rsvp: &Option<Address>
, + send_to_loop: &MessageSender, ) -> Result<(), EthError> { loop { match rx.recv().await { @@ -540,7 +635,7 @@ async fn handle_subscription_stream( .send(KernelMessage { id: rand::random(), source: Address { - node: our.clone(), + node: our.to_string(), process: ETH_PROCESS_ID.clone(), }, target: target.clone(), @@ -559,7 +654,7 @@ async fn handle_subscription_stream( lazy_load_blob: None, }) .await - .unwrap(); + .map_err(|_| EthError::RpcError("eth: sender died".to_string()))?; } } } diff --git a/lib/src/eth.rs b/lib/src/eth.rs index c4f996a6c..04cd0d09a 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -58,6 +58,8 @@ pub enum EthResponse { #[derive(Debug, Serialize, Deserialize)] pub enum EthError { + /// No RPC provider for the chain + NoRpcForChain, /// Underlying transport error TransportError(String), /// Subscription closed From 31309454e92d185b49dfa69aa069895e8c4f87cf Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Thu, 22 Feb 2024 12:12:33 -0300 Subject: [PATCH 05/23] WIP WIP --- Cargo.lock | 24 ++- .../packages/app_store/app_store/Cargo.toml | 3 +- .../kns_indexer/kns_indexer/Cargo.toml | 3 +- .../kns_indexer/kns_indexer/src/lib.rs | 2 +- kinode/src/eth/provider.rs | 141 +++++++++++++----- kinode/src/main.rs | 12 -- kinode/src/state.rs | 10 +- 7 files changed, 136 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index affc5680e..4a1a4d3ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -407,7 +407,7 @@ dependencies = [ "alloy-sol-types", "anyhow", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=3232423)", + "kinode_process_lib 0.6.0", "rand 0.8.5", "serde", "serde_json", @@ -2631,6 +2631,26 @@ dependencies = [ "lib", ] +[[package]] +name = "kinode_process_lib" +version = "0.6.0" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-rpc-types", + "alloy-transport", + "anyhow", + "bincode", + "http 1.0.0", + "mime_guess", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror", + "url", + "wit-bindgen", +] + [[package]] name = "kinode_process_lib" version = "0.6.0" @@ -2724,7 +2744,7 @@ dependencies = [ "anyhow", "bincode", "hex", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=3232423)", + "kinode_process_lib 0.6.0", "rmp-serde", "serde", "serde_json", diff --git a/kinode/packages/app_store/app_store/Cargo.toml b/kinode/packages/app_store/app_store/Cargo.toml index f34a470c9..e6a5b871a 100644 --- a/kinode/packages/app_store/app_store/Cargo.toml +++ b/kinode/packages/app_store/app_store/Cargo.toml @@ -9,7 +9,8 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" anyhow = "1.0" bincode = "1.3.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "3232423" } +# kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "3232423" } +kinode_process_lib = { path = "../../../../../process_lib" } rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml index 285e7fd49..2c269326e 100644 --- a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml +++ b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml @@ -10,7 +10,8 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" bincode = "1.3.3" hex = "0.4.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "3232423" } +# kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "3232423" } 
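+# editor's note: this temporarily swaps kns_indexer to a local checkout of process_lib for development; a later patch in this series pins it back to a git revision.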
+kinode_process_lib = { path = "../../../../../process_lib" } rmp-serde = "1.1.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index fdd673433..ccb8f3118 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -130,7 +130,7 @@ impl Guest for Component { fn main(our: Address, mut state: State) -> anyhow::Result<()> { // first, await a message from the kernel which will contain the - // contract address for the KNS version we want to track. + // chain ID and contract address for the KNS version we want to track. let mut contract_address: Option = None; loop { let Ok(Message::Request { source, body, .. }) = await_message() else { diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index d12720525..55fe58b8a 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -5,6 +5,7 @@ use alloy_rpc_types::pubsub::SubscriptionResult; use alloy_transport_ws::WsConnect; use anyhow::Result; use dashmap::DashMap; +use futures::Future; use lib::types::core::*; use lib::types::eth::*; use serde::{Deserialize, Serialize}; @@ -17,17 +18,20 @@ use url::Url; /// mapping of chain id to ordered lists of providers type Providers = Arc>; +#[derive(Debug)] struct ActiveProviders { pub urls: Vec, pub nodes: Vec, } +#[derive(Debug)] struct UrlProvider { pub trusted: bool, pub url: String, pub pubsub: Option>, } +#[derive(Debug)] struct NodeProvider { pub trusted: bool, pub name: String, @@ -36,6 +40,7 @@ struct NodeProvider { /// existing subscriptions held by local processes type ActiveSubscriptions = Arc>>; +#[derive(Debug)] enum ActiveSub { Local(JoinHandle<()>), Remote(String), // name of node providing this subscription for us @@ -112,6 +117,8 @@ pub async fn provider( ap.add_provider_config(entry); } + println!("providers: {providers:?}\r"); + // handles of longrunning subscriptions. let mut active_subscriptions: ActiveSubscriptions = Arc::new(DashMap::new()); @@ -149,6 +156,7 @@ async fn handle_message( providers: &mut Providers, active_subscriptions: &mut ActiveSubscriptions, ) -> Result<(), EthError> { + println!("provider: handle_message\r"); match &km.message { Message::Response(_) => handle_passthrough_response(our, send_to_loop, km).await, Message::Request(req) => { @@ -187,6 +195,7 @@ async fn handle_passthrough_response( send_to_loop: &MessageSender, km: KernelMessage, ) -> Result<(), EthError> { + println!("provider: handle_passthrough_response\r"); send_to_loop .send(KernelMessage { id: rand::random(), @@ -213,6 +222,7 @@ async fn handle_eth_action( providers: &mut Providers, active_subscriptions: &mut ActiveSubscriptions, ) -> Result<(), EthError> { + println!("provider: handle_eth_action\r"); // check our access settings if the request is from a remote node if km.source.node != our { if !access_settings.deny.contains(&km.source.node) { @@ -245,6 +255,7 @@ async fn handle_eth_action( send_to_loop.clone(), eth_action, providers.clone(), + active_subscriptions.clone(), ))); let mut subs = active_subscriptions .entry(km.source.process) @@ -279,6 +290,7 @@ async fn handle_eth_action( } /// spawned as a task +/// cleans itself up when the subscription is closed or fails. 
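+/// On success, the spawned task first replies to the target with EthResponse::Ok, then awaits the long-lived subscription future; any error from that future is forwarded to the subscriber, and the entry is removed from `active_subscriptions` when the task exits.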
async fn create_new_subscription( our: String, km_id: u64, @@ -287,7 +299,76 @@ async fn create_new_subscription( send_to_loop: MessageSender, eth_action: EthAction, providers: Providers, + active_subscriptions: ActiveSubscriptions, ) { + println!("provider: create_new_subscription\r"); + match build_subscription( + our.clone(), + km_id, + target.clone(), + rsvp.clone(), + send_to_loop.clone(), + ð_action, + providers, + ) + .await + { + Ok(future) => { + // send a response to the target that the subscription was successful + send_to_loop + .send(KernelMessage { + id: km_id, + source: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), + }, + target: target.clone(), + rsvp: rsvp.clone(), + message: Message::Response(( + Response { + inherit: false, + body: serde_json::to_vec(&EthResponse::Ok).unwrap(), + metadata: None, + capabilities: vec![], + }, + None, + )), + lazy_load_blob: None, + }) + .await + .expect("eth: sender died!"); + // await the subscription error and kill it if so + if let Err(e) = future.await { + send_to_loop + .send(make_error_message(&our, km_id, target.clone(), e)) + .await + .expect("eth: kernel sender died!"); + } + } + Err(e) => { + send_to_loop + .send(make_error_message(&our, km_id, target.clone(), e)) + .await + .expect("eth: kernel sender died!"); + } + } + active_subscriptions + .entry(target.process) + .and_modify(|sub_map| { + sub_map.remove(&km_id); + }); +} + +async fn build_subscription( + our: String, + km_id: u64, + target: Address, + rsvp: Option
, + send_to_loop: MessageSender, + eth_action: &EthAction, + providers: Providers, +) -> Result>, EthError> { + println!("provider: build_subscription\r"); let EthAction::SubscribeLogs { sub_id, chain_id, @@ -295,19 +376,12 @@ async fn create_new_subscription( params, } = eth_action else { - return; + return Err(EthError::InvalidMethod( + "eth: only accepts subscribe logs requests".to_string(), + )); }; let Some(aps) = providers.get_mut(&chain_id) else { - send_to_loop - .send(make_error_message( - &our, - sub_id, - target, - EthError::NoRpcForChain, - )) - .await - .expect("eth: kernel sender died!"); - return; + return Err(EthError::NoRpcForChain); }; // first, try any url providers we have for this chain, // then if we have none or they all fail, go to node providers. @@ -322,31 +396,21 @@ async fn create_new_subscription( .await { let rx = pubsub.inner().get_raw_subscription(id).await; - if let Err(e) = - handle_subscription_stream(&our, sub_id, rx, &target, &rsvp, &send_to_loop) - .await - { - send_to_loop - .send(make_error_message(&our, sub_id, target, e)) - .await - .expect("eth: kernel sender died!"); - } - return; + return Ok(maintain_subscription( + our, + *sub_id, + rx, + target, + rsvp, + send_to_loop, + )); } } } for node_provider in &aps.nodes { // todo } - send_to_loop - .send(make_error_message( - &our, - sub_id, - target, - EthError::NoRpcForChain, - )) - .await - .expect("eth: kernel sender died!"); + return Err(EthError::NoRpcForChain); } async fn handle_eth_config_action( @@ -357,6 +421,7 @@ async fn handle_eth_config_action( eth_config_action: EthConfigAction, providers: &mut Providers, ) -> Result<(), EthError> { + println!("provider: handle_eth_config_action\r"); if km.source.node != our { return Err(EthError::PermissionDenied); } @@ -611,14 +676,15 @@ async fn handle_eth_config_action( /// Executed as a long-lived task. The JoinHandle is stored in the `connections` map. /// This task is responsible for connecting to the ETH RPC provider and streaming logs /// for a specific subscription made by a process. -async fn handle_subscription_stream( - our: &str, +async fn maintain_subscription( + our: String, sub_id: u64, mut rx: RawSubscription, - target: &Address, - rsvp: &Option
<Address>, - send_to_loop: &MessageSender, + target: Address, + rsvp: Option<Address>
, + send_to_loop: MessageSender, ) -> Result<(), EthError> { + println!("provider: maintain_subscription\r"); loop { match rx.recv().await { Err(e) => { @@ -660,9 +726,10 @@ async fn handle_subscription_stream( } } -fn make_error_message(our: &str, id: u64, target: Address, error: EthError) -> KernelMessage { +fn make_error_message(our: &str, km_id: u64, target: Address, error: EthError) -> KernelMessage { + println!("provider: make_error_message\r"); KernelMessage { - id, + id: km_id, source: Address { node: our.to_string(), process: ETH_PROCESS_ID.clone(), diff --git a/kinode/src/main.rs b/kinode/src/main.rs index c4337d620..6d8c988ca 100644 --- a/kinode/src/main.rs +++ b/kinode/src/main.rs @@ -548,7 +548,6 @@ async fn main() { timer_service_receiver, print_sender.clone(), )); - #[cfg(not(feature = "simulation-mode"))] tasks.spawn(eth::provider::provider( our.name.clone(), eth_provider_config, @@ -557,17 +556,6 @@ async fn main() { caps_oracle_sender.clone(), print_sender.clone(), )); - #[cfg(feature = "simulation-mode")] - if let Some(ref rpc_url) = rpc_url { - tasks.spawn(eth::provider::provider( - our.name.clone(), - eth_provider, - public, - kernel_message_sender.clone(), - eth_provider_receiver, - print_sender.clone(), - )); - } tasks.spawn(vfs::vfs( our.name.clone(), kernel_message_sender.clone(), diff --git a/kinode/src/state.rs b/kinode/src/state.rs index f7f1992a2..95ecc1b63 100644 --- a/kinode/src/state.rs +++ b/kinode/src/state.rs @@ -391,11 +391,11 @@ async fn bootstrap( for (package_metadata, mut package) in packages.clone() { let package_name = package_metadata.properties.package_name.as_str(); - // special case tester: only load it in if in simulation mode - if package_name == "tester" { - #[cfg(not(feature = "simulation-mode"))] - continue; - } + // // special case tester: only load it in if in simulation mode + // if package_name == "tester" { + // #[cfg(not(feature = "simulation-mode"))] + // continue; + // } println!("fs: handling package {package_name}...\r"); let package_publisher = package_metadata.properties.publisher.as_str(); From 989191037f46316b674037fb983218140146c3ee Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Thu, 22 Feb 2024 18:01:33 -0300 Subject: [PATCH 06/23] WIP: RPC URL providers working! 
updated process_lib in relevant apps --- Cargo.lock | 19 +- .../packages/app_store/app_store/Cargo.toml | 3 +- .../packages/app_store/app_store/src/lib.rs | 19 +- .../packages/kns_indexer/get_block/Cargo.toml | 2 +- .../packages/kns_indexer/get_block/src/lib.rs | 14 +- .../kns_indexer/kns_indexer/Cargo.toml | 3 +- .../kns_indexer/kns_indexer/src/lib.rs | 92 ++-- kinode/src/eth/provider.rs | 487 +++++++----------- kinode/src/kernel/mod.rs | 4 +- kinode/src/main.rs | 10 +- lib/src/net.rs | 0 11 files changed, 273 insertions(+), 380 deletions(-) delete mode 100644 lib/src/net.rs diff --git a/Cargo.lock b/Cargo.lock index b751369e4..a8aefd9c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -407,7 +407,7 @@ dependencies = [ "alloy-sol-types", "anyhow", "bincode", - "kinode_process_lib 0.6.0", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=7b6fd6e)", "rand 0.8.5", "serde", "serde_json", @@ -1967,7 +1967,7 @@ dependencies = [ name = "get_block" version = "0.1.0" dependencies = [ - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=3232423)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=7b6fd6e)", "serde", "serde_json", "wit-bindgen", @@ -2634,11 +2634,8 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" +source = "git+https://github.com/kinode-dao/process_lib?rev=12bf9ee#12bf9eefeb9237db5e5165647fa91b437b05e169" dependencies = [ - "alloy-json-rpc", - "alloy-primitives", - "alloy-rpc-types", - "alloy-transport", "anyhow", "bincode", "http 1.0.0", @@ -2654,8 +2651,12 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib?rev=12bf9ee#12bf9eefeb9237db5e5165647fa91b437b05e169" +source = "git+https://github.com/kinode-dao/process_lib?rev=3232423#323242399efdcdad02e7f31bb6a9cc5eec048610" dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-rpc-types", + "alloy-transport", "anyhow", "bincode", "http 1.0.0", @@ -2671,7 +2672,7 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib?rev=3232423#323242399efdcdad02e7f31bb6a9cc5eec048610" +source = "git+https://github.com/kinode-dao/process_lib?rev=7b6fd6e#7b6fd6ee160299514fee30b315a2a53fbfb434d7" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -2744,7 +2745,7 @@ dependencies = [ "anyhow", "bincode", "hex", - "kinode_process_lib 0.6.0", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=7b6fd6e)", "rmp-serde", "serde", "serde_json", diff --git a/kinode/packages/app_store/app_store/Cargo.toml b/kinode/packages/app_store/app_store/Cargo.toml index e6a5b871a..d6d948a14 100644 --- a/kinode/packages/app_store/app_store/Cargo.toml +++ b/kinode/packages/app_store/app_store/Cargo.toml @@ -9,8 +9,7 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" anyhow = "1.0" bincode = "1.3.3" -# kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "3232423" } -kinode_process_lib = { path = "../../../../../process_lib" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "7b6fd6e" } rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/app_store/app_store/src/lib.rs b/kinode/packages/app_store/app_store/src/lib.rs index 6965c1265..02ffd2085 100644 --- a/kinode/packages/app_store/app_store/src/lib.rs +++ 
b/kinode/packages/app_store/app_store/src/lib.rs @@ -42,7 +42,8 @@ use ft_worker_lib::{ /// - uninstalled + deleted /// - set to automatically update if a new version is available -const CONTRACT_ADDRESS: &str = "0x18c39eB547A0060C6034f8bEaFB947D1C16eADF1"; +const CHAIN_ID: u64 = 11155111; // sepolia +const CONTRACT_ADDRESS: &str = "0x18c39eB547A0060C6034f8bEaFB947D1C16eADF1"; // sepolia const EVENTS: [&str; 3] = [ "AppRegistered(uint256,string,bytes,string,bytes32)", @@ -117,15 +118,17 @@ fn init(our: Address) { .from_block(state.last_saved_block - 1) .events(EVENTS); - let logs = get_logs(&filter); + let logs = get_logs(CHAIN_ID, &filter); if let Ok(logs) = logs { for log in logs { - state.ingest_listings_contract_event(&our, log); + if let Err(e) = state.ingest_listings_contract_event(&our, log) { + println!("app store: error ingesting log: {e:?}"); + }; } } - subscribe(1, filter).unwrap(); + subscribe(1, CHAIN_ID, filter).unwrap(); loop { match await_message() { @@ -345,14 +348,16 @@ fn handle_local_request( .from_block(state.last_saved_block - 1) .events(EVENTS); - let logs = get_logs(&filter); + let logs = get_logs(CHAIN_ID, &filter); if let Ok(logs) = logs { for log in logs { - state.ingest_listings_contract_event(our, log); + if let Err(e) = state.ingest_listings_contract_event(our, log) { + println!("app store: error ingesting log: {e:?}"); + }; } } - subscribe(1, filter).unwrap(); + subscribe(1, CHAIN_ID, filter).unwrap(); LocalResponse::RebuiltIndex } diff --git a/kinode/packages/kns_indexer/get_block/Cargo.toml b/kinode/packages/kns_indexer/get_block/Cargo.toml index c604f349e..9ea60ea29 100644 --- a/kinode/packages/kns_indexer/get_block/Cargo.toml +++ b/kinode/packages/kns_indexer/get_block/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "3232423" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "7b6fd6e" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/kns_indexer/get_block/src/lib.rs b/kinode/packages/kns_indexer/get_block/src/lib.rs index 61a0e9f0b..a4e561117 100644 --- a/kinode/packages/kns_indexer/get_block/src/lib.rs +++ b/kinode/packages/kns_indexer/get_block/src/lib.rs @@ -1,5 +1,5 @@ use kinode_process_lib::{ - await_next_request_body, call_init, eth::get_block_number, println, Address, Request, SendError, + await_next_request_body, call_init, eth::get_block_number, println, Address, }; wit_bindgen::generate!({ @@ -12,13 +12,19 @@ wit_bindgen::generate!({ call_init!(init); -fn init(our: Address) { - let Ok(_args) = await_next_request_body() else { +fn init(_our: Address) { + let Ok(args) = await_next_request_body() else { println!("get_block: failed to get args, aborting"); return; }; - match get_block_number() { + // incoming args bytes are a string we parse to u64; if none provided, default to chain ID 1 + let chain_id = std::str::from_utf8(&args) + .unwrap_or("1") + .parse::<u64>() + .unwrap_or(1); + + match get_block_number(chain_id) { Ok(block_number) => { println!("latest block number: {block_number}"); } diff --git a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml index 2c269326e..6da9a69d4 100644 --- a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml +++ b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml @@ -10,8 +10,7 @@ alloy-primitives = "0.6.2"
alloy-sol-types = "0.6.2" bincode = "1.3.3" hex = "0.4.3" -# kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "3232423" } -kinode_process_lib = { path = "../../../../../process_lib" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "7b6fd6e" } rmp-serde = "1.1.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index ccb8f3118..b27f97c57 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -25,8 +25,9 @@ wit_bindgen::generate!({ #[derive(Clone, Debug, Serialize, Deserialize)] struct State { + chain_id: u64, // what contract this state pertains to - contract_address: Option, + contract_address: String, // namehash to human readable name names: HashMap, // human readable name to most recent on-chain routing information as json @@ -104,20 +105,51 @@ impl Guest for Component { fn init(our: String) { let our: Address = our.parse().unwrap(); - let mut state: State = State { - contract_address: None, - names: HashMap::new(), - nodes: HashMap::new(), - block: 1, - }; + // first, await a message from the kernel which will contain the + // chain ID and contract address for the KNS version we want to track. + let chain_id: u64; + let contract_address: String; + loop { + let Ok(Message::Request { source, body, .. }) = await_message() else { + continue; + }; + if source.process != "kernel:distro:sys" { + continue; + } + (chain_id, contract_address) = serde_json::from_slice(&body).unwrap(); + break; + } + println!( + "kns_indexer: indexing on contract address {}", + contract_address + ); // if we have state, load it in - match get_typed_state(|bytes| Ok(bincode::deserialize(bytes)?)) { + let state: State = match get_typed_state(|bytes| Ok(bincode::deserialize::(bytes)?)) + { Some(s) => { - state = s; + // if chain id or contract address changed from a previous run, reset state + if s.chain_id != chain_id || s.contract_address != contract_address { + println!("kns_indexer: resetting state because runtime contract address or chain ID changed"); + State { + chain_id, + contract_address, + names: HashMap::new(), + nodes: HashMap::new(), + block: 1, + } + } else { + s + } } - None => {} - } + None => State { + chain_id, + contract_address: contract_address.clone(), + names: HashMap::new(), + nodes: HashMap::new(), + block: 1, + }, + }; match main(our, state) { Ok(_) => {} @@ -129,34 +161,6 @@ impl Guest for Component { } fn main(our: Address, mut state: State) -> anyhow::Result<()> { - // first, await a message from the kernel which will contain the - // chain ID and contract address for the KNS version we want to track. - let mut contract_address: Option = None; - loop { - let Ok(Message::Request { source, body, .. 
}) = await_message() else { - continue; - }; - if source.process != "kernel:distro:sys" { - continue; - } - contract_address = Some(std::str::from_utf8(&body).unwrap().to_string()); - break; - } - println!( - "kns_indexer: indexing on contract address {}", - contract_address.as_ref().unwrap() - ); - // if contract address changed from a previous run, reset state - if state.contract_address != contract_address { - println!("resetting state for some reason."); - state = State { - contract_address: contract_address.clone(), - names: HashMap::new(), - nodes: HashMap::new(), - block: 1, - }; - } - // shove all state into net::net Request::new() .target((&our.node, "net", "distro", "sys")) @@ -166,7 +170,7 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { .send()?; let filter = Filter::new() - .address(contract_address.unwrap().parse::().unwrap()) + .address(state.contract_address.parse::().unwrap()) .to_block(BlockNumberOrTag::Latest) .from_block(state.block - 1) .events(vec![ @@ -178,8 +182,8 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { ]); // if block in state is < current_block, get logs from that part. - if state.block < get_block_number()? { - let logs = get_logs(&filter)?; + if state.block < get_block_number(state.chain_id)? { + let logs = get_logs(state.chain_id, &filter)?; for log in logs { handle_log(&our, &mut state, &log)?; } @@ -194,7 +198,7 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { set_state(&bincode::serialize(&state)?); - subscribe(1, filter.clone())?; + subscribe(1, state.chain_id, filter.clone())?; let mut pending_requests: BTreeMap> = BTreeMap::new(); @@ -266,7 +270,7 @@ fn handle_eth_message( } Err(e) => { println!("kns_indexer: got sub error, resubscribing.. {:?}", e.error); - subscribe(1, filter.clone())?; + subscribe(1, state.chain_id, filter.clone())?; } } diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index 55fe58b8a..07d7064b8 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -72,6 +72,7 @@ impl ActiveProviders { } async fn activate_url_provider(provider: &mut UrlProvider) -> Result<()> { + println!("provider: activate_url_provider\r"); match Url::parse(&provider.url)?.scheme() { "ws" | "wss" => { let connector = WsConnect { @@ -136,10 +137,9 @@ pub async fn provider( ) .await { - send_to_loop + let _ = send_to_loop .send(make_error_message(&our, km_id, response_target, e)) - .await - .expect("eth: kernel sender died!"); + .await; }; } Err(anyhow::anyhow!("eth: fatal: message receiver closed!")) @@ -222,7 +222,7 @@ async fn handle_eth_action( providers: &mut Providers, active_subscriptions: &mut ActiveSubscriptions, ) -> Result<(), EthError> { - println!("provider: handle_eth_action\r"); + println!("provider: handle_eth_action: {eth_action:?}\r"); // check our access settings if the request is from a remote node if km.source.node != our { if !access_settings.deny.contains(&km.source.node) { @@ -241,12 +241,7 @@ async fn handle_eth_action( // this request. if the provider is not usable, cycle through options // before returning an error. match eth_action { - EthAction::SubscribeLogs { - sub_id, - chain_id, - ref kind, - ref params, - } => { + EthAction::SubscribeLogs { sub_id, .. } => { let new_sub = ActiveSub::Local(tokio::spawn(create_new_subscription( our.to_string(), km.id, @@ -278,12 +273,16 @@ async fn handle_eth_action( } }); } - EthAction::Request { - chain_id, - method, - params, - } => { - //todo + EthAction::Request { .. 
} => { + tokio::spawn(fulfill_request( + our.to_string(), + km.id, + km.source.clone(), + km.rsvp, + send_to_loop.clone(), + eth_action, + providers.clone(), + )); } } Ok(()) @@ -339,17 +338,15 @@ async fn create_new_subscription( .expect("eth: sender died!"); // await the subscription error and kill it if so if let Err(e) = future.await { - send_to_loop + let _ = send_to_loop .send(make_error_message(&our, km_id, target.clone(), e)) - .await - .expect("eth: kernel sender died!"); + .await; } } Err(e) => { - send_to_loop + let _ = send_to_loop .send(make_error_message(&our, km_id, target.clone(), e)) - .await - .expect("eth: kernel sender died!"); + .await; } } active_subscriptions @@ -380,32 +377,42 @@ async fn build_subscription( "eth: only accepts subscribe logs requests".to_string(), )); }; - let Some(aps) = providers.get_mut(&chain_id) else { + let Some(mut aps) = providers.get_mut(&chain_id) else { return Err(EthError::NoRpcForChain); }; // first, try any url providers we have for this chain, // then if we have none or they all fail, go to node providers. // finally, if no provider works, return an error. - for url_provider in &aps.urls { - if let Some(pubsub) = &url_provider.pubsub { - let kind = serde_json::to_value(&kind).unwrap(); - let params = serde_json::to_value(¶ms).unwrap(); - if let Ok(id) = pubsub - .inner() - .prepare("eth_subscribe", [kind, params]) - .await - { - let rx = pubsub.inner().get_raw_subscription(id).await; - return Ok(maintain_subscription( - our, - *sub_id, - rx, - target, - rsvp, - send_to_loop, - )); + for url_provider in &mut aps.urls { + let pubsub = match &url_provider.pubsub { + Some(pubsub) => pubsub, + None => { + if let Ok(()) = activate_url_provider(url_provider).await { + url_provider.pubsub.as_ref().unwrap() + } else { + continue; + } } + }; + let kind = serde_json::to_value(&kind).unwrap(); + let params = serde_json::to_value(¶ms).unwrap(); + if let Ok(id) = pubsub + .inner() + .prepare("eth_subscribe", [kind, params]) + .await + { + let rx = pubsub.inner().get_raw_subscription(id).await; + return Ok(maintain_subscription( + our, + *sub_id, + rx, + target, + rsvp, + send_to_loop, + )); } + // this provider failed and needs to be reset + url_provider.pubsub = None; } for node_provider in &aps.nodes { // todo @@ -413,269 +420,6 @@ async fn build_subscription( return Err(EthError::NoRpcForChain); } -async fn handle_eth_config_action( - our: &str, - access_settings: &mut AccessSettings, - caps_oracle: &CapMessageSender, - km: KernelMessage, - eth_config_action: EthConfigAction, - providers: &mut Providers, -) -> Result<(), EthError> { - println!("provider: handle_eth_config_action\r"); - if km.source.node != our { - return Err(EthError::PermissionDenied); - } - // check capabilities to ensure the sender is allowed to make this request - let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel(); - caps_oracle - .send(CapMessage::Has { - on: km.source.process.clone(), - cap: Capability { - issuer: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - params: serde_json::to_string(&serde_json::json!({ - "root": true, - })) - .unwrap(), - }, - responder: send_cap_bool, - }) - .await - .expect("eth: capability oracle died!"); - if !recv_cap_bool.await.unwrap_or(false) { - return Err(EthError::PermissionDenied); - } - - // modify our providers and access settings based on config action - todo!() -} - -/// Handle a local request. 
-// async fn handle_local_request( -// our: &str, -// km: &KernelMessage, -// send_to_loop: &MessageSender, -// provider: &Provider, -// connections: Arc>>>, -// public: bool, -// ) -> Result<(), EthError> { -// let Message::Request(req) = &km.message else { -// return Err(EthError::InvalidMethod( -// "eth: only accepts requests".to_string(), -// )); -// }; -// let action = serde_json::from_slice::(&req.body).map_err(|e| { -// EthError::InvalidMethod(format!("eth: failed to deserialize request: {:?}", e)) -// })?; - -// // we might want some of these in payloads.. sub items? -// let return_body: EthResponse = match action { -// EthAction::SubscribeLogs { -// sub_id, -// kind, -// params, -// } => { -// let sub_id = (km.target.process.clone(), sub_id); - -// let kind = serde_json::to_value(&kind).unwrap(); -// let params = serde_json::to_value(¶ms).unwrap(); - -// let id = provider -// .inner() -// .prepare("eth_subscribe", [kind, params]) -// .await -// .map_err(|e| EthError::TransportError(e.to_string()))?; - -// let rx = provider.inner().get_raw_subscription(id).await; -// let handle = tokio::spawn(handle_subscription_stream( -// our.to_string(), -// sub_id.1.clone(), -// rx, -// km.source.clone(), -// km.rsvp.clone(), -// send_to_loop.clone(), -// )); - -// connections.insert(sub_id, handle); -// EthResponse::Ok -// } -// EthAction::UnsubscribeLogs(sub_id) => { -// let sub_id = (km.target.process.clone(), sub_id); -// let handle = connections -// .remove(&sub_id) -// .ok_or(EthError::SubscriptionNotFound)?; - -// handle.1.abort(); -// EthResponse::Ok -// } -// EthAction::Request { method, params } => { -// let method = to_static_str(&method).ok_or(EthError::InvalidMethod(method))?; - -// let response: serde_json::Value = provider -// .inner() -// .prepare(method, params) -// .await -// .map_err(|e| EthError::TransportError(e.to_string()))?; -// EthResponse::Response { value: response } -// } -// }; -// if let Some(_) = req.expects_response { -// let _ = send_to_loop -// .send(KernelMessage { -// id: km.id, -// source: Address { -// node: our.to_string(), -// process: ETH_PROCESS_ID.clone(), -// }, -// target: km.source.clone(), -// rsvp: km.rsvp.clone(), -// message: Message::Response(( -// Response { -// inherit: false, -// body: serde_json::to_vec(&return_body).unwrap(), -// metadata: req.metadata.clone(), -// capabilities: vec![], -// }, -// None, -// )), -// lazy_load_blob: None, -// }) -// .await; -// } - -// Ok(()) -// } - -/// here we are either processing another nodes request. -/// or we are passing through an ethSub Request.. -// async fn handle_remote_request( -// our: &str, -// km: &KernelMessage, -// send_to_loop: &MessageSender, -// provider: Option<&Provider>, -// connections: Arc>>>, -// public: bool, -// ) -> Result<(), EthError> { -// let Message::Request(req) = &km.message else { -// return Err(EthError::InvalidMethod( -// "eth: only accepts requests".to_string(), -// )); -// }; - -// if let Some(provider) = provider { -// // we need some sort of agreement perhaps on rpc providing. -// // even with an agreement, fake ethsubevents could be sent to us. -// // light clients could verify blocks perhaps... 
-// if !public { -// return Err(EthError::PermissionDenied("not on the list.".to_string())); -// } - -// let action = serde_json::from_slice::(&req.body).map_err(|e| { -// EthError::InvalidMethod(format!("eth: failed to deserialize request: {:?}", e)) -// })?; - -// let return_body: EthResponse = match action { -// EthAction::SubscribeLogs { -// sub_id, -// kind, -// params, -// } => { -// let sub_id = (km.target.process.clone(), sub_id); - -// let kind = serde_json::to_value(&kind).unwrap(); -// let params = serde_json::to_value(¶ms).unwrap(); - -// let id = provider -// .inner() -// .prepare("eth_subscribe", [kind, params]) -// .await -// .map_err(|e| EthError::TransportError(e.to_string()))?; - -// let rx = provider.inner().get_raw_subscription(id).await; -// let handle = tokio::spawn(handle_subscription_stream( -// our.to_string(), -// sub_id.1.clone(), -// rx, -// km.target.clone(), -// km.rsvp.clone(), -// send_to_loop.clone(), -// )); - -// connections.insert(sub_id, handle); -// EthResponse::Ok -// } -// EthAction::UnsubscribeLogs(sub_id) => { -// let sub_id = (km.target.process.clone(), sub_id); -// let handle = connections -// .remove(&sub_id) -// .ok_or(EthError::SubscriptionNotFound)?; - -// handle.1.abort(); -// EthResponse::Ok -// } -// EthAction::Request { method, params } => { -// let method = to_static_str(&method).ok_or(EthError::InvalidMethod(method))?; - -// let response: serde_json::Value = provider -// .inner() -// .prepare(method, params) -// .await -// .map_err(|e| EthError::TransportError(e.to_string()))?; - -// EthResponse::Response { value: response } -// } -// }; - -// let response = KernelMessage { -// id: km.id, -// source: Address { -// node: our.to_string(), -// process: ETH_PROCESS_ID.clone(), -// }, -// target: km.source.clone(), -// rsvp: km.rsvp.clone(), -// message: Message::Response(( -// Response { -// inherit: false, -// body: serde_json::to_vec(&return_body).unwrap(), -// metadata: req.metadata.clone(), -// capabilities: vec![], -// }, -// None, -// )), -// lazy_load_blob: None, -// }; - -// let _ = send_to_loop.send(response).await; -// } else { -// // We do not have a provider, this is a reply for a request made by us. -// if let Ok(eth_sub) = serde_json::from_slice::(&req.body) { -// // forward... -// if let Some(target) = km.rsvp.clone() { -// let _ = send_to_loop -// .send(KernelMessage { -// id: rand::random(), -// source: Address { -// node: our.to_string(), -// process: ETH_PROCESS_ID.clone(), -// }, -// target: target, -// rsvp: None, -// message: Message::Request(req.clone()), -// lazy_load_blob: None, -// }) -// .await; -// } -// } -// } -// Ok(()) -// } - -/// Executed as a long-lived task. The JoinHandle is stored in the `connections` map. -/// This task is responsible for connecting to the ETH RPC provider and streaming logs -/// for a specific subscription made by a process. async fn maintain_subscription( our: String, sub_id: u64, @@ -726,6 +470,141 @@ async fn maintain_subscription( } } +async fn fulfill_request( + our: String, + km_id: u64, + target: Address, + rsvp: Option
, + send_to_loop: MessageSender, + eth_action: EthAction, + providers: Providers, +) { + println!("provider: fulfill_request\r"); + let EthAction::Request { + chain_id, + method, + params, + } = eth_action + else { + return; + }; + let Some(method) = to_static_str(&method) else { + let _ = send_to_loop + .send(make_error_message( + &our, + km_id, + target, + EthError::InvalidMethod(method), + )) + .await; + return; + }; + let Some(mut aps) = providers.get_mut(&chain_id) else { + let _ = send_to_loop + .send(make_error_message( + &our, + km_id, + target, + EthError::NoRpcForChain, + )) + .await; + return; + }; + // first, try any url providers we have for this chain, + // then if we have none or they all fail, go to node providers. + // finally, if no provider works, return an error. + for url_provider in &mut aps.urls { + let pubsub = match &url_provider.pubsub { + Some(pubsub) => pubsub, + None => { + if let Ok(()) = activate_url_provider(url_provider).await { + url_provider.pubsub.as_ref().unwrap() + } else { + continue; + } + } + }; + let response = pubsub.inner().prepare(method, params.clone()).await; + if let Ok(value) = response { + send_to_loop + .send(KernelMessage { + id: km_id, + source: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), + }, + target, + rsvp, + message: Message::Response(( + Response { + inherit: false, + body: serde_json::to_vec(&EthResponse::Response { value }).unwrap(), + metadata: None, + capabilities: vec![], + }, + None, + )), + lazy_load_blob: None, + }) + .await + .expect("eth: sender died!"); + return; + } + // this provider failed and needs to be reset + url_provider.pubsub = None; + } + for node_provider in &aps.nodes { + // todo + } + let _ = send_to_loop + .send(make_error_message( + &our, + km_id, + target, + EthError::NoRpcForChain, + )) + .await; +} + +async fn handle_eth_config_action( + our: &str, + access_settings: &mut AccessSettings, + caps_oracle: &CapMessageSender, + km: KernelMessage, + eth_config_action: EthConfigAction, + providers: &mut Providers, +) -> Result<(), EthError> { + println!("provider: handle_eth_config_action\r"); + if km.source.node != our { + return Err(EthError::PermissionDenied); + } + // check capabilities to ensure the sender is allowed to make this request + let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel(); + caps_oracle + .send(CapMessage::Has { + on: km.source.process.clone(), + cap: Capability { + issuer: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), + }, + params: serde_json::to_string(&serde_json::json!({ + "root": true, + })) + .unwrap(), + }, + responder: send_cap_bool, + }) + .await + .expect("eth: capability oracle died!"); + if !recv_cap_bool.await.unwrap_or(false) { + return Err(EthError::PermissionDenied); + } + + // modify our providers and access settings based on config action + todo!() +} + fn make_error_message(our: &str, km_id: u64, target: Address, error: EthError) -> KernelMessage { println!("provider: make_error_message\r"); KernelMessage { diff --git a/kinode/src/kernel/mod.rs b/kinode/src/kernel/mod.rs index 608af0c72..fde3f7c30 100644 --- a/kinode/src/kernel/mod.rs +++ b/kinode/src/kernel/mod.rs @@ -670,7 +670,7 @@ pub async fn kernel( mut recv_debug_in_loop: t::DebugReceiver, send_to_net: t::MessageSender, home_directory_path: String, - contract_address: String, + contract_chain_and_address: (u64, String), runtime_extensions: Vec<(t::ProcessId, t::MessageSender, bool)>, default_pki_entries: Vec, ) -> Result<()> { @@ -872,7 +872,7 
@@ pub async fn kernel( message: t::Message::Request(t::Request { inherit: false, expects_response: None, - body: contract_address.as_bytes().to_vec(), + body: serde_json::to_vec(&contract_chain_and_address).unwrap(), metadata: None, capabilities: vec![], }), diff --git a/kinode/src/main.rs b/kinode/src/main.rs index 6d8c988ca..99dd7335a 100644 --- a/kinode/src/main.rs +++ b/kinode/src/main.rs @@ -145,10 +145,10 @@ async fn main() { *matches.get_one::("detached").unwrap(), ); - let contract_address = if on_testnet { - register::KNS_SEPOLIA_ADDRESS + let contract_chain_and_address: (u64, String) = if on_testnet { + (11155111, register::KNS_SEPOLIA_ADDRESS.to_string()) } else { - register::KNS_OPTIMISM_ADDRESS + (10, register::KNS_OPTIMISM_ADDRESS.to_string()) }; // check .testnet file for true/false in order to enforce testnet mode on subsequent boots of this node @@ -464,7 +464,7 @@ async fn main() { kernel_debug_message_receiver, net_message_sender.clone(), home_directory_path.clone(), - contract_address.to_string(), + contract_chain_and_address.clone(), runtime_extensions, // from saved eth provider config, filter for node identities which will be // bootstrapped into the networking module, so that this node can start @@ -491,7 +491,7 @@ async fn main() { print_sender.clone(), net_message_sender, net_message_receiver, - contract_address.to_string(), + contract_chain_and_address.1, REVEAL_IP, )); #[cfg(feature = "simulation-mode")] diff --git a/lib/src/net.rs b/lib/src/net.rs deleted file mode 100644 index e69de29bb..000000000 From 365ba8ca703921875addb7eaf071e6f0ca07dffc Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 26 Feb 2024 01:42:38 -0300 Subject: [PATCH 07/23] WIP stuck on bizarre alloy bug --- Cargo.lock | 125 +++++++++++++----- kinode/Cargo.toml | 10 +- kinode/default_providers_testnet.json | 2 +- .../packages/app_store/app_store/Cargo.toml | 3 +- .../packages/app_store/app_store/src/lib.rs | 97 +++++++++----- .../packages/kns_indexer/get_block/Cargo.toml | 2 +- .../packages/kns_indexer/get_block/src/lib.rs | 9 +- .../kns_indexer/kns_indexer/Cargo.toml | 3 +- .../kns_indexer/kns_indexer/src/lib.rs | 67 +++++++--- kinode/src/eth/provider.rs | 72 ++++++++-- lib/Cargo.toml | 4 +- lib/src/eth.rs | 6 +- 12 files changed, 295 insertions(+), 105 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8aefd9c4..8f980da49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,7 +93,7 @@ checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -112,13 +112,24 @@ dependencies = [ "thiserror", ] +[[package]] +name = "alloy-json-rpc" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ "alloy-eips", - "alloy-json-rpc", + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "alloy-primitives", "alloy-rlp", "serde", @@ -149,14 
+160,14 @@ dependencies = [ [[package]] name = "alloy-providers" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ "alloy-network", "alloy-primitives", "alloy-rpc-client", "alloy-rpc-trace-types", - "alloy-rpc-types", - "alloy-transport", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "alloy-transport-http", "async-trait", "auto_impl", @@ -168,11 +179,11 @@ dependencies = [ [[package]] name = "alloy-pubsub" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "alloy-primitives", - "alloy-transport", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "bimap", "futures", "serde", @@ -207,12 +218,12 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "alloy-primitives", "alloy-pubsub", - "alloy-transport", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "alloy-transport-http", "alloy-transport-ws", "futures", @@ -228,10 +239,10 @@ dependencies = [ [[package]] name = "alloy-rpc-trace-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "serde", "serde_json", ] @@ -249,6 +260,19 @@ dependencies = [ "thiserror", ] +[[package]] +name = "alloy-rpc-types" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "itertools 0.12.1", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "alloy-sol-macro" version = "0.6.3" @@ -284,8 +308,25 @@ name = "alloy-transport" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", + "base64 0.21.7", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "alloy-transport" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" +dependencies = [ + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "base64 0.21.7", + "futures-util", "serde", "serde_json", "thiserror", @@ -298,10 +339,10 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ - "alloy-json-rpc", - "alloy-transport", + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "reqwest", "serde_json", "tower", @@ -311,10 +352,10 @@ dependencies = [ [[package]] name = "alloy-transport-ws" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" +source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ "alloy-pubsub", - "alloy-transport", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "futures", "http 0.2.11", "serde_json", @@ -407,7 +448,7 @@ dependencies = [ "alloy-sol-types", "anyhow", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=7b6fd6e)", + "kinode_process_lib 0.6.0", "rand 0.8.5", "serde", "serde_json", @@ -1967,7 +2008,7 @@ dependencies = [ name = "get_block" version = "0.1.0" dependencies = [ - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=7b6fd6e)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=567ba78)", "serde", "serde_json", "wit-bindgen", @@ -2563,7 +2604,7 @@ dependencies = [ "alloy-providers", "alloy-pubsub", "alloy-rpc-client", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "alloy-transport-ws", "anyhow", "async-trait", @@ -2631,6 +2672,26 @@ dependencies = [ "lib", ] +[[package]] +name = "kinode_process_lib" +version = "0.6.0" +dependencies = [ + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "anyhow", + "bincode", + "http 1.0.0", + "mime_guess", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror", + "url", + "wit-bindgen", +] + [[package]] name = "kinode_process_lib" version = "0.6.0" @@ -2653,10 +2714,10 @@ name = "kinode_process_lib" version = "0.6.0" source = "git+https://github.com/kinode-dao/process_lib?rev=3232423#323242399efdcdad02e7f31bb6a9cc5eec048610" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", "alloy-primitives", - "alloy-rpc-types", - "alloy-transport", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", "anyhow", "bincode", "http 1.0.0", @@ -2672,12 +2733,12 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib?rev=7b6fd6e#7b6fd6ee160299514fee30b315a2a53fbfb434d7" +source = "git+https://github.com/kinode-dao/process_lib?rev=567ba78#567ba7830ba387625e668c61f252f98ff849d6eb" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", "alloy-primitives", - "alloy-rpc-types", - "alloy-transport", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", "anyhow", "bincode", "http 1.0.0", @@ -2745,7 
+2806,7 @@ dependencies = [ "anyhow", "bincode", "hex", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=7b6fd6e)", + "kinode_process_lib 0.6.0", "rmp-serde", "serde", "serde_json", @@ -2775,7 +2836,7 @@ name = "lib" version = "0.6.0" dependencies = [ "alloy-pubsub", - "alloy-rpc-types", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "lazy_static", "rand 0.8.5", "reqwest", diff --git a/kinode/Cargo.toml b/kinode/Cargo.toml index c14595431..7b99d4014 100644 --- a/kinode/Cargo.toml +++ b/kinode/Cargo.toml @@ -26,11 +26,11 @@ simulation-mode = [] [dependencies] aes-gcm = "0.10.2" -alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "098ad56" } -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "098ad56" } -alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "098ad56", features = ["ws"]} -alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "098ad56" } -alloy-providers = { git = "https://github.com/alloy-rs/alloy", rev = "098ad56" } +alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" } +alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4", features = ["ws"]} +alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" } +alloy-providers = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" } anyhow = "1.0.71" async-trait = "0.1.71" base64 = "0.13" diff --git a/kinode/default_providers_testnet.json b/kinode/default_providers_testnet.json index 3e46a815c..19abedb63 100644 --- a/kinode/default_providers_testnet.json +++ b/kinode/default_providers_testnet.json @@ -13,7 +13,7 @@ "chain_id": 11155111, "trusted": false, "provider": { - "RpcUrl": "wss://eth-sepolia.g.alchemy.com/v2/a4bRKYnvC0uT2l1rzVDAvldH3OPKQnKm" + "RpcUrl": "wss://eth-sepolia.g.alchemy.com/v2/iZZorIE5O93pUSAqyB3INvdxJZ8od_ro" }, "public": false, "allow": [], diff --git a/kinode/packages/app_store/app_store/Cargo.toml b/kinode/packages/app_store/app_store/Cargo.toml index d6d948a14..a6c5521b1 100644 --- a/kinode/packages/app_store/app_store/Cargo.toml +++ b/kinode/packages/app_store/app_store/Cargo.toml @@ -9,7 +9,8 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" anyhow = "1.0" bincode = "1.3.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "7b6fd6e" } +# kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "567ba78" } +kinode_process_lib = { path = "../../../../../process_lib" } rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/app_store/app_store/src/lib.rs b/kinode/packages/app_store/app_store/src/lib.rs index 02ffd2085..844428a36 100644 --- a/kinode/packages/app_store/app_store/src/lib.rs +++ b/kinode/packages/app_store/app_store/src/lib.rs @@ -1,6 +1,5 @@ use kinode_process_lib::eth::{ - get_logs, subscribe, unsubscribe, Address as EthAddress, EthSub, EthSubResult, Filter, - SubscriptionResult, + Address as EthAddress, EthSub, EthSubResult, Filter, Log, SubscriptionResult, }; use kinode_process_lib::http::{bind_http_path, serve_ui, HttpServerRequest}; use kinode_process_lib::kernel_types as kt; @@ -72,6 +71,32 @@ pub enum Resp { FTWorkerResult(FTWorkerResult), } +fn fetch_logs(eth_provider: &eth::Provider, filter: &Filter) -> Vec<Log> { + loop { + match eth_provider.get_logs(filter) { + Ok(res) => return res,
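+ // editor's note (not in the original patch): the Err arm below sleeps for five seconds and retries forever, so a permanently unreachable provider will stall this process inside fetch_logs.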
Err(_) => { + println!("app store: failed to fetch logs! trying again in 5s..."); + std::thread::sleep(std::time::Duration::from_secs(5)); + continue; + } + } + } +} + +fn subscribe_to_logs(eth_provider: ð::Provider, filter: Filter) { + loop { + match eth_provider.subscribe(1, filter.clone()) { + Ok(()) => break, + Err(_) => { + println!("app store: failed to subscribe to chain! trying again in 5s..."); + std::thread::sleep(std::time::Duration::from_secs(5)); + continue; + } + } + } +} + call_init!(init); fn init(our: Address) { println!("{}: started", our.package()); @@ -110,25 +135,26 @@ fn init(our: Address) { state.contract_address ); + // create new provider for sepolia with request-timeout of 60s + // can change, log requests can take quite a long time. + let eth_provider = eth::Provider::new(CHAIN_ID, 30); + let mut requested_packages: HashMap = HashMap::new(); // get past logs, subscribe to new ones. let filter = Filter::new() .address(EthAddress::from_str(&state.contract_address).unwrap()) .from_block(state.last_saved_block - 1) - .events(EVENTS); - - let logs = get_logs(CHAIN_ID, &filter); + .event(EVENTS[0]) + .event(EVENTS[1]) + .event(EVENTS[2]); - if let Ok(logs) = logs { - for log in logs { - if let Err(e) = state.ingest_listings_contract_event(&our, log) { - println!("app store: error ingesting log: {e:?}"); - }; - } + for log in fetch_logs(ð_provider, &filter) { + if let Err(e) = state.ingest_listings_contract_event(&our, log) { + println!("app store: error ingesting log: {e:?}"); + }; } - - subscribe(1, CHAIN_ID, filter).unwrap(); + subscribe_to_logs(ð_provider, filter); loop { match await_message() { @@ -137,8 +163,13 @@ fn init(our: Address) { println!("app store: got network error: {send_error}"); } Ok(message) => { - if let Err(e) = handle_message(&our, &mut state, &mut requested_packages, &message) - { + if let Err(e) = handle_message( + &our, + &mut state, + ð_provider, + &mut requested_packages, + &message, + ) { println!("app store: error handling message: {:?}", e) } } @@ -153,6 +184,7 @@ fn init(our: Address) { fn handle_message( our: &Address, mut state: &mut State, + eth_provider: ð::Provider, mut requested_packages: &mut HashMap, message: &Message, ) -> anyhow::Result<()> { @@ -167,8 +199,13 @@ fn handle_message( if our.node != source.node { return Err(anyhow::anyhow!("local request from non-local node")); } - let resp = - handle_local_request(&our, &local_request, &mut state, &mut requested_packages); + let resp = handle_local_request( + &our, + &local_request, + &mut state, + eth_provider, + &mut requested_packages, + ); if expects_response.is_some() { Response::new().body(serde_json::to_vec(&resp)?).send()?; } @@ -272,6 +309,7 @@ fn handle_local_request( our: &Address, request: &LocalRequest, state: &mut State, + eth_provider: ð::Provider, requested_packages: &mut HashMap, ) -> LocalResponse { match request { @@ -341,24 +379,23 @@ fn handle_local_request( LocalRequest::RebuildIndex => { *state = State::new(CONTRACT_ADDRESS.to_string()).unwrap(); // kill our old subscription and build a new one. 
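The two retry helpers introduced above trade latency for liveness: a process blocks and re-polls every five seconds rather than crashing when the chain is unreachable. A minimal sketch of how a process wires them together at startup, assuming the `eth::Provider` API from this series; `CHAIN_ID`, `EVENTS`, and the contract-address handling are illustrative placeholders:

```rust
use kinode_process_lib::eth;
use std::str::FromStr;

fn bootstrap_index(contract_address: &str, from_block: u64) {
    // one provider handle per chain; the second argument is the
    // per-request timeout in seconds introduced by this series
    let eth_provider = eth::Provider::new(CHAIN_ID, 60);

    let filter = eth::Filter::new()
        .address(eth::Address::from_str(contract_address).unwrap())
        .from_block(from_block)
        .events(EVENTS);

    // backfill history first, then subscribe so that new logs arrive
    // as incoming messages from eth:distro:sys
    for _log in fetch_logs(&eth_provider, &filter) {
        // ...ingest each historical log into local state...
    }
    subscribe_to_logs(&eth_provider, filter);
}
```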
- unsubscribe(1).unwrap(); + eth_provider + .unsubscribe(1) + .expect("app_store: failed to unsub from eth events!"); let filter = Filter::new() .address(EthAddress::from_str(&state.contract_address).unwrap()) .from_block(state.last_saved_block - 1) - .events(EVENTS); - - let logs = get_logs(CHAIN_ID, &filter); - - if let Ok(logs) = logs { - for log in logs { - if let Err(e) = state.ingest_listings_contract_event(our, log) { - println!("app store: error ingesting log: {e:?}"); - }; - } + .event(EVENTS[0]) + .event(EVENTS[1]) + .event(EVENTS[2]); + + for log in fetch_logs(&eth_provider, &filter) { + if let Err(e) = state.ingest_listings_contract_event(our, log) { + println!("app store: error ingesting log: {e:?}"); + }; } - subscribe(1, CHAIN_ID, filter).unwrap(); - + subscribe_to_logs(&eth_provider, filter); LocalResponse::RebuiltIndex } } diff --git a/kinode/packages/kns_indexer/get_block/Cargo.toml b/kinode/packages/kns_indexer/get_block/Cargo.toml index 9ea60ea29..ec4e1e974 100644 --- a/kinode/packages/kns_indexer/get_block/Cargo.toml +++ b/kinode/packages/kns_indexer/get_block/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "7b6fd6e" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "567ba78" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/kns_indexer/get_block/src/lib.rs b/kinode/packages/kns_indexer/get_block/src/lib.rs index a4e561117..1c159321c 100644 --- a/kinode/packages/kns_indexer/get_block/src/lib.rs +++ b/kinode/packages/kns_indexer/get_block/src/lib.rs @@ -1,5 +1,5 @@ use kinode_process_lib::{ - await_next_request_body, call_init, eth::get_block_number, println, Address, + await_next_request_body, call_init, println, Address, eth, }; wit_bindgen::generate!({ path: "wit", @@ -24,12 +24,15 @@ fn init(_our: Address) { .parse::<u64>() .unwrap_or(1); - match get_block_number(chain_id) { + // request timeout of 5s + let provider = eth::Provider::new(chain_id, 5); + + match provider.get_block_number() { Ok(block_number) => { println!("latest block number: {block_number}"); } Err(e) => { - println!("get_block: failed to get block number: {}", e); + println!("failed to get block number: {e:?}"); } } } diff --git a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml index 6da9a69d4..cf6f45265 100644 --- a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml +++ b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml @@ -10,7 +10,8 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" bincode = "1.3.3" hex = "0.4.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "7b6fd6e" } +# kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "567ba78" } +kinode_process_lib = { path = "../../../../../process_lib" } rmp-serde = "1.1.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index b27f97c57..b8a5ba0d4 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -1,10 +1,9 @@ use alloy_sol_types::{sol, SolEvent}; - use kinode_process_lib::{ await_message, eth::{ - get_block_number, get_logs, subscribe, Address as EthAddress, BlockNumberOrTag,
EthSub, - EthSubResult, Filter, Log, SubscriptionResult, + Address as EthAddress, BlockNumberOrTag, EthSub, EthSubResult, Filter, Log, Provider, + SubscriptionResult, }, get_typed_state, print_to_terminal, println, set_state, Address, Message, Request, Response, }; @@ -100,6 +99,19 @@ sol! { event RoutingUpdate(bytes32 indexed node, bytes32[] routers); } +fn subscribe_to_logs(eth_provider: &Provider, filter: Filter) { + loop { + match eth_provider.subscribe(1, filter.clone()) { + Ok(()) => break, + Err(_) => { + println!("kns_indexer: failed to subscribe to chain! trying again in 5s..."); + std::thread::sleep(std::time::Duration::from_secs(5)); + continue; + } + } + } +} + struct Component; impl Guest for Component { fn init(our: String) { @@ -173,19 +185,32 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { .address(state.contract_address.parse::<EthAddress>().unwrap()) .to_block(BlockNumberOrTag::Latest) .from_block(state.block - 1) - .events(vec![ - "NodeRegistered(bytes32,bytes)", - "KeyUpdate(bytes32,bytes32)", - "IpUpdate(bytes32,uint128)", - "WsUpdate(bytes32,uint16)", - "RoutingUpdate(bytes32,bytes32[])", - ]); + .event("NodeRegistered(bytes32,bytes)") + .event("KeyUpdate(bytes32,bytes32)") + .event("IpUpdate(bytes32,uint128)") + .event("WsUpdate(bytes32,uint16)") + .event("RoutingUpdate(bytes32,bytes32[])"); + + // 20s timeout -- these calls can take a long time + // if they do time out, we try them again + let eth_provider = Provider::new(state.chain_id, 20); // if block in state is < current_block, get logs from that part. - if state.block < get_block_number(state.chain_id)? { - let logs = get_logs(state.chain_id, &filter)?; - for log in logs { - handle_log(&our, &mut state, &log)?; + if state.block < eth_provider.get_block_number().unwrap_or(u64::MAX) { + loop { + match eth_provider.get_logs(&filter) { + Ok(logs) => { + for log in logs { + handle_log(&our, &mut state, &log)?; + } + break; + } + Err(_) => { + println!("kns_indexer: failed to fetch logs! trying again in 5s..."); + std::thread::sleep(std::time::Duration::from_secs(5)); + continue; + } + } } } // shove all state into net::net @@ -198,7 +223,7 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { set_state(&bincode::serialize(&state)?); - subscribe(1, state.chain_id, filter.clone())?; + subscribe_to_logs(&eth_provider, filter.clone()); let mut pending_requests: BTreeMap<u64, Vec<IndexerRequests>> = BTreeMap::new(); @@ -214,7 +239,14 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { }; if source.process == "eth:distro:sys" { - handle_eth_message(&our, &mut state, &mut pending_requests, &body, &filter)?; + handle_eth_message( + &our, + &mut state, + &eth_provider, + &mut pending_requests, + &body, + &filter, + )?; } else { let Ok(request) = serde_json::from_slice::<IndexerRequests>(&body) else { println!("kns_indexer: got invalid message"); @@ -254,6 +286,7 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { fn handle_eth_message( our: &Address, state: &mut State, + eth_provider: &Provider, pending_requests: &mut BTreeMap<u64, Vec<IndexerRequests>>, body: &[u8], filter: &Filter, ) -> anyhow::Result<()> { @@ -270,7 +303,7 @@ fn handle_eth_message( } Err(e) => { println!("kns_indexer: got sub error, resubscribing..
{:?}", e.error); - subscribe(1, state.chain_id, filter.clone())?; + subscribe_to_logs(&eth_provider, filter.clone()); } } diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index 07d7064b8..d0deae720 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -79,8 +79,15 @@ async fn activate_url_provider(provider: &mut UrlProvider) -> Result<()> { url: provider.url.to_string(), auth: None, }; - let client = ClientBuilder::default().ws(connector).await?; + println!("here1\r"); + let client = tokio::time::timeout( + std::time::Duration::from_secs(10), + ClientBuilder::default().ws(connector), + ) + .await??; + println!("here2\r"); provider.pubsub = Some(Provider::new_with_client(client)); + println!("here3\r"); Ok(()) } _ => Err(anyhow::anyhow!( @@ -160,6 +167,7 @@ async fn handle_message( match &km.message { Message::Response(_) => handle_passthrough_response(our, send_to_loop, km).await, Message::Request(req) => { + let timeout = *req.expects_response.as_ref().unwrap_or(&60); // TODO make this a config if let Ok(eth_action) = serde_json::from_slice(&req.body) { // these can be from remote or local processes return handle_eth_action( @@ -167,6 +175,7 @@ async fn handle_message( access_settings, send_to_loop, km, + timeout, eth_action, providers, active_subscriptions, @@ -218,6 +227,7 @@ async fn handle_eth_action( access_settings: &mut AccessSettings, send_to_loop: &MessageSender, km: KernelMessage, + timeout: u64, eth_action: EthAction, providers: &mut Providers, active_subscriptions: &mut ActiveSubscriptions, ) -> Result<(), EthError> { @@ -242,7 +252,21 @@ async fn handle_eth_action( // before returning an error. match eth_action { EthAction::SubscribeLogs { sub_id, .. } => { - let new_sub = ActiveSub::Local(tokio::spawn(create_new_subscription( + // let new_sub = ActiveSub::Local(tokio::spawn(create_new_subscription( + // our.to_string(), + // km.id, + // km.source.clone(), + // km.rsvp, + // send_to_loop.clone(), + // eth_action, + // providers.clone(), + // active_subscriptions.clone(), + // ))); + // let mut subs = active_subscriptions + // .entry(km.source.process) + // .or_insert(HashMap::new()); + // subs.insert(sub_id, new_sub); + create_new_subscription( our.to_string(), km.id, km.source.clone(), @@ -251,11 +275,8 @@ async fn handle_eth_action( eth_action, providers.clone(), active_subscriptions.clone(), - ))); - let mut subs = active_subscriptions - .entry(km.source.process) - .or_insert(HashMap::new()); - subs.insert(sub_id, new_sub); + ) + .await } EthAction::UnsubscribeLogs(sub_id) => { active_subscriptions @@ -274,21 +295,22 @@ async fn handle_eth_action( }); } EthAction::Request { .. } => { - tokio::spawn(fulfill_request( + fulfill_request( our.to_string(), km.id, km.source.clone(), km.rsvp, + timeout, send_to_loop.clone(), eth_action, providers.clone(), - )); + ) + .await; } } Ok(()) } -/// spawned as a task /// cleans itself up when the subscription is closed or fails. async fn create_new_subscription( our: String, km_id: u64, target: Address, rsvp: Option
, + timeout: u64, send_to_loop: MessageSender, eth_action: EthAction, providers: Providers, @@ -524,7 +547,34 @@ async fn fulfill_request( } } }; - let response = pubsub.inner().prepare(method, params.clone()).await; + println!("here5\r"); + let connector = WsConnect { + url: url_provider.url.to_string(), + auth: None, + }; + let client = tokio::time::timeout( + std::time::Duration::from_secs(10), + ClientBuilder::default().ws(connector), + ) + .await.unwrap().unwrap(); + println!("here6\r"); + let provider = Provider::new_with_client(client); + println!("method: {method:?}\r"); + println!("params: {params:?}\r"); + let response = provider.inner().prepare(method, params.clone()).await; + println!("res: {response:?}\r"); + // let Ok(response) = tokio::time::timeout( + // std::time::Duration::from_secs(timeout), + // pubsub.inner().prepare(method, params.clone()), + // ) + // .await + // else { + // println!("what the FUCK\r"); + // // this provider failed and needs to be reset + // url_provider.pubsub = None; + // continue; + // }; + println!("here6\r"); if let Ok(value) = response { send_to_loop .send(KernelMessage { diff --git a/lib/Cargo.toml b/lib/Cargo.toml index f9d264973..15c6995cd 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -14,8 +14,8 @@ license = "Apache-2.0" reqwest = { version = "0.11.22", features = ["blocking"] } [dependencies] -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "098ad56" } -alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "098ad56" } +alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "6f8ebb4" } lazy_static = "1.4.0" rand = "0.8.4" ring = "0.16.20" diff --git a/lib/src/eth.rs b/lib/src/eth.rs index 04cd0d09a..e06830e13 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -47,7 +47,7 @@ pub struct EthSubError { } /// The Response type which a process will get from requesting with an [`EthAction`] will be -/// of the form `Result<(), EthError>`, serialized and deserialized using `serde_json::to_vec` +/// of this type, serialized and deserialized using `serde_json::to_vec` /// and `serde_json::from_slice`. #[derive(Debug, Serialize, Deserialize)] pub enum EthResponse { @@ -68,10 +68,14 @@ pub enum EthError { SubscriptionNotFound, /// Invalid method InvalidMethod(String), + /// Invalid params + InvalidParams, /// Permission denied PermissionDenied, /// Internal RPC error RpcError(String), + /// RPC timed out + RpcTimeout, } /// The action type used for configuring eth:distro:sys. 
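The debug scaffolding above is groping toward the one idea that later patches keep: wrap every RPC future in `tokio::time::timeout` and treat expiry as a dead provider. The pattern in isolation, as a runnable sketch where a sleeping future stands in for a hung WebSocket call:

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    // stand-in for an RPC call against an unresponsive provider
    let slow_rpc = async {
        tokio::time::sleep(Duration::from_secs(10)).await;
        "0x1b4" // pretend block number
    };
    match tokio::time::timeout(Duration::from_secs(2), slow_rpc).await {
        Ok(value) => println!("got {value}"),
        // Err(Elapsed): reset this provider and fall through to the next
        Err(_elapsed) => println!("rpc timed out, resetting provider"),
    }
}
```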
Only processes which have the "root" From d07ab9784fe2c0965d72e6edba5a3ee3f187046e Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 26 Feb 2024 02:39:22 -0300 Subject: [PATCH 08/23] update process_lib with fix, provider working again for urls --- Cargo.lock | 38 +++-------- .../packages/app_store/app_store/Cargo.toml | 3 +- .../packages/app_store/app_store/src/lib.rs | 8 +-- .../packages/kns_indexer/get_block/Cargo.toml | 2 +- .../kns_indexer/kns_indexer/Cargo.toml | 3 +- .../kns_indexer/kns_indexer/src/lib.rs | 12 ++-- kinode/src/eth/provider.rs | 67 +++++-------------- 7 files changed, 38 insertions(+), 95 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f980da49..93596fac2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -448,7 +448,7 @@ dependencies = [ "alloy-sol-types", "anyhow", "bincode", - "kinode_process_lib 0.6.0", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=d1c29c2)", "rand 0.8.5", "serde", "serde_json", @@ -2008,7 +2008,7 @@ dependencies = [ name = "get_block" version = "0.1.0" dependencies = [ - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=567ba78)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=d1c29c2)", "serde", "serde_json", "wit-bindgen", @@ -2672,26 +2672,6 @@ dependencies = [ "lib", ] -[[package]] -name = "kinode_process_lib" -version = "0.6.0" -dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", - "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", - "anyhow", - "bincode", - "http 1.0.0", - "mime_guess", - "rand 0.8.5", - "serde", - "serde_json", - "thiserror", - "url", - "wit-bindgen", -] - [[package]] name = "kinode_process_lib" version = "0.6.0" @@ -2733,12 +2713,8 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib?rev=567ba78#567ba7830ba387625e668c61f252f98ff849d6eb" +source = "git+https://github.com/kinode-dao/process_lib.git?rev=9d185e1#9d185e1e264c93af53d004ba32520fd5d046e7e5" dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", - "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", "anyhow", "bincode", "http 1.0.0", @@ -2754,8 +2730,12 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib.git?rev=9d185e1#9d185e1e264c93af53d004ba32520fd5d046e7e5" +source = "git+https://github.com/kinode-dao/process_lib?rev=d1c29c2#d1c29c20ab7be0e1cf98ee96711b719e147cfc56" dependencies = [ + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "anyhow", "bincode", "http 1.0.0", @@ -2806,7 +2786,7 @@ dependencies = [ "anyhow", "bincode", "hex", - "kinode_process_lib 0.6.0", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=d1c29c2)", "rmp-serde", "serde", "serde_json", diff --git a/kinode/packages/app_store/app_store/Cargo.toml b/kinode/packages/app_store/app_store/Cargo.toml index a6c5521b1..be213dab9 100644 ---
a/kinode/packages/app_store/app_store/Cargo.toml +++ b/kinode/packages/app_store/app_store/Cargo.toml @@ -9,8 +9,7 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" anyhow = "1.0" bincode = "1.3.3" -# kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "567ba78" } -kinode_process_lib = { path = "../../../../../process_lib" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "d1c29c2" } rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/app_store/app_store/src/lib.rs b/kinode/packages/app_store/app_store/src/lib.rs index 844428a36..8e59f8c00 100644 --- a/kinode/packages/app_store/app_store/src/lib.rs +++ b/kinode/packages/app_store/app_store/src/lib.rs @@ -145,9 +145,7 @@ fn init(our: Address) { let filter = Filter::new() .address(EthAddress::from_str(&state.contract_address).unwrap()) .from_block(state.last_saved_block - 1) - .event(EVENTS[0]) - .event(EVENTS[1]) - .event(EVENTS[2]); + .events(EVENTS); for log in fetch_logs(&eth_provider, &filter) { if let Err(e) = state.ingest_listings_contract_event(&our, log) { @@ -386,9 +384,7 @@ fn handle_local_request( let filter = Filter::new() .address(EthAddress::from_str(&state.contract_address).unwrap()) .from_block(state.last_saved_block - 1) - .event(EVENTS[0]) - .event(EVENTS[1]) - .event(EVENTS[2]); + .events(EVENTS); for log in fetch_logs(&eth_provider, &filter) { if let Err(e) = state.ingest_listings_contract_event(our, log) { diff --git a/kinode/packages/kns_indexer/get_block/Cargo.toml b/kinode/packages/kns_indexer/get_block/Cargo.toml index ec4e1e974..37674ec1c 100644 --- a/kinode/packages/kns_indexer/get_block/Cargo.toml +++ b/kinode/packages/kns_indexer/get_block/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "567ba78" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "d1c29c2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml index cf6f45265..344b617a0 100644 --- a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml +++ b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml @@ -10,8 +10,7 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" bincode = "1.3.3" hex = "0.4.3" -# kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "567ba78" } -kinode_process_lib = { path = "../../../../../process_lib" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "d1c29c2" } rmp-serde = "1.1.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index b8a5ba0d4..8573d2f61 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -185,11 +185,13 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { .address(state.contract_address.parse::<EthAddress>().unwrap()) .to_block(BlockNumberOrTag::Latest) .from_block(state.block - 1) - .event("NodeRegistered(bytes32,bytes)") - .event("KeyUpdate(bytes32,bytes32)") - .event("IpUpdate(bytes32,uint128)") - .event("WsUpdate(bytes32,uint16)") - .event("RoutingUpdate(bytes32,bytes32[])"); +
.events(vec![ + "NodeRegistered(bytes32,bytes)", + "KeyUpdate(bytes32,bytes32)", + "IpUpdate(bytes32,uint128)", + "WsUpdate(bytes32,uint16)", + "RoutingUpdate(bytes32,bytes32[])" + ]); // 60s timeout -- these calls can take a long time // if they do time out, we try them again diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index d0deae720..219119c71 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -79,15 +79,12 @@ async fn activate_url_provider(provider: &mut UrlProvider) -> Result<()> { url: provider.url.to_string(), auth: None, }; - println!("here1\r"); let client = tokio::time::timeout( std::time::Duration::from_secs(10), ClientBuilder::default().ws(connector), ) .await??; - println!("here2\r"); provider.pubsub = Some(Provider::new_with_client(client)); - println!("here3\r"); Ok(()) } _ => Err(anyhow::anyhow!( @@ -252,21 +249,7 @@ async fn handle_eth_action( // before returning an error. match eth_action { EthAction::SubscribeLogs { sub_id, .. } => { - // let new_sub = ActiveSub::Local(tokio::spawn(create_new_subscription( - // our.to_string(), - // km.id, - // km.source.clone(), - // km.rsvp, - // send_to_loop.clone(), - // eth_action, - // providers.clone(), - // active_subscriptions.clone(), - // ))); - // let mut subs = active_subscriptions - // .entry(km.source.process) - // .or_insert(HashMap::new()); - // subs.insert(sub_id, new_sub); - create_new_subscription( + let new_sub = ActiveSub::Local(tokio::spawn(create_new_subscription( our.to_string(), km.id, km.source.clone(), @@ -275,8 +258,11 @@ async fn handle_eth_action( eth_action, providers.clone(), active_subscriptions.clone(), - ) - .await + ))); + let mut subs = active_subscriptions + .entry(km.source.process) + .or_insert(HashMap::new()); + subs.insert(sub_id, new_sub); } EthAction::UnsubscribeLogs(sub_id) => { active_subscriptions @@ -295,7 +281,7 @@ async fn handle_eth_action( }); } EthAction::Request { .. 
} => { - fulfill_request( + tokio::spawn(fulfill_request( our.to_string(), km.id, km.source.clone(), @@ -304,8 +290,7 @@ async fn handle_eth_action( send_to_loop.clone(), eth_action, providers.clone(), - ) - .await; + )); } } Ok(()) @@ -547,34 +532,16 @@ async fn fulfill_request( } } }; - println!("here5\r"); - let connector = WsConnect { - url: url_provider.url.to_string(), - auth: None, - }; - let client = tokio::time::timeout( - std::time::Duration::from_secs(10), - ClientBuilder::default().ws(connector), + let Ok(response) = tokio::time::timeout( + std::time::Duration::from_secs(timeout), + pubsub.inner().prepare(method, params.clone()), ) - .await.unwrap().unwrap(); - println!("here6\r"); - let provider = Provider::new_with_client(client); - println!("method: {method:?}\r"); - println!("params: {params:?}\r"); - let response = provider.inner().prepare(method, params.clone()).await; - println!("res: {response:?}\r"); - // let Ok(response) = tokio::time::timeout( - // std::time::Duration::from_secs(timeout), - // pubsub.inner().prepare(method, params.clone()), - // ) - // .await - // else { - // println!("what the FUCK\r"); - // // this provider failed and needs to be reset - // url_provider.pubsub = None; - // continue; - // }; - println!("here6\r"); + .await + else { + // this provider failed and needs to be reset + url_provider.pubsub = None; + continue; + }; if let Ok(value) = response { send_to_loop .send(KernelMessage { From 07a25a475794e3ca2b644bd9699623aa3a2d747c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 26 Feb 2024 05:39:49 +0000 Subject: [PATCH 09/23] Format Rust code using rustfmt --- kinode/packages/kns_indexer/get_block/src/lib.rs | 4 +--- kinode/packages/kns_indexer/kns_indexer/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/kinode/packages/kns_indexer/get_block/src/lib.rs b/kinode/packages/kns_indexer/get_block/src/lib.rs index 1c159321c..11c9c2356 100644 --- a/kinode/packages/kns_indexer/get_block/src/lib.rs +++ b/kinode/packages/kns_indexer/get_block/src/lib.rs @@ -1,6 +1,4 @@ -use kinode_process_lib::{ - await_next_request_body, call_init, println, Address, eth, -}; +use kinode_process_lib::{await_next_request_body, call_init, eth, println, Address}; wit_bindgen::generate!({ path: "wit", diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index 8573d2f61..4d166040f 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -190,7 +190,7 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { "KeyUpdate(bytes32,bytes32)", "IpUpdate(bytes32,uint128)", "WsUpdate(bytes32,uint16)", - "RoutingUpdate(bytes32,bytes32[])" + "RoutingUpdate(bytes32,bytes32[])", ]); // 60s timeout -- these calls can take a long time From 8d98f1a6e729b678f04a59dce4ab79ee47cfbe08 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 26 Feb 2024 17:20:56 -0300 Subject: [PATCH 10/23] eth: cleanup, more to come, provider filled out for rpc urls and node providers --- kinode/default_providers_testnet.json | 2 +- .../packages/app_store/app_store/src/lib.rs | 24 +- .../kns_indexer/kns_indexer/src/lib.rs | 32 +- kinode/src/eth/mod.rs | 2 - kinode/src/eth/provider.rs | 732 ++++++++++++------ lib/src/eth.rs | 18 +- 6 files changed, 518 insertions(+), 292 deletions(-) diff --git a/kinode/default_providers_testnet.json b/kinode/default_providers_testnet.json index 
19abedb63..5b4d0496b 100644 --- a/kinode/default_providers_testnet.json +++ b/kinode/default_providers_testnet.json @@ -13,7 +13,7 @@ "chain_id": 11155111, "trusted": false, "provider": { - "RpcUrl": "wss://eth-sepolia.g.alchemy.com/v2/iZZorIE5O93pUSAqyB3INvdxJZ8od_ro" + "RpcUrl": "wss://eth-sepolia.g.alchemy.com/v2/already_deleted_it" }, "public": false, "allow": [], diff --git a/kinode/packages/app_store/app_store/src/lib.rs b/kinode/packages/app_store/app_store/src/lib.rs index 8e59f8c00..9267b66f7 100644 --- a/kinode/packages/app_store/app_store/src/lib.rs +++ b/kinode/packages/app_store/app_store/src/lib.rs @@ -1,6 +1,3 @@ -use kinode_process_lib::eth::{ - Address as EthAddress, EthSub, EthSubResult, Filter, Log, SubscriptionResult, -}; use kinode_process_lib::http::{bind_http_path, serve_ui, HttpServerRequest}; use kinode_process_lib::kernel_types as kt; use kinode_process_lib::*; @@ -59,7 +56,7 @@ pub enum Req { RemoteRequest(RemoteRequest), FTWorkerCommand(FTWorkerCommand), FTWorkerResult(FTWorkerResult), - Eth(EthSubResult), + Eth(eth::EthSubResult), Http(HttpServerRequest), } @@ -71,7 +68,7 @@ pub enum Resp { FTWorkerResult(FTWorkerResult), } -fn fetch_logs(eth_provider: &eth::Provider, filter: &Filter) -> Vec<Log> { +fn fetch_logs(eth_provider: &eth::Provider, filter: &eth::Filter) -> Vec<eth::Log> { loop { match eth_provider.get_logs(filter) { Ok(res) => return res, @@ -84,7 +81,7 @@ fn fetch_logs(eth_provider: &eth::Provider, filter: &Filter) -> Vec<Log> { } } -fn subscribe_to_logs(eth_provider: &eth::Provider, filter: Filter) { +fn subscribe_to_logs(eth_provider: &eth::Provider, filter: eth::Filter) { loop { match eth_provider.subscribe(1, filter.clone()) { Ok(()) => break, @@ -142,9 +139,10 @@ fn init(our: Address) { let mut requested_packages: HashMap<PackageId, RequestedPackage> = HashMap::new(); // get past logs, subscribe to new ones. - let filter = Filter::new() - .address(EthAddress::from_str(&state.contract_address).unwrap()) + let filter = eth::Filter::new() + .address(eth::Address::from_str(&state.contract_address).unwrap()) .from_block(state.last_saved_block - 1) + .to_block(eth::BlockNumberOrTag::Latest) .events(EVENTS); for log in fetch_logs(&eth_provider, &filter) { @@ -227,7 +225,7 @@ fn handle_message( if source.node() != our.node() || source.process != "eth:distro:sys" { return Err(anyhow::anyhow!("eth sub event from weird addr: {source}")); } - if let Ok(EthSub { result, .. }) = eth_result { + if let Ok(eth::EthSub { result, ..
}) = eth_result { handle_eth_sub_event(our, &mut state, result)?; } else { println!("app store: got eth sub error: {eth_result:?}"); @@ -381,8 +379,8 @@ fn handle_local_request( .unsubscribe(1) .expect("app_store: failed to unsub from eth events!"); - let filter = Filter::new() - .address(EthAddress::from_str(&state.contract_address).unwrap()) + let filter = eth::Filter::new() + .address(eth::Address::from_str(&state.contract_address).unwrap()) .from_block(state.last_saved_block - 1) .events(EVENTS); @@ -580,9 +578,9 @@ fn handle_ft_worker_result(body: &[u8], context: &[u8]) -> anyhow::Result<()> { fn handle_eth_sub_event( our: &Address, state: &mut State, - event: SubscriptionResult, + event: eth::SubscriptionResult, ) -> anyhow::Result<()> { - let SubscriptionResult::Log(log) = event else { + let eth::SubscriptionResult::Log(log) = event else { return Err(anyhow::anyhow!("app store: got non-log event")); }; state.ingest_listings_contract_event(our, *log) diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index 4d166040f..346aa5f3e 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -1,11 +1,7 @@ use alloy_sol_types::{sol, SolEvent}; use kinode_process_lib::{ - await_message, - eth::{ - Address as EthAddress, BlockNumberOrTag, EthSub, EthSubResult, Filter, Log, Provider, - SubscriptionResult, - }, - get_typed_state, print_to_terminal, println, set_state, Address, Message, Request, Response, + await_message, eth, get_typed_state, print_to_terminal, println, set_state, Address, Message, + Request, Response, }; use serde::{Deserialize, Serialize}; use std::collections::{ @@ -99,7 +95,7 @@ sol! { event RoutingUpdate(bytes32 indexed node, bytes32[] routers); } -fn subscribe_to_logs(eth_provider: &Provider, filter: Filter) { +fn subscribe_to_logs(eth_provider: &eth::Provider, filter: eth::Filter) { loop { match eth_provider.subscribe(1, filter.clone()) { Ok(()) => break, @@ -181,10 +177,10 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { ))? .send()?; - let filter = Filter::new() - .address(state.contract_address.parse::<EthAddress>().unwrap()) - .to_block(BlockNumberOrTag::Latest) + let filter = eth::Filter::new() + .address(state.contract_address.parse::<eth::Address>().unwrap()) .from_block(state.block - 1) + .to_block(eth::BlockNumberOrTag::Latest) .events(vec![ "NodeRegistered(bytes32,bytes)", "KeyUpdate(bytes32,bytes32)", @@ -195,7 +191,7 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { // 20s timeout -- these calls can take a long time // if they do time out, we try them again - let eth_provider = Provider::new(state.chain_id, 20); + let eth_provider = eth::Provider::new(state.chain_id, 20); // if block in state is < current_block, get logs from that part. if state.block < eth_provider.get_block_number().unwrap_or(u64::MAX) { @@ -288,18 +284,18 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { fn handle_eth_message( our: &Address, state: &mut State, - eth_provider: &Provider, + eth_provider: &eth::Provider, pending_requests: &mut BTreeMap<u64, Vec<IndexerRequests>>, body: &[u8], - filter: &Filter, + filter: &eth::Filter, ) -> anyhow::Result<()> { - let Ok(eth_result) = serde_json::from_slice::<EthSubResult>(body) else { + let Ok(eth_result) = serde_json::from_slice::<eth::EthSubResult>(body) else { return Err(anyhow::anyhow!("kns_indexer: got invalid message")); }; match eth_result { - Ok(EthSub { result, ..
}) => { - if let SubscriptionResult::Log(log) = result { + Ok(eth::EthSub { result, .. }) => { + if let eth::SubscriptionResult::Log(log) = result { handle_log(our, state, &log)?; } } @@ -343,7 +339,7 @@ fn handle_eth_message( Ok(()) } -fn handle_log(our: &Address, state: &mut State, log: &Log) -> anyhow::Result<()> { +fn handle_log(our: &Address, state: &mut State, log: &eth::Log) -> anyhow::Result<()> { state.block = log.block_number.expect("expect").to::<u64>(); let node_id = log.topics[1]; @@ -422,7 +418,7 @@ fn handle_log(our: &Address, state: &mut State, log: &Log) -> anyhow::Result<()> Ok(()) } -fn get_name(log: &Log) -> String { +fn get_name(log: &eth::Log) -> String { let decoded = NodeRegistered::abi_decode_data(&log.data, true).unwrap(); let name = match dnswire_decode(decoded.0.clone()) { Ok(n) => n, diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index 6257bab40..a6b442703 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -1,4 +1,2 @@ -#![allow(unused)] pub mod provider; - pub use lib::types::eth as types; diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index 219119c71..5bd51b55d 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -10,11 +10,19 @@ use lib::types::core::*; use lib::types::eth::*; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; -use std::str::FromStr; use std::sync::Arc; use tokio::task::JoinHandle; use url::Url; +/// meta-type for all incoming requests we need to handle +#[derive(Debug, Serialize, Deserialize)] +#[serde(untagged)] +enum IncomingReq { + EthAction(EthAction), + EthConfigAction(EthConfigAction), + EthSubResult(EthSubResult), +} + /// mapping of chain id to ordered lists of providers type Providers = Arc<DashMap<u64, ActiveProviders>>; @@ -37,8 +45,8 @@ struct NodeProvider { pub name: String, } -/// existing subscriptions held by local processes -type ActiveSubscriptions = Arc<DashMap<ProcessId, HashMap<u64, ActiveSub>>>; +/// existing subscriptions held by local OR remote processes +type ActiveSubscriptions = Arc<DashMap<Address, HashMap<u64, ActiveSub>>>; #[derive(Debug)] enum ActiveSub { @@ -101,9 +109,8 @@ pub async fn provider( send_to_loop: MessageSender, mut recv_in_client: MessageReceiver, caps_oracle: CapMessageSender, - print_tx: PrintSender, + _print_tx: PrintSender, ) -> Result<()> { - println!("provider: on\r"); let our = Arc::new(our); let mut access_settings = AccessSettings { @@ -122,11 +129,13 @@ pub async fn provider( ap.add_provider_config(entry); } - println!("providers: {providers:?}\r"); - // handles of longrunning subscriptions.
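The `IncomingReq` meta-type above replaces the old try-each-deserialize cascade: with `#[serde(untagged)]`, serde itself attempts each variant in declaration order and returns the first one that fits. A self-contained sketch of the same demux technique, with simplified stand-in payloads:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum Incoming {
    Action { method: String },
    Config { public: bool },
}

fn main() {
    // variants are tried top to bottom until one deserializes cleanly
    let a: Incoming = serde_json::from_slice(br#"{"method":"eth_getLogs"}"#).unwrap();
    let b: Incoming = serde_json::from_slice(br#"{"public":true}"#).unwrap();
    println!("{a:?} / {b:?}");
}
```

One consequence the real enum inherits: ordering matters, since a payload that happens to fit an earlier variant will never reach a later one.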
let mut active_subscriptions: ActiveSubscriptions = Arc::new(DashMap::new()); + // channels to pass incoming responses to outstanding requests + // keyed by KM ID + let mut response_channels: Arc<DashMap<u64, MessageSender>> = Arc::new(DashMap::new()); + while let Some(km) = recv_in_client.recv().await { let km_id = km.id; let response_target = km.rsvp.as_ref().unwrap_or(&km.source).clone(); @@ -138,12 +147,11 @@ pub async fn provider( &caps_oracle, &mut providers, &mut active_subscriptions, + &mut response_channels, ) .await { - let _ = send_to_loop - .send(make_error_message(&our, km_id, response_target, e)) - .await; + error_message(&our, km_id, response_target, e, &send_to_loop).await; }; } Err(anyhow::anyhow!("eth: fatal: message receiver closed!")) @@ -159,66 +167,83 @@ async fn handle_message( caps_oracle: &CapMessageSender, providers: &mut Providers, active_subscriptions: &mut ActiveSubscriptions, + response_channels: &mut Arc<DashMap<u64, MessageSender>>, ) -> Result<(), EthError> { println!("provider: handle_message\r"); match &km.message { - Message::Response(_) => handle_passthrough_response(our, send_to_loop, km).await, + Message::Response(_) => { + // map response to the correct channel + if let Some((_id, sender)) = response_channels.remove(&km.id) { + // can't close channel here, as response may be an error + // and fulfill_request may wish to try other providers. + let _ = sender.send(km).await; + } else { + println!("eth: got weird response!!\r"); + } + Ok(()) + } Message::Request(req) => { let timeout = *req.expects_response.as_ref().unwrap_or(&60); // TODO make this a config - if let Ok(eth_action) = serde_json::from_slice(&req.body) { - // these can be from remote or local processes - return handle_eth_action( - our, - access_settings, - send_to_loop, - km, - timeout, - eth_action, - providers, - active_subscriptions, - ) - .await; - } - if let Ok(eth_config_action) = serde_json::from_slice(&req.body) { - // only local node - return handle_eth_config_action( - our, - access_settings, - caps_oracle, - km, - eth_config_action, - providers, - ) - .await; + let Ok(req) = serde_json::from_slice::<IncomingReq>(&req.body) else { + return Err(EthError::MalformedRequest); + }; + match req { + IncomingReq::EthAction(eth_action) => { + handle_eth_action( + our, + access_settings, + send_to_loop, + km, + timeout, + eth_action, + providers, + active_subscriptions, + response_channels, + ) + .await + } + IncomingReq::EthConfigAction(eth_config_action) => { + kernel_message( + our, + km.id, + km.source.clone(), + km.rsvp.clone(), + false, + None, + handle_eth_config_action( + our, + access_settings, + caps_oracle, + &km, + eth_config_action, + providers, + ) + .await, + send_to_loop, + ) + .await; + Ok(()) + } + IncomingReq::EthSubResult(eth_sub_result) => { + // forward this to rsvp + kernel_message( + our, + km.id, + km.source.clone(), + km.rsvp.clone(), + true, + None, + eth_sub_result, + send_to_loop, + ) + .await; + Ok(()) + } } - Err(EthError::PermissionDenied) } } } -async fn handle_passthrough_response( - our: &str, - send_to_loop: &MessageSender, - km: KernelMessage, -) -> Result<(), EthError> { - println!("provider: handle_passthrough_response\r"); - send_to_loop - .send(KernelMessage { - id: rand::random(), - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target: km.rsvp.unwrap_or(km.source), - rsvp: None, - message: km.message, - lazy_load_blob: None, - }) - .await - .expect("eth: kernel sender died!"); - Ok(()) -} - async fn handle_eth_action( our: &str, access_settings: &mut AccessSettings, @@
-228,6 +253,7 @@ async fn handle_eth_action( eth_action: EthAction, providers: &mut Providers, active_subscriptions: &mut ActiveSubscriptions, + response_channels: &mut Arc<DashMap<u64, MessageSender>>, ) -> Result<(), EthError> { println!("provider: handle_eth_action: {eth_action:?}\r"); // check our access settings if the request is from a remote node @@ -249,48 +275,85 @@ async fn handle_eth_action( // before returning an error. match eth_action { EthAction::SubscribeLogs { sub_id, .. } => { - let new_sub = ActiveSub::Local(tokio::spawn(create_new_subscription( + create_new_subscription( our.to_string(), km.id, km.source.clone(), km.rsvp, send_to_loop.clone(), + sub_id, eth_action, providers.clone(), active_subscriptions.clone(), - ))); - let mut subs = active_subscriptions - .entry(km.source.process) - .or_insert(HashMap::new()); - subs.insert(sub_id, new_sub); + response_channels.clone(), + ) + .await; } EthAction::UnsubscribeLogs(sub_id) => { - active_subscriptions - .entry(km.source.process) - .and_modify(|sub_map| { - if let Some(sub) = sub_map.get_mut(&sub_id) { - match sub { - ActiveSub::Local(handle) => { - handle.abort(); - } - ActiveSub::Remote(node) => { - // TODO send to them asking to abort - } - } + let mut sub_map = active_subscriptions + .entry(km.source) + .or_insert(HashMap::new()); + if let Some(sub) = sub_map.remove(&sub_id) { + match sub { + ActiveSub::Local(handle) => { + handle.abort(); } - }); + ActiveSub::Remote(node) => { + kernel_message( + our, + rand::random(), + Address { + node: node.clone(), + process: ETH_PROCESS_ID.clone(), + }, + None, + true, + Some(60), // TODO + serde_json::to_vec(&eth_action).unwrap(), + send_to_loop, + ) + .await; + } + } + } } EthAction::Request { .. } => { - tokio::spawn(fulfill_request( - our.to_string(), - km.id, - km.source.clone(), - km.rsvp, - timeout, - send_to_loop.clone(), - eth_action, - providers.clone(), - )); + let (sender, receiver) = tokio::sync::mpsc::channel(1); + response_channels.insert(km.id, sender); + let our = our.to_string(); + let send_to_loop = send_to_loop.clone(); + let providers = providers.clone(); + let response_channels = response_channels.clone(); + tokio::spawn(async move { + let res = tokio::time::timeout( + std::time::Duration::from_secs(timeout), + fulfill_request(&our, &send_to_loop, eth_action, providers, receiver), + ) + .await; + match res { + Ok(Ok(response)) => { + kernel_message( + &our, + km.id, + km.source, + km.rsvp, + false, + None, + response, + &send_to_loop, + ) + .await; + } + Ok(Err(e)) => { + error_message(&our, km.id, km.source, e, &send_to_loop).await; + } + Err(_) => { + error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) + .await; + } + } + response_channels.remove(&km.id); + }); } } Ok(()) @@ -303,9 +366,11 @@ async fn create_new_subscription( target: Address, rsvp: Option
, send_to_loop: MessageSender, + sub_id: u64, eth_action: EthAction, providers: Providers, active_subscriptions: ActiveSubscriptions, + response_channels: Arc<DashMap<u64, MessageSender>>, ) { println!("provider: create_new_subscription\r"); match build_subscription( @@ -316,51 +381,54 @@ async fn create_new_subscription( send_to_loop.clone(), &eth_action, providers, + response_channels.clone(), ) .await { - Ok(future) => { + Ok((Some(future), None)) => { + // this is a local sub // send a response to the target that the subscription was successful - send_to_loop - .send(KernelMessage { - id: km_id, - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target: target.clone(), - rsvp: rsvp.clone(), - message: Message::Response(( - Response { - inherit: false, - body: serde_json::to_vec(&EthResponse::Ok).unwrap(), - metadata: None, - capabilities: vec![], - }, - None, - )), - lazy_load_blob: None, - }) - .await - .expect("eth: sender died!"); - // await the subscription error and kill it if so - if let Err(e) = future.await { - let _ = send_to_loop - .send(make_error_message(&our, km_id, target.clone(), e)) - .await; - } + kernel_message( + &our, + km_id, + target.clone(), + rsvp.clone(), + false, + None, + EthResponse::Ok, + &send_to_loop, + ) + .await; + let mut subs = active_subscriptions + .entry(target.clone()) + .or_insert(HashMap::new()); + let target2 = target.clone(); + let active_subs = active_subscriptions.clone(); + subs.insert( + sub_id, + ActiveSub::Local(tokio::spawn(async move { + // await the subscription error and kill it if so + if let Err(e) = future.await { + error_message(&our, km_id, target2.clone(), e, &send_to_loop).await; + active_subs.entry(target2).and_modify(|sub_map| { + sub_map.remove(&km_id); + }); + } + })), + ); + } + Ok((None, Some(provider_node))) => { + // this is a remote sub + let mut subs = active_subscriptions + .entry(target.clone()) + .or_insert(HashMap::new()); + subs.insert(sub_id, ActiveSub::Remote(provider_node)); } Err(e) => { - let _ = send_to_loop - .send(make_error_message(&our, km_id, target.clone(), e)) - .await; + error_message(&our, km_id, target.clone(), e, &send_to_loop).await; } + _ => panic!(), } - active_subscriptions - .entry(target.process) - .and_modify(|sub_map| { - sub_map.remove(&km_id); - }); } async fn build_subscription( @@ -371,7 +439,15 @@ async fn build_subscription( send_to_loop: MessageSender, eth_action: &EthAction, providers: Providers, -) -> Result<impl Future<Output = Result<(), EthError>>, EthError> { + response_channels: Arc<DashMap<u64, MessageSender>>, +) -> Result< + ( + // this is dumb, sorry + Option<impl Future<Output = Result<(), EthError>>>, + Option<String>, + ), + EthError, > { println!("provider: build_subscription\r"); let EthAction::SubscribeLogs { sub_id, @@ -409,20 +485,79 @@ async fn build_subscription( .await { let rx = pubsub.inner().get_raw_subscription(id).await; - return Ok(maintain_subscription( - our, - *sub_id, - rx, - target, - rsvp, - send_to_loop, + return Ok(( + Some(maintain_subscription( + our, + *sub_id, + rx, + target, + rsvp, + send_to_loop, + )), + None, )); } // this provider failed and needs to be reset url_provider.pubsub = None; } + // now we need a response channel + let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1); + response_channels.insert(km_id, sender); for node_provider in &aps.nodes { - // todo + // in order, forward the request to each node provider + // until one sends back a satisfactory response + kernel_message( + &our, + rand::random(), + Address { + node: node_provider.name.clone(), + process: ETH_PROCESS_ID.clone(), + }, + None, + true, + Some(60), // TODO
+ serde_json::to_vec(&eth_action).unwrap(), + &send_to_loop, + ) + .await; + let Some(response_km) = response_receiver.recv().await else { + // never hit this + continue; + }; + let Message::Response((resp, _context)) = response_km.message else { + // if we hit this, they spoofed a request with same id, ignore and possibly punish + continue; + }; + let Ok(eth_response) = serde_json::from_slice::<EthResponse>(&resp.body) else { + // if we hit this, they sent a malformed response, ignore and possibly punish + continue; + }; + if let EthResponse::Response { .. } = &eth_response { + // if we hit this, they sent a response instead of a subscription, ignore and possibly punish + continue; + } + if let EthResponse::Err(error) = &eth_response { + // if we hit this, they sent an error, if it's an error that might + // not be our fault, we can try another provider + match error { + EthError::NoRpcForChain => continue, + EthError::PermissionDenied => continue, + _ => {} + } + } + kernel_message( + &our, + km_id, + target, + None, + false, + None, + EthResponse::Ok, + &send_to_loop, + ) + .await; + response_channels.remove(&km_id); + return Ok((None, Some(node_provider.name.clone()))); } return Err(EthError::NoRpcForChain); } @@ -438,7 +573,7 @@ async fn maintain_subscription( println!("provider: maintain_subscription\r"); loop { match rx.recv().await { - Err(e) => { + Err(_e) => { return Err(EthError::SubscriptionClosed(sub_id)); } Ok(value) => { @@ -448,78 +583,46 @@ async fn maintain_subscription( "eth: failed to deserialize subscription result".to_string(), ) })?; - send_to_loop - .send(KernelMessage { - id: rand::random(), - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target: target.clone(), - rsvp: rsvp.clone(), - message: Message::Request(Request { - inherit: false, - expects_response: None, - body: serde_json::to_vec(&EthSubResult::Ok(EthSub { - id: sub_id, - result, - })) - .unwrap(), - metadata: None, - capabilities: vec![], - }), - lazy_load_blob: None, - }) - .await - .map_err(|_| EthError::RpcError("eth: sender died".to_string()))?; + kernel_message( + &our, + rand::random(), + target.clone(), + rsvp.clone(), + true, + None, + EthSubResult::Ok(EthSub { id: sub_id, result }), + &send_to_loop, + ) + .await; } } } } async fn fulfill_request( - our: String, - km_id: u64, - target: Address, - rsvp: Option
, - timeout: u64, - send_to_loop: MessageSender, + our: &str, + send_to_loop: &MessageSender, eth_action: EthAction, providers: Providers, -) { + mut remote_request_receiver: MessageReceiver, +) -> Result<EthResponse, EthError> { println!("provider: fulfill_request\r"); let EthAction::Request { chain_id, - method, - params, + ref method, + ref params, } = eth_action else { - return; + return Err(EthError::PermissionDenied); // will never hit }; let Some(method) = to_static_str(&method) else { - let _ = send_to_loop - .send(make_error_message( - &our, - km_id, - target, - EthError::InvalidMethod(method), - )) - .await; - return; + return Err(EthError::InvalidMethod(method.to_string())); }; let Some(mut aps) = providers.get_mut(&chain_id) else { - let _ = send_to_loop - .send(make_error_message( - &our, - km_id, - target, - EthError::NoRpcForChain, - )) - .await; - return; + return Err(EthError::NoRpcForChain); }; // first, try any url providers we have for this chain, - // then if we have none or they all fail, go to node providers. + // then if we have none or they all fail, go to node provider. // finally, if no provider works, return an error. for url_provider in &mut aps.urls { let pubsub = match &url_provider.pubsub { @@ -532,68 +635,67 @@ async fn fulfill_request( } } }; - let Ok(response) = tokio::time::timeout( - std::time::Duration::from_secs(timeout), - pubsub.inner().prepare(method, params.clone()), - ) - .await - else { + let Ok(value) = pubsub.inner().prepare(method, params.clone()).await else { // this provider failed and needs to be reset url_provider.pubsub = None; continue; }; - if let Ok(value) = response { - send_to_loop - .send(KernelMessage { - id: km_id, - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target, - rsvp, - message: Message::Response(( - Response { - inherit: false, - body: serde_json::to_vec(&EthResponse::Response { value }).unwrap(), - metadata: None, - capabilities: vec![], - }, - None, - )), - lazy_load_blob: None, - }) - .await - .expect("eth: sender died!"); - return; - } - // this provider failed and needs to be reset - url_provider.pubsub = None; + return Ok(EthResponse::Response { value }); } for node_provider in &aps.nodes { - // todo - } - let _ = send_to_loop - .send(make_error_message( - &our, - km_id, - target, - EthError::NoRpcForChain, - )) + // in order, forward the request to each node provider + // until one sends back a satisfactory response + kernel_message( + our, + rand::random(), + Address { + node: node_provider.name.clone(), + process: ETH_PROCESS_ID.clone(), + }, + None, + true, + Some(60), // TODO + serde_json::to_vec(&eth_action).unwrap(), + &send_to_loop, + ) .await; + let Some(response_km) = remote_request_receiver.recv().await else { + // never hit this + continue; + }; + let Message::Response((resp, _context)) = response_km.message else { + // if we hit this, they spoofed a request with same id, ignore and possibly punish + continue; + }; + let Ok(eth_response) = serde_json::from_slice::<EthResponse>(&resp.body) else { + // if we hit this, they sent a malformed response, ignore and possibly punish + continue; + }; + if let EthResponse::Err(error) = &eth_response { + // if we hit this, they sent an error, if it's an error that might + // not be our fault, we can try another provider + match error { + EthError::NoRpcForChain => continue, + EthError::PermissionDenied => continue, + _ => {} + } + } + return Ok(eth_response); + } + Err(EthError::NoRpcForChain) } async fn handle_eth_config_action( our: &str, access_settings: &mut
AccessSettings, caps_oracle: &CapMessageSender, - km: KernelMessage, + km: &KernelMessage, eth_config_action: EthConfigAction, providers: &mut Providers, -) -> Result<(), EthError> { +) -> EthConfigResponse { println!("provider: handle_eth_config_action\r"); if km.source.node != our { - return Err(EthError::PermissionDenied); + return EthConfigResponse::PermissionDenied; } // check capabilities to ensure the sender is allowed to make this request let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel(); @@ -615,32 +717,152 @@ async fn handle_eth_config_action( .await .expect("eth: capability oracle died!"); if !recv_cap_bool.await.unwrap_or(false) { - return Err(EthError::PermissionDenied); + return EthConfigResponse::PermissionDenied; } // modify our providers and access settings based on config action - todo!() + match eth_config_action { + EthConfigAction::AddProvider(provider) => { + let mut aps = providers + .entry(provider.chain_id) + .or_insert(ActiveProviders { + urls: vec![], + nodes: vec![], + }); + aps.add_provider_config(provider); + } + EthConfigAction::RemoveProvider((chain_id, remove)) => { + if let Some(mut aps) = providers.get_mut(&chain_id) { + aps.remove_provider(&remove); + } + } + EthConfigAction::SetPublic => { + access_settings.public = true; + } + EthConfigAction::SetPrivate => { + access_settings.public = false; + } + EthConfigAction::AllowNode(node) => { + access_settings.allow.insert(node); + } + EthConfigAction::UnallowNode(node) => { + access_settings.allow.remove(&node); + } + EthConfigAction::DenyNode(node) => { + access_settings.deny.insert(node); + } + EthConfigAction::UndenyNode(node) => { + access_settings.deny.remove(&node); + } + EthConfigAction::SetProviders(new_providers) => { + let new_map = DashMap::new(); + for entry in new_providers { + let mut aps = new_map.entry(entry.chain_id).or_insert(ActiveProviders { + urls: vec![], + nodes: vec![], + }); + aps.add_provider_config(entry); + } + *providers = Arc::new(new_map); + } + EthConfigAction::GetProviders => { + return EthConfigResponse::Providers( + providers + .iter() + .map(|entry| { + entry + .urls + .iter() + .map(|url_provider| ProviderConfig { + chain_id: *entry.key(), + provider: NodeOrRpcUrl::RpcUrl(url_provider.url.clone()), + trusted: url_provider.trusted, + }) + .chain(entry.nodes.iter().map(|node_provider| ProviderConfig { + chain_id: *entry.key(), + provider: NodeOrRpcUrl::Node(KnsUpdate { + name: node_provider.name.clone(), + owner: "".to_string(), + node: "".to_string(), + public_key: "".to_string(), + ip: "".to_string(), + port: 0, + routers: vec![], + }), + trusted: node_provider.trusted, + })) + .collect::<Vec<_>>() + }) + .flatten() + .collect(), + ); + } + EthConfigAction::GetAccessSettings => { + return EthConfigResponse::AccessSettings(access_settings.clone()); + } + } + EthConfigResponse::Ok } -fn make_error_message(our: &str, km_id: u64, target: Address, error: EthError) -> KernelMessage { - println!("provider: make_error_message\r"); - KernelMessage { - id: km_id, - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, +async fn error_message( + our: &str, + km_id: u64, + target: Address, + error: EthError, + send_to_loop: &MessageSender, +) { + kernel_message( + our, + km_id, target, - rsvp: None, - message: Message::Response(( - Response { - inherit: false, - body: serde_json::to_vec(&EthResponse::Err(error)).unwrap(), - metadata: None, - capabilities: vec![], + None, + false, + None, + EthResponse::Err(error), + send_to_loop, + ) + .await
+}

+async fn kernel_message<T: Serialize>( + our: &str, + km_id: u64, + target: Address, + rsvp: Option
, + req: bool, + timeout: Option<u64>, + body: T, + send_to_loop: &MessageSender, +) { + let _ = send_to_loop + .send(KernelMessage { + id: km_id, + source: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), }, - None, - )), - lazy_load_blob: None, - } + target, + rsvp, + message: if req { + Message::Request(Request { + inherit: false, + expects_response: timeout, + body: serde_json::to_vec(&body).unwrap(), + metadata: None, + capabilities: vec![], + }) + } else { + Message::Response(( + Response { + inherit: false, + body: serde_json::to_vec(&body).unwrap(), + metadata: None, + capabilities: vec![], + }, + None, + )) + }, + lazy_load_blob: None, + }) + .await; } diff --git a/lib/src/eth.rs b/lib/src/eth.rs index e06830e13..1a2b42b34 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -26,13 +26,13 @@ pub enum EthAction { }, } -/// Incoming Result type for subscription updates or errors that processes will receive. +/// Incoming `Request` containing subscription updates or errors that processes will receive. /// Can deserialize all incoming requests from eth:distro:sys to this type. /// /// Will be serialized and deserialized using `serde_json::to_vec` and `serde_json::from_slice`. pub type EthSubResult = Result<EthSub, EthSubError>; -/// Incoming Request type for successful subscription updates. +/// Incoming type for successful subscription updates. #[derive(Debug, Serialize, Deserialize)] pub struct EthSub { pub id: u64, @@ -49,6 +49,9 @@ pub struct EthSubError { /// The Response type which a process will get from requesting with an [`EthAction`] will be /// of this type, serialized and deserialized using `serde_json::to_vec` /// and `serde_json::from_slice`. +/// +/// In the case of an [`EthAction::SubscribeLogs`] request, the response will indicate if +/// the subscription was successfully created or not. #[derive(Debug, Serialize, Deserialize)] pub enum EthResponse { Ok, @@ -58,6 +61,8 @@ pub enum EthError { #[derive(Debug, Serialize, Deserialize)] pub enum EthError { + /// provider module cannot parse message + MalformedRequest, /// No RPC provider for the chain NoRpcForChain, /// Underlying transport error @@ -105,8 +110,10 @@ pub enum EthConfigAction { /// Set the list of providers to a new list. /// Replaces all existing saved provider configs. SetProviders(SavedConfigs), - /// Get the list of as a [`SavedConfigs`] object. + /// Get the list of current providers as a [`SavedConfigs`] object. GetProviders, + /// Get the current access settings. + GetAccessSettings, } /// Response type from an [`EthConfigAction`] request. @@ -114,7 +121,11 @@ pub enum EthConfigAction { pub enum EthConfigResponse { Ok, /// Response from a GetProviders request. + /// Note the [`crate::core::KnsUpdate`] will only have the correct `name` field. + /// The rest of the Update is not saved in this module. Providers(SavedConfigs), + /// Response from a GetAccessSettings request.
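To make the config surface concrete, this is roughly what a root-capable process's side of the `GetAccessSettings` round trip could look like, with the variant below carrying the settings back. A hedged sketch only: it assumes these types are re-exported through `kinode_process_lib::eth` and leans on the generic `Request` builder rather than any dedicated helper:

```rust
use kinode_process_lib::{eth, Request};

fn get_access_settings() -> anyhow::Result<eth::EthConfigResponse> {
    let resp = Request::to(("our", "eth", "distro", "sys"))
        .body(serde_json::to_vec(&eth::EthConfigAction::GetAccessSettings)?)
        .send_and_await_response(5)??;
    Ok(serde_json::from_slice(resp.body())?)
}
```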
+ AccessSettings(AccessSettings), /// Permission denied due to missing capability PermissionDenied, } @@ -157,6 +168,7 @@ pub fn to_static_str(method: &str) -> Option<&'static str> { } /// Settings for our ETH provider +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct AccessSettings { pub public: bool, // whether or not other nodes can access through us pub allow: HashSet, // whitelist for access (only used if public == false) From 7258e849783dfa4c46653173f6ebc436658047e0 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 26 Feb 2024 17:32:27 -0300 Subject: [PATCH 11/23] eth error factor out unused --- kinode/src/eth/provider.rs | 45 ++++++++++------------- lib/src/eth.rs | 74 +++++++++++++++++--------------------- 2 files changed, 51 insertions(+), 68 deletions(-) diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs index 5bd51b55d..66530f5de 100644 --- a/kinode/src/eth/provider.rs +++ b/kinode/src/eth/provider.rs @@ -456,9 +456,7 @@ async fn build_subscription( params, } = eth_action else { - return Err(EthError::InvalidMethod( - "eth: only accepts subscribe logs requests".to_string(), - )); + return Err(EthError::PermissionDenied); // will never hit }; let Some(mut aps) = providers.get_mut(&chain_id) else { return Err(EthError::NoRpcForChain); @@ -572,30 +570,23 @@ async fn maintain_subscription( ) -> Result<(), EthError> { println!("provider: maintain_subscription\r"); loop { - match rx.recv().await { - Err(_e) => { - return Err(EthError::SubscriptionClosed(sub_id)); - } - Ok(value) => { - let result: SubscriptionResult = - serde_json::from_str(value.get()).map_err(|_| { - EthError::RpcError( - "eth: failed to deserialize subscription result".to_string(), - ) - })?; - kernel_message( - &our, - rand::random(), - target.clone(), - rsvp.clone(), - true, - None, - EthSubResult::Ok(EthSub { id: sub_id, result }), - &send_to_loop, - ) - .await; - } - } + let value = rx + .recv() + .await + .map_err(|_| EthError::SubscriptionClosed(sub_id))?; + let result: SubscriptionResult = + serde_json::from_str(value.get()).map_err(|_| EthError::SubscriptionClosed(sub_id))?; + kernel_message( + &our, + rand::random(), + target.clone(), + rsvp.clone(), + true, + None, + EthSubResult::Ok(EthSub { id: sub_id, result }), + &send_to_loop, + ) + .await; } } diff --git a/lib/src/eth.rs b/lib/src/eth.rs index 1a2b42b34..4d11f4c1a 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -65,20 +65,12 @@ pub enum EthError { MalformedRequest, /// No RPC provider for the chain NoRpcForChain, - /// Underlying transport error - TransportError(String), /// Subscription closed SubscriptionClosed(u64), - /// The subscription ID was not found, so we couldn't unsubscribe. - SubscriptionNotFound, /// Invalid method InvalidMethod(String), - /// Invalid params - InvalidParams, /// Permission denied PermissionDenied, - /// Internal RPC error - RpcError(String), /// RPC timed out RpcTimeout, } @@ -130,6 +122,39 @@ pub enum EthConfigResponse { PermissionDenied, } +/// Settings for our ETH provider +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct AccessSettings { + pub public: bool, // whether or not other nodes can access through us + pub allow: HashSet, // whitelist for access (only used if public == false) + pub deny: HashSet, // blacklist for access (always used) +} + +pub type SavedConfigs = Vec; + +/// Provider config. Can currently be a node or a ws provider instance. 
+#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ProviderConfig { + pub chain_id: u64, + pub trusted: bool, + pub provider: NodeOrRpcUrl, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub enum NodeOrRpcUrl { + Node(crate::core::KnsUpdate), + RpcUrl(String), +} + +impl std::cmp::PartialEq for NodeOrRpcUrl { + fn eq(&self, other: &str) -> bool { + match self { + NodeOrRpcUrl::Node(kns) => kns.name == other, + NodeOrRpcUrl::RpcUrl(url) => url == other, + } + } +} + // // Internal types // @@ -166,36 +191,3 @@ pub fn to_static_str(method: &str) -> Option<&'static str> { _ => None, } } - -/// Settings for our ETH provider -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct AccessSettings { - pub public: bool, // whether or not other nodes can access through us - pub allow: HashSet, // whitelist for access (only used if public == false) - pub deny: HashSet, // blacklist for access (always used) -} - -pub type SavedConfigs = Vec; - -/// Provider config. Can currently be a node or a ws provider instance. -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct ProviderConfig { - pub chain_id: u64, - pub trusted: bool, - pub provider: NodeOrRpcUrl, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub enum NodeOrRpcUrl { - Node(crate::core::KnsUpdate), - RpcUrl(String), -} - -impl std::cmp::PartialEq for NodeOrRpcUrl { - fn eq(&self, other: &str) -> bool { - match self { - NodeOrRpcUrl::Node(kns) => kns.name == other, - NodeOrRpcUrl::RpcUrl(url) => url == other, - } - } -} From 67ea504c09c5293aae85f4111f74b9ed1f3e5943 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 26 Feb 2024 17:44:44 -0300 Subject: [PATCH 12/23] re-add missing errors to EthError --- Cargo.lock | 18 +++++++++--------- kinode/packages/app_store/app_store/Cargo.toml | 2 +- .../packages/kns_indexer/get_block/Cargo.toml | 2 +- .../kns_indexer/kns_indexer/Cargo.toml | 2 +- lib/src/eth.rs | 4 ++++ 5 files changed, 16 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93596fac2..80c791a4c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -448,7 +448,7 @@ dependencies = [ "alloy-sol-types", "anyhow", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=d1c29c2)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=9838f5d)", "rand 0.8.5", "serde", "serde_json", @@ -2008,7 +2008,7 @@ dependencies = [ name = "get_block" version = "0.1.0" dependencies = [ - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=d1c29c2)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=9838f5d)", "serde", "serde_json", "wit-bindgen", @@ -2713,8 +2713,12 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib.git?rev=9d185e1#9d185e1e264c93af53d004ba32520fd5d046e7e5" +source = "git+https://github.com/kinode-dao/process_lib?rev=9838f5d#9838f5d1cb0b2b6d63ad4d82c9628ff3e8d33dff" dependencies = [ + "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-primitives", + "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "anyhow", "bincode", "http 1.0.0", @@ -2730,12 +2734,8 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib?rev=d1c29c2#d1c29c20ab7be0e1cf98ee96711b719e147cfc56" +source = 
"git+https://github.com/kinode-dao/process_lib.git?rev=9d185e1#9d185e1e264c93af53d004ba32520fd5d046e7e5" dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", - "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", "anyhow", "bincode", "http 1.0.0", @@ -2786,7 +2786,7 @@ dependencies = [ "anyhow", "bincode", "hex", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=d1c29c2)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=9838f5d)", "rmp-serde", "serde", "serde_json", diff --git a/kinode/packages/app_store/app_store/Cargo.toml b/kinode/packages/app_store/app_store/Cargo.toml index be213dab9..a215763fd 100644 --- a/kinode/packages/app_store/app_store/Cargo.toml +++ b/kinode/packages/app_store/app_store/Cargo.toml @@ -9,7 +9,7 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" anyhow = "1.0" bincode = "1.3.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "d1c29c2" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "9838f5d" } rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/kns_indexer/get_block/Cargo.toml b/kinode/packages/kns_indexer/get_block/Cargo.toml index 37674ec1c..330327c51 100644 --- a/kinode/packages/kns_indexer/get_block/Cargo.toml +++ b/kinode/packages/kns_indexer/get_block/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "d1c29c2" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "9838f5d" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml index 344b617a0..b0aae5876 100644 --- a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml +++ b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml @@ -10,7 +10,7 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" bincode = "1.3.3" hex = "0.4.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "d1c29c2" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "9838f5d" } rmp-serde = "1.1.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/lib/src/eth.rs b/lib/src/eth.rs index 4d11f4c1a..c8cfd1e56 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -69,10 +69,14 @@ pub enum EthError { SubscriptionClosed(u64), /// Invalid method InvalidMethod(String), + /// Invalid parameters + InvalidParams, /// Permission denied PermissionDenied, /// RPC timed out RpcTimeout, + /// RPC gave garbage back + RpcMalformedResponse, } /// The action type used for configuring eth:distro:sys. 
Only processes which have the "root" From 3299e3135b25baa8394e178b592d6fad4bb7d268 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 26 Feb 2024 20:46:27 -0300 Subject: [PATCH 13/23] whoa: refactor provider to deal with network errors --- kinode/default_providers_mainnet.json | 71 +- kinode/default_providers_testnet.json | 100 +-- kinode/packages/app_store/pkg/manifest.json | 6 + kinode/packages/terminal/pkg/manifest.json | 6 + kinode/src/eth/mod.rs | 900 +++++++++++++++++++- kinode/src/eth/provider.rs | 859 ------------------- kinode/src/kernel/mod.rs | 50 +- kinode/src/kernel/process.rs | 9 +- kinode/src/main.rs | 15 +- kinode/src/state.rs | 8 +- lib/src/core.rs | 8 +- lib/src/eth.rs | 7 +- 12 files changed, 1058 insertions(+), 981 deletions(-) delete mode 100644 kinode/src/eth/provider.rs diff --git a/kinode/default_providers_mainnet.json b/kinode/default_providers_mainnet.json index a01899ae3..8a04c72af 100644 --- a/kinode/default_providers_mainnet.json +++ b/kinode/default_providers_mainnet.json @@ -4,63 +4,60 @@ "trusted": false, "provider": { "RpcUrl": "wss://ethereum.publicnode.com" - }, - "public": false, - "allow": [], - "deny": [] + } }, { "chain_id": 10, "trusted": true, "provider": { "Node": { - "name": "default-router-1.os", - "owner": "", - "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", - "public_key": "0xb1b1cf23c89f651aac3e5fd4decb04aa177ab0ec8ce5f1d3877b90bb6f5779db", - "ip": "147.135.114.167", - "port": 9005, - "routers": [] + "use_as_provider": true, + "kns_update": { + "name": "default-router-1.os", + "owner": "", + "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", + "public_key": "0xb1b1cf23c89f651aac3e5fd4decb04aa177ab0ec8ce5f1d3877b90bb6f5779db", + "ip": "147.135.114.167", + "port": 9005, + "routers": [] + } } - }, - "public": false, - "allow": [], - "deny": [] + } }, { "chain_id": 10, "trusted": true, "provider": { "Node": { - "name": "default-router-2.os", - "owner": "", - "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", - "public_key": "0xab9f1a996db3a4e1dbcd31d765daedeb3af9af4f570c0968463b5be3a7d1e992", - "ip": "147.135.114.167", - "port": 9006, - "routers": [] + "use_as_provider": true, + "kns_update": { + "name": "default-router-2.os", + "owner": "", + "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", + "public_key": "0xab9f1a996db3a4e1dbcd31d765daedeb3af9af4f570c0968463b5be3a7d1e992", + "ip": "147.135.114.167", + "port": 9006, + "routers": [] + } } - }, - "public": false, - "allow": [], - "deny": [] + } }, { "chain_id": 10, "trusted": true, "provider": { "Node": { - "name": "default-router-3.os", - "owner": "", - "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a", - "public_key": "0x536e30785e64dd0349a697285af365b5ed7c4d300010139261cfc4dbdd5b254b", - "ip": "147.135.114.167", - "port": 9007, - "routers": [] + "use_as_provider": true, + "kns_update": { + "name": "default-router-3.os", + "owner": "", + "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a", + "public_key": "0x536e30785e64dd0349a697285af365b5ed7c4d300010139261cfc4dbdd5b254b", + "ip": "147.135.114.167", + "port": 9007, + "routers": [] + } } - }, - "public": false, - "allow": [], - "deny": [] + } } ] \ No newline at end of file diff --git a/kinode/default_providers_testnet.json b/kinode/default_providers_testnet.json index 5b4d0496b..0528870b9 100644 --- a/kinode/default_providers_testnet.json +++ b/kinode/default_providers_testnet.json 
@@ -1,76 +1,78 @@ [ - { - "chain_id": 1, - "trusted": false, - "provider": { - "RpcUrl": "wss://ethereum.publicnode.com" - }, - "public": false, - "allow": [], - "deny": [] - }, { "chain_id": 11155111, - "trusted": false, + "trusted": true, "provider": { - "RpcUrl": "wss://eth-sepolia.g.alchemy.com/v2/already_deleted_it" - }, - "public": false, - "allow": [], - "deny": [] + "Node": { + "use_as_provider": true, + "kns_update": { + "name": "sepoliarocks.os", + "owner": "", + "node": "0x2b2e9479333c5f94b62a242d75298ce98d13ad0af95070bc0b8d35aacdbddfa7", + "public_key": "0x958a3f43aee848826db2c0b36545e1e775bf310b003f0d7abf72ab8697a1b72c", + "ip": "", + "port": 0, + "routers": [ + "default-router-1.os", + "default-router-2.os", + "default-router-3.os" + ] + } + } + } }, { "chain_id": 11155111, "trusted": true, "provider": { "Node": { - "name": "default-router-1.os", - "owner": "", - "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", - "public_key": "0xb1b1cf23c89f651aac3e5fd4decb04aa177ab0ec8ce5f1d3877b90bb6f5779db", - "ip": "147.135.114.167", - "port": 9002, - "routers": [] + "use_as_provider": false, + "kns_update": { + "name": "default-router-1.os", + "owner": "", + "node": "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", + "public_key": "0xb1b1cf23c89f651aac3e5fd4decb04aa177ab0ec8ce5f1d3877b90bb6f5779db", + "ip": "147.135.114.167", + "port": 9002, + "routers": [] + } } - }, - "public": false, - "allow": [], - "deny": [] + } }, { "chain_id": 11155111, "trusted": true, "provider": { "Node": { - "name": "default-router-2.os", - "owner": "", - "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", - "public_key": "0xab9f1a996db3a4e1dbcd31d765daedeb3af9af4f570c0968463b5be3a7d1e992", - "ip": "147.135.114.167", - "port": 9003, - "routers": [] + "use_as_provider": false, + "kns_update": { + "name": "default-router-2.os", + "owner": "", + "node": "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", + "public_key": "0xab9f1a996db3a4e1dbcd31d765daedeb3af9af4f570c0968463b5be3a7d1e992", + "ip": "147.135.114.167", + "port": 9003, + "routers": [] + } } - }, - "public": false, - "allow": [], - "deny": [] + } }, { "chain_id": 11155111, "trusted": true, "provider": { "Node": { - "name": "default-router-3.os", - "owner": "", - "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a", - "public_key": "0x536e30785e64dd0349a697285af365b5ed7c4d300010139261cfc4dbdd5b254b", - "ip": "147.135.114.167", - "port": 9004, - "routers": [] + "use_as_provider": false, + "kns_update": { + "name": "default-router-3.os", + "owner": "", + "node": "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a", + "public_key": "0x536e30785e64dd0349a697285af365b5ed7c4d300010139261cfc4dbdd5b254b", + "ip": "147.135.114.167", + "port": 9004, + "routers": [] + } } - }, - "public": false, - "allow": [], - "deny": [] + } } ] \ No newline at end of file diff --git a/kinode/packages/app_store/pkg/manifest.json b/kinode/packages/app_store/pkg/manifest.json index 5112319d4..705a80667 100644 --- a/kinode/packages/app_store/pkg/manifest.json +++ b/kinode/packages/app_store/pkg/manifest.json @@ -13,6 +13,12 @@ "vfs:distro:sys", "kernel:distro:sys", "eth:distro:sys", + { + "process": "eth:distro:sys", + "params": { + "root": true + } + }, "sqlite:distro:sys", "kv:distro:sys", "chess:chess:sys", diff --git a/kinode/packages/terminal/pkg/manifest.json b/kinode/packages/terminal/pkg/manifest.json index 7c787eb08..72003cee3 100644 --- 
a/kinode/packages/terminal/pkg/manifest.json +++ b/kinode/packages/terminal/pkg/manifest.json @@ -12,6 +12,12 @@ "kernel:distro:sys", "vfs:distro:sys", "eth:distro:sys", + { + "process": "eth:distro:sys", + "params": { + "root": true + } + }, "sqlite:distro:sys", "kv:distro:sys", "chess:chess:sys", diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index a6b442703..f71ea0a08 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -1,2 +1,898 @@ -pub mod provider; -pub use lib::types::eth as types; +use alloy_providers::provider::Provider; +use alloy_pubsub::{PubSubFrontend, RawSubscription}; +use alloy_rpc_client::ClientBuilder; +use alloy_rpc_types::pubsub::SubscriptionResult; +use alloy_transport_ws::WsConnect; +use anyhow::Result; +use dashmap::DashMap; +use futures::Future; +use lib::types::core::*; +use lib::types::eth::*; +use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use tokio::task::JoinHandle; +use url::Url; + +/// meta-type for all incoming requests we need to handle +#[derive(Debug, Serialize, Deserialize)] +#[serde(untagged)] +enum IncomingReq { + EthAction(EthAction), + EthConfigAction(EthConfigAction), + EthSubResult(EthSubResult), +} + +/// mapping of chain id to ordered lists of providers +type Providers = Arc>; + +#[derive(Debug)] +struct ActiveProviders { + pub urls: Vec, + pub nodes: Vec, +} + +#[derive(Debug)] +struct UrlProvider { + pub trusted: bool, + pub url: String, + pub pubsub: Option>, +} + +#[derive(Debug)] +struct NodeProvider { + pub trusted: bool, + /// semi-temporary flag to mark if this provider is currently usable + /// future updates will make this more dynamic + pub usable: bool, + pub name: String, +} + +/// existing subscriptions held by local OR remote processes +type ActiveSubscriptions = Arc>>; + +type ResponseChannels = Arc>; + +#[derive(Debug)] +enum ActiveSub { + Local(JoinHandle<()>), + Remote(String), // name of node providing this subscription for us +} + +impl ActiveProviders { + fn add_provider_config(&mut self, new: ProviderConfig) { + match new.provider { + NodeOrRpcUrl::Node { + kns_update, + use_as_provider, + } => { + self.nodes.push(NodeProvider { + trusted: new.trusted, + usable: use_as_provider, + name: kns_update.name, + }); + } + NodeOrRpcUrl::RpcUrl(url) => { + self.urls.push(UrlProvider { + trusted: new.trusted, + url, + pubsub: None, + }); + } + } + } + + fn remove_provider(&mut self, remove: &str) { + self.urls.retain(|x| x.url != remove); + self.nodes.retain(|x| x.name != remove); + } +} + +async fn activate_url_provider(provider: &mut UrlProvider) -> Result<()> { + println!("provider: activate_url_provider\r"); + match Url::parse(&provider.url)?.scheme() { + "ws" | "wss" => { + let connector = WsConnect { + url: provider.url.to_string(), + auth: None, + }; + let client = tokio::time::timeout( + std::time::Duration::from_secs(10), + ClientBuilder::default().ws(connector), + ) + .await??; + provider.pubsub = Some(Provider::new_with_client(client)); + Ok(()) + } + _ => Err(anyhow::anyhow!( + "Only `ws://` or `wss://` providers are supported." + )), + } +} + +/// The ETH provider runtime process is responsible for connecting to one or more ETH RPC providers +/// and using them to service indexing requests from other apps. 
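(Editor's note — an illustrative sketch, not part of the patch.) The IncomingReq enum introduced above is #[serde(untagged)], so the provider deserializes each incoming request body by trying EthAction, then EthConfigAction, then EthSubResult, and takes the first variant that parses; declaration order therefore matters wherever payload shapes could overlap. A minimal, self-contained Rust sketch of that dispatch pattern, using simplified stand-in types rather than the real kinode definitions:

use serde::Deserialize;

// hypothetical stand-in for EthAction
#[derive(Debug, Deserialize)]
enum Action {
    UnsubscribeLogs(u64),
}

// hypothetical stand-in for EthConfigAction
#[derive(Debug, Deserialize)]
enum ConfigAction {
    SetPublic,
}

// untagged: serde tries Action first, then ConfigAction,
// accepting the first variant that deserializes cleanly
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Incoming {
    Action(Action),
    ConfigAction(ConfigAction),
}

fn main() {
    // an externally tagged newtype variant parses as Action
    let a: Incoming = serde_json::from_slice(br#"{"UnsubscribeLogs":7}"#).unwrap();
    // a bare string fails to parse as Action and falls through to ConfigAction
    let c: Incoming = serde_json::from_slice(br#""SetPublic""#).unwrap();
    println!("{a:?} / {c:?}");
}

If two real variants ever shared a wire shape, the earlier one would silently win — the usual trade-off of serde's untagged representation.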
+pub async fn provider( + our: String, + configs: SavedConfigs, + send_to_loop: MessageSender, + mut recv_in_client: MessageReceiver, + mut net_error_recv: NetworkErrorReceiver, + caps_oracle: CapMessageSender, + _print_tx: PrintSender, +) -> Result<()> { + let our = Arc::new(our); + + let mut access_settings = AccessSettings { + public: false, + allow: HashSet::new(), + deny: HashSet::new(), + }; + + // convert saved configs into data structure that we will use to route queries + let mut providers: Providers = Arc::new(DashMap::new()); + for entry in configs { + let mut ap = providers.entry(entry.chain_id).or_insert(ActiveProviders { + urls: vec![], + nodes: vec![], + }); + ap.add_provider_config(entry); + } + + // handles of longrunning subscriptions. + let mut active_subscriptions: ActiveSubscriptions = Arc::new(DashMap::new()); + + // channels to pass incoming responses to outstanding requests + // keyed by KM ID + let mut response_channels: Arc> = Arc::new(DashMap::new()); + + loop { + tokio::select! { + Some(wrapped_error) = net_error_recv.recv() => { + // we got an error from a remote node provider -- + // forward it to response channel if it exists + if let Some(chan) = response_channels.get(&wrapped_error.id) { + // can't close channel here, as response may be an error + // and fulfill_request may wish to try other providers. + let _ = chan.send(Err(wrapped_error)).await; + } + } + Some(km) = recv_in_client.recv() => { + let km_id = km.id; + let response_target = km.rsvp.as_ref().unwrap_or(&km.source).clone(); + if let Err(e) = handle_message( + &our, + &mut access_settings, + km, + &send_to_loop, + &caps_oracle, + &mut providers, + &mut active_subscriptions, + &mut response_channels, + ) + .await + { + error_message(&our, km_id, response_target, e, &send_to_loop).await; + }; + } + } + } +} + +/// handle incoming requests, namely [`EthAction`] and [`EthConfigAction`]. +/// also handle responses that are passthroughs from remote provider nodes. +async fn handle_message( + our: &str, + access_settings: &mut AccessSettings, + km: KernelMessage, + send_to_loop: &MessageSender, + caps_oracle: &CapMessageSender, + providers: &mut Providers, + active_subscriptions: &mut ActiveSubscriptions, + response_channels: &mut ResponseChannels, +) -> Result<(), EthError> { + println!("provider: handle_message\r"); + match &km.message { + Message::Response(_) => { + // map response to the correct channel + if let Some(chan) = response_channels.get(&km.id) { + // can't close channel here, as response may be an error + // and fulfill_request may wish to try other providers. 
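// (editor's sketch, not part of the patch) the response_channels lookup above is the
// request/response correlation map: each outstanding outbound request registers a
// tokio mpsc sender keyed by its KernelMessage id, and any response or wrapped net
// error carrying that id is forwarded into the matching channel, e.g.:
//
//     let (tx, rx) = tokio::sync::mpsc::channel(1);
//     response_channels.insert(km.id, tx);   // register before forwarding the request
//     let reply = rx.recv().await;           // awaited in fulfill_request / build_subscription
//     response_channels.remove(&km.id);      // cleaned up on completion or timeout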
+ let _ = chan.send(Ok(km)).await; + } else { + println!("eth: got weird response!!\r"); + } + Ok(()) + } + Message::Request(req) => { + let timeout = *req.expects_response.as_ref().unwrap_or(&60); // TODO make this a config + let Ok(req) = serde_json::from_slice::(&req.body) else { + return Err(EthError::MalformedRequest); + }; + match req { + IncomingReq::EthAction(eth_action) => { + handle_eth_action( + our, + access_settings, + send_to_loop, + km, + timeout, + eth_action, + providers, + active_subscriptions, + response_channels, + ) + .await + } + IncomingReq::EthConfigAction(eth_config_action) => { + kernel_message( + our, + km.id, + km.source.clone(), + km.rsvp.clone(), + false, + None, + handle_eth_config_action( + our, + access_settings, + caps_oracle, + &km, + eth_config_action, + providers, + ) + .await, + send_to_loop, + ) + .await; + Ok(()) + } + IncomingReq::EthSubResult(eth_sub_result) => { + // forward this to rsvp + kernel_message( + our, + km.id, + km.source.clone(), + km.rsvp.clone(), + true, + None, + eth_sub_result, + send_to_loop, + ) + .await; + Ok(()) + } + } + } + } +} + +async fn handle_eth_action( + our: &str, + access_settings: &mut AccessSettings, + send_to_loop: &MessageSender, + km: KernelMessage, + timeout: u64, + eth_action: EthAction, + providers: &mut Providers, + active_subscriptions: &mut ActiveSubscriptions, + response_channels: &mut ResponseChannels, +) -> Result<(), EthError> { + println!("provider: handle_eth_action: {eth_action:?}\r"); + // check our access settings if the request is from a remote node + if km.source.node != our { + if !access_settings.deny.contains(&km.source.node) { + if !access_settings.public { + if !access_settings.allow.contains(&km.source.node) { + return Err(EthError::PermissionDenied); + } + } + } else { + return Err(EthError::PermissionDenied); + } + } + + // for each incoming action, we need to assign a provider from our map + // based on the chain id. once we assign a provider, we can use it for + // this request. if the provider is not usable, cycle through options + // before returning an error. + match eth_action { + EthAction::SubscribeLogs { sub_id, .. } => { + create_new_subscription( + our.to_string(), + km.id, + km.source.clone(), + km.rsvp, + send_to_loop.clone(), + sub_id, + eth_action, + providers.clone(), + active_subscriptions.clone(), + response_channels.clone(), + ) + .await; + } + EthAction::UnsubscribeLogs(sub_id) => { + let mut sub_map = active_subscriptions + .entry(km.source) + .or_insert(HashMap::new()); + if let Some(sub) = sub_map.remove(&sub_id) { + match sub { + ActiveSub::Local(handle) => { + handle.abort(); + } + ActiveSub::Remote(node) => { + kernel_message( + our, + rand::random(), + Address { + node: node.clone(), + process: ETH_PROCESS_ID.clone(), + }, + None, + true, + Some(60), // TODO + serde_json::to_vec(ð_action).unwrap(), + send_to_loop, + ) + .await; + } + } + } + } + EthAction::Request { .. 
} => { + let (sender, receiver) = tokio::sync::mpsc::channel(1); + response_channels.insert(km.id, sender); + let our = our.to_string(); + let send_to_loop = send_to_loop.clone(); + let providers = providers.clone(); + let response_channels = response_channels.clone(); + tokio::spawn(async move { + let res = tokio::time::timeout( + std::time::Duration::from_secs(timeout), + fulfill_request(&our, km.id, &send_to_loop, eth_action, providers, receiver), + ) + .await; + match res { + Ok(Ok(response)) => { + kernel_message( + &our, + km.id, + km.source, + km.rsvp, + false, + None, + response, + &send_to_loop, + ) + .await; + } + Ok(Err(e)) => { + error_message(&our, km.id, km.source, e, &send_to_loop).await; + } + Err(_) => { + error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) + .await; + } + } + response_channels.remove(&km.id); + }); + } + } + Ok(()) +} + +/// cleans itself up when the subscription is closed or fails. +async fn create_new_subscription( + our: String, + km_id: u64, + target: Address, + rsvp: Option
, + send_to_loop: MessageSender, + sub_id: u64, + eth_action: EthAction, + providers: Providers, + active_subscriptions: ActiveSubscriptions, + response_channels: ResponseChannels, +) { + println!("provider: create_new_subscription\r"); + match build_subscription( + our.clone(), + km_id, + target.clone(), + rsvp.clone(), + send_to_loop.clone(), + ð_action, + providers, + response_channels.clone(), + ) + .await + { + Ok((Some(future), None)) => { + // this is a local sub + // send a response to the target that the subscription was successful + kernel_message( + &our, + km_id, + target.clone(), + rsvp.clone(), + false, + None, + EthResponse::Ok, + &send_to_loop, + ) + .await; + let mut subs = active_subscriptions + .entry(target.clone()) + .or_insert(HashMap::new()); + let target2 = target.clone(); + let active_subs = active_subscriptions.clone(); + subs.insert( + sub_id, + ActiveSub::Local(tokio::spawn(async move { + // await the subscription error and kill it if so + if let Err(e) = future.await { + error_message(&our, km_id, target2.clone(), e, &send_to_loop).await; + active_subs.entry(target2).and_modify(|sub_map| { + sub_map.remove(&km_id); + }); + } + })), + ); + } + Ok((None, Some(provider_node))) => { + // this is a remote sub + let mut subs = active_subscriptions + .entry(target.clone()) + .or_insert(HashMap::new()); + subs.insert(sub_id, ActiveSub::Remote(provider_node)); + } + Err(e) => { + error_message(&our, km_id, target.clone(), e, &send_to_loop).await; + } + _ => panic!(), + } +} + +async fn build_subscription( + our: String, + km_id: u64, + target: Address, + rsvp: Option
, + send_to_loop: MessageSender, + eth_action: &EthAction, + providers: Providers, + response_channels: ResponseChannels, +) -> Result< + ( + // this is dumb, sorry + Option>>, + Option, + ), + EthError, +> { + println!("provider: build_subscription\r"); + let EthAction::SubscribeLogs { + sub_id, + chain_id, + kind, + params, + } = eth_action + else { + return Err(EthError::PermissionDenied); // will never hit + }; + let Some(mut aps) = providers.get_mut(&chain_id) else { + return Err(EthError::NoRpcForChain); + }; + // first, try any url providers we have for this chain, + // then if we have none or they all fail, go to node providers. + // finally, if no provider works, return an error. + for url_provider in &mut aps.urls { + let pubsub = match &url_provider.pubsub { + Some(pubsub) => pubsub, + None => { + if let Ok(()) = activate_url_provider(url_provider).await { + url_provider.pubsub.as_ref().unwrap() + } else { + continue; + } + } + }; + let kind = serde_json::to_value(&kind).unwrap(); + let params = serde_json::to_value(¶ms).unwrap(); + if let Ok(id) = pubsub + .inner() + .prepare("eth_subscribe", [kind, params]) + .await + { + let rx = pubsub.inner().get_raw_subscription(id).await; + return Ok(( + Some(maintain_subscription( + our, + *sub_id, + rx, + target, + rsvp, + send_to_loop, + )), + None, + )); + } + // this provider failed and needs to be reset + url_provider.pubsub = None; + } + // now we need a response channel + let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1); + response_channels.insert(km_id, sender); + for node_provider in &mut aps.nodes { + if !node_provider.usable { + continue; + } + // in order, forward the request to each node provider + // until one sends back a satisfactory response + kernel_message( + &our, + km_id, + Address { + node: node_provider.name.clone(), + process: ETH_PROCESS_ID.clone(), + }, + None, + true, + Some(60), // TODO + serde_json::to_vec(ð_action).unwrap(), + &send_to_loop, + ) + .await; + let Some(Ok(response_km)) = response_receiver.recv().await else { + // our message timed out or receiver was offline + println!("provider: build_subscription: response_receiver timed out / is offline\r"); + continue; + }; + let Message::Response((resp, _context)) = response_km.message else { + // if we hit this, they spoofed a request with same id, ignore and possibly punish + node_provider.usable = false; + continue; + }; + let Ok(eth_response) = serde_json::from_slice::(&resp.body) else { + // if we hit this, they sent a malformed response, ignore and possibly punish + node_provider.usable = false; + continue; + }; + if let EthResponse::Response { .. } = ð_response { + // if we hit this, they sent a response instead of a subscription, ignore and possibly punish + node_provider.usable = false; + continue; + } + if let EthResponse::Err(error) = ð_response { + // if we hit this, they sent an error, if it's an error that might + // not be our fault, we can try another provider + match error { + EthError::NoRpcForChain => continue, + EthError::PermissionDenied => continue, + _ => {} + } + } + kernel_message( + &our, + km_id, + target, + None, + false, + None, + EthResponse::Ok, + &send_to_loop, + ) + .await; + response_channels.remove(&km_id); + return Ok((None, Some(node_provider.name.clone()))); + } + return Err(EthError::NoRpcForChain); +} + +async fn maintain_subscription( + our: String, + sub_id: u64, + mut rx: RawSubscription, + target: Address, + rsvp: Option
, + send_to_loop: MessageSender, +) -> Result<(), EthError> { + println!("provider: maintain_subscription\r"); + loop { + let value = rx + .recv() + .await + .map_err(|_| EthError::SubscriptionClosed(sub_id))?; + let result: SubscriptionResult = + serde_json::from_str(value.get()).map_err(|_| EthError::SubscriptionClosed(sub_id))?; + kernel_message( + &our, + rand::random(), + target.clone(), + rsvp.clone(), + true, + None, + EthSubResult::Ok(EthSub { id: sub_id, result }), + &send_to_loop, + ) + .await; + } +} + +async fn fulfill_request( + our: &str, + km_id: u64, + send_to_loop: &MessageSender, + eth_action: EthAction, + providers: Providers, + mut remote_request_receiver: ProcessMessageReceiver, +) -> Result { + println!("provider: fulfill_request\r"); + let EthAction::Request { + chain_id, + ref method, + ref params, + } = eth_action + else { + return Err(EthError::PermissionDenied); // will never hit + }; + let Some(method) = to_static_str(&method) else { + return Err(EthError::InvalidMethod(method.to_string())); + }; + let Some(mut aps) = providers.get_mut(&chain_id) else { + return Err(EthError::NoRpcForChain); + }; + // first, try any url providers we have for this chain, + // then if we have none or they all fail, go to node provider. + // finally, if no provider works, return an error. + for url_provider in &mut aps.urls { + let pubsub = match &url_provider.pubsub { + Some(pubsub) => pubsub, + None => { + if let Ok(()) = activate_url_provider(url_provider).await { + url_provider.pubsub.as_ref().unwrap() + } else { + continue; + } + } + }; + let Ok(value) = pubsub.inner().prepare(method, params.clone()).await else { + // this provider failed and needs to be reset + url_provider.pubsub = None; + continue; + }; + return Ok(EthResponse::Response { value }); + } + for node_provider in &mut aps.nodes { + if !node_provider.usable { + continue; + } + // in order, forward the request to each node provider + // until one sends back a satisfactory response + kernel_message( + our, + km_id, + Address { + node: node_provider.name.clone(), + process: ETH_PROCESS_ID.clone(), + }, + None, + true, + Some(60), // TODO + serde_json::to_vec(ð_action).unwrap(), + &send_to_loop, + ) + .await; + let Some(Ok(response_km)) = remote_request_receiver.recv().await else { + println!("provider: fulfill_request: remote_request_receiver timed out / is offline\r"); + continue; + }; + let Message::Response((resp, _context)) = response_km.message else { + // if we hit this, they spoofed a request with same id, ignore and possibly punish + node_provider.usable = false; + continue; + }; + let Ok(eth_response) = serde_json::from_slice::(&resp.body) else { + // if we hit this, they sent a malformed response, ignore and possibly punish + node_provider.usable = false; + continue; + }; + if let EthResponse::Err(error) = ð_response { + // if we hit this, they sent an error, if it's an error that might + // not be our fault, we can try another provider + match error { + EthError::NoRpcForChain => continue, + EthError::PermissionDenied => continue, + _ => {} + } + } + return Ok(eth_response); + } + Err(EthError::NoRpcForChain) +} + +async fn handle_eth_config_action( + our: &str, + access_settings: &mut AccessSettings, + caps_oracle: &CapMessageSender, + km: &KernelMessage, + eth_config_action: EthConfigAction, + providers: &mut Providers, +) -> EthConfigResponse { + println!("provider: handle_eth_config_action\r"); + if km.source.node != our { + return EthConfigResponse::PermissionDenied; + } + // check capabilities to 
ensure the sender is allowed to make this request + let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel(); + caps_oracle + .send(CapMessage::Has { + on: km.source.process.clone(), + cap: Capability { + issuer: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), + }, + params: serde_json::to_string(&serde_json::json!({ + "root": true, + })) + .unwrap(), + }, + responder: send_cap_bool, + }) + .await + .expect("eth: capability oracle died!"); + if !recv_cap_bool.await.unwrap_or(false) { + return EthConfigResponse::PermissionDenied; + } + + // modify our providers and access settings based on config action + match eth_config_action { + EthConfigAction::AddProvider(provider) => { + let mut aps = providers + .entry(provider.chain_id) + .or_insert(ActiveProviders { + urls: vec![], + nodes: vec![], + }); + aps.add_provider_config(provider); + } + EthConfigAction::RemoveProvider((chain_id, remove)) => { + if let Some(mut aps) = providers.get_mut(&chain_id) { + aps.remove_provider(&remove); + } + } + EthConfigAction::SetPublic => { + access_settings.public = true; + } + EthConfigAction::SetPrivate => { + access_settings.public = false; + } + EthConfigAction::AllowNode(node) => { + access_settings.allow.insert(node); + } + EthConfigAction::UnallowNode(node) => { + access_settings.allow.remove(&node); + } + EthConfigAction::DenyNode(node) => { + access_settings.deny.insert(node); + } + EthConfigAction::UndenyNode(node) => { + access_settings.deny.remove(&node); + } + EthConfigAction::SetProviders(new_providers) => { + let new_map = DashMap::new(); + for entry in new_providers { + let mut aps = new_map.entry(entry.chain_id).or_insert(ActiveProviders { + urls: vec![], + nodes: vec![], + }); + aps.add_provider_config(entry); + } + *providers = Arc::new(new_map); + } + EthConfigAction::GetProviders => { + return EthConfigResponse::Providers( + providers + .iter() + .map(|entry| { + entry + .urls + .iter() + .map(|url_provider| ProviderConfig { + chain_id: *entry.key(), + provider: NodeOrRpcUrl::RpcUrl(url_provider.url.clone()), + trusted: url_provider.trusted, + }) + .chain(entry.nodes.iter().map(|node_provider| ProviderConfig { + chain_id: *entry.key(), + provider: NodeOrRpcUrl::Node { + kns_update: KnsUpdate { + name: node_provider.name.clone(), + owner: "".to_string(), + node: "".to_string(), + public_key: "".to_string(), + ip: "".to_string(), + port: 0, + routers: vec![], + }, + use_as_provider: node_provider.usable, + }, + trusted: node_provider.trusted, + })) + .collect::>() + }) + .flatten() + .collect(), + ); + } + EthConfigAction::GetAccessSettings => { + return EthConfigResponse::AccessSettings(access_settings.clone()); + } + } + EthConfigResponse::Ok +} + +async fn error_message( + our: &str, + km_id: u64, + target: Address, + error: EthError, + send_to_loop: &MessageSender, +) { + println!("EthError: {error:?}\r"); + kernel_message( + our, + km_id, + target, + None, + false, + None, + EthResponse::Err(error), + send_to_loop, + ) + .await +} + +async fn kernel_message( + our: &str, + km_id: u64, + target: Address, + rsvp: Option
, + req: bool, + timeout: Option, + body: T, + send_to_loop: &MessageSender, +) { + let _ = send_to_loop + .send(KernelMessage { + id: km_id, + source: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), + }, + target, + rsvp, + message: if req { + Message::Request(Request { + inherit: false, + expects_response: timeout, + body: serde_json::to_vec(&body).unwrap(), + metadata: None, + capabilities: vec![], + }) + } else { + Message::Response(( + Response { + inherit: false, + body: serde_json::to_vec(&body).unwrap(), + metadata: None, + capabilities: vec![], + }, + None, + )) + }, + lazy_load_blob: None, + }) + .await; +} diff --git a/kinode/src/eth/provider.rs b/kinode/src/eth/provider.rs deleted file mode 100644 index 66530f5de..000000000 --- a/kinode/src/eth/provider.rs +++ /dev/null @@ -1,859 +0,0 @@ -use alloy_providers::provider::Provider; -use alloy_pubsub::{PubSubFrontend, RawSubscription}; -use alloy_rpc_client::ClientBuilder; -use alloy_rpc_types::pubsub::SubscriptionResult; -use alloy_transport_ws::WsConnect; -use anyhow::Result; -use dashmap::DashMap; -use futures::Future; -use lib::types::core::*; -use lib::types::eth::*; -use serde::{Deserialize, Serialize}; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use tokio::task::JoinHandle; -use url::Url; - -/// meta-type for all incoming requests we need to handle -#[derive(Debug, Serialize, Deserialize)] -#[serde(untagged)] -enum IncomingReq { - EthAction(EthAction), - EthConfigAction(EthConfigAction), - EthSubResult(EthSubResult), -} - -/// mapping of chain id to ordered lists of providers -type Providers = Arc>; - -#[derive(Debug)] -struct ActiveProviders { - pub urls: Vec, - pub nodes: Vec, -} - -#[derive(Debug)] -struct UrlProvider { - pub trusted: bool, - pub url: String, - pub pubsub: Option>, -} - -#[derive(Debug)] -struct NodeProvider { - pub trusted: bool, - pub name: String, -} - -/// existing subscriptions held by local OR remote processes -type ActiveSubscriptions = Arc>>; - -#[derive(Debug)] -enum ActiveSub { - Local(JoinHandle<()>), - Remote(String), // name of node providing this subscription for us -} - -impl ActiveProviders { - fn add_provider_config(&mut self, new: ProviderConfig) { - match new.provider { - NodeOrRpcUrl::Node(update) => { - self.nodes.push(NodeProvider { - trusted: new.trusted, - name: update.name, - }); - } - NodeOrRpcUrl::RpcUrl(url) => { - self.urls.push(UrlProvider { - trusted: new.trusted, - url, - pubsub: None, - }); - } - } - } - - fn remove_provider(&mut self, remove: &str) { - self.urls.retain(|x| x.url != remove); - self.nodes.retain(|x| x.name != remove); - } -} - -async fn activate_url_provider(provider: &mut UrlProvider) -> Result<()> { - println!("provider: activate_url_provider\r"); - match Url::parse(&provider.url)?.scheme() { - "ws" | "wss" => { - let connector = WsConnect { - url: provider.url.to_string(), - auth: None, - }; - let client = tokio::time::timeout( - std::time::Duration::from_secs(10), - ClientBuilder::default().ws(connector), - ) - .await??; - provider.pubsub = Some(Provider::new_with_client(client)); - Ok(()) - } - _ => Err(anyhow::anyhow!( - "Only `ws://` or `wss://` providers are supported." - )), - } -} - -/// The ETH provider runtime process is responsible for connecting to one or more ETH RPC providers -/// and using them to service indexing requests from other apps. 
-pub async fn provider( - our: String, - configs: SavedConfigs, - send_to_loop: MessageSender, - mut recv_in_client: MessageReceiver, - caps_oracle: CapMessageSender, - _print_tx: PrintSender, -) -> Result<()> { - let our = Arc::new(our); - - let mut access_settings = AccessSettings { - public: false, - allow: HashSet::new(), - deny: HashSet::new(), - }; - - // convert saved configs into data structure that we will use to route queries - let mut providers: Providers = Arc::new(DashMap::new()); - for entry in configs { - let mut ap = providers.entry(entry.chain_id).or_insert(ActiveProviders { - urls: vec![], - nodes: vec![], - }); - ap.add_provider_config(entry); - } - - // handles of longrunning subscriptions. - let mut active_subscriptions: ActiveSubscriptions = Arc::new(DashMap::new()); - - // channels to pass incoming responses to outstanding requests - // keyed by KM ID - let mut response_channels: Arc> = Arc::new(DashMap::new()); - - while let Some(km) = recv_in_client.recv().await { - let km_id = km.id; - let response_target = km.rsvp.as_ref().unwrap_or(&km.source).clone(); - if let Err(e) = handle_message( - &our, - &mut access_settings, - km, - &send_to_loop, - &caps_oracle, - &mut providers, - &mut active_subscriptions, - &mut response_channels, - ) - .await - { - error_message(&our, km_id, response_target, e, &send_to_loop).await; - }; - } - Err(anyhow::anyhow!("eth: fatal: message receiver closed!")) -} - -/// handle incoming requests, namely [`EthAction`] and [`EthConfigAction`]. -/// also handle responses that are passthroughs from remote provider nodes. -async fn handle_message( - our: &str, - access_settings: &mut AccessSettings, - km: KernelMessage, - send_to_loop: &MessageSender, - caps_oracle: &CapMessageSender, - providers: &mut Providers, - active_subscriptions: &mut ActiveSubscriptions, - response_channels: &mut Arc>, -) -> Result<(), EthError> { - println!("provider: handle_message\r"); - match &km.message { - Message::Response(_) => { - // map response to the correct channel - if let Some((_id, sender)) = response_channels.remove(&km.id) { - // can't close channel here, as response may be an error - // and fullfill_request may wish to try other providers. 
- let _ = sender.send(km).await; - } else { - println!("eth: got weird response!!\r"); - } - Ok(()) - } - Message::Request(req) => { - let timeout = *req.expects_response.as_ref().unwrap_or(&60); // TODO make this a config - let Ok(req) = serde_json::from_slice::(&req.body) else { - return Err(EthError::MalformedRequest); - }; - match req { - IncomingReq::EthAction(eth_action) => { - handle_eth_action( - our, - access_settings, - send_to_loop, - km, - timeout, - eth_action, - providers, - active_subscriptions, - response_channels, - ) - .await - } - IncomingReq::EthConfigAction(eth_config_action) => { - kernel_message( - our, - km.id, - km.source.clone(), - km.rsvp.clone(), - false, - None, - handle_eth_config_action( - our, - access_settings, - caps_oracle, - &km, - eth_config_action, - providers, - ) - .await, - send_to_loop, - ) - .await; - Ok(()) - } - IncomingReq::EthSubResult(eth_sub_result) => { - // forward this to rsvp - kernel_message( - our, - km.id, - km.source.clone(), - km.rsvp.clone(), - true, - None, - eth_sub_result, - send_to_loop, - ) - .await; - Ok(()) - } - } - } - } -} - -async fn handle_eth_action( - our: &str, - access_settings: &mut AccessSettings, - send_to_loop: &MessageSender, - km: KernelMessage, - timeout: u64, - eth_action: EthAction, - providers: &mut Providers, - active_subscriptions: &mut ActiveSubscriptions, - response_channels: &mut Arc>, -) -> Result<(), EthError> { - println!("provider: handle_eth_action: {eth_action:?}\r"); - // check our access settings if the request is from a remote node - if km.source.node != our { - if !access_settings.deny.contains(&km.source.node) { - if !access_settings.public { - if !access_settings.allow.contains(&km.source.node) { - return Err(EthError::PermissionDenied); - } - } - } else { - return Err(EthError::PermissionDenied); - } - } - - // for each incoming action, we need to assign a provider from our map - // based on the chain id. once we assign a provider, we can use it for - // this request. if the provider is not usable, cycle through options - // before returning an error. - match eth_action { - EthAction::SubscribeLogs { sub_id, .. } => { - create_new_subscription( - our.to_string(), - km.id, - km.source.clone(), - km.rsvp, - send_to_loop.clone(), - sub_id, - eth_action, - providers.clone(), - active_subscriptions.clone(), - response_channels.clone(), - ) - .await; - } - EthAction::UnsubscribeLogs(sub_id) => { - let mut sub_map = active_subscriptions - .entry(km.source) - .or_insert(HashMap::new()); - if let Some(sub) = sub_map.remove(&sub_id) { - match sub { - ActiveSub::Local(handle) => { - handle.abort(); - } - ActiveSub::Remote(node) => { - kernel_message( - our, - rand::random(), - Address { - node: node.clone(), - process: ETH_PROCESS_ID.clone(), - }, - None, - true, - Some(60), // TODO - serde_json::to_vec(ð_action).unwrap(), - send_to_loop, - ) - .await; - } - } - } - } - EthAction::Request { .. 
} => { - let (sender, receiver) = tokio::sync::mpsc::channel(1); - response_channels.insert(km.id, sender); - let our = our.to_string(); - let send_to_loop = send_to_loop.clone(); - let providers = providers.clone(); - let response_channels = response_channels.clone(); - tokio::spawn(async move { - let res = tokio::time::timeout( - std::time::Duration::from_secs(timeout), - fulfill_request(&our, &send_to_loop, eth_action, providers, receiver), - ) - .await; - match res { - Ok(Ok(response)) => { - kernel_message( - &our, - km.id, - km.source, - km.rsvp, - false, - None, - response, - &send_to_loop, - ) - .await; - } - Ok(Err(e)) => { - error_message(&our, km.id, km.source, e, &send_to_loop).await; - } - Err(_) => { - error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) - .await; - } - } - response_channels.remove(&km.id); - }); - } - } - Ok(()) -} - -/// cleans itself up when the subscription is closed or fails. -async fn create_new_subscription( - our: String, - km_id: u64, - target: Address, - rsvp: Option
, - send_to_loop: MessageSender, - sub_id: u64, - eth_action: EthAction, - providers: Providers, - active_subscriptions: ActiveSubscriptions, - response_channels: Arc>, -) { - println!("provider: create_new_subscription\r"); - match build_subscription( - our.clone(), - km_id, - target.clone(), - rsvp.clone(), - send_to_loop.clone(), - ð_action, - providers, - response_channels.clone(), - ) - .await - { - Ok((Some(future), None)) => { - // this is a local sub - // send a response to the target that the subscription was successful - kernel_message( - &our, - km_id, - target.clone(), - rsvp.clone(), - false, - None, - EthResponse::Ok, - &send_to_loop, - ) - .await; - let mut subs = active_subscriptions - .entry(target.clone()) - .or_insert(HashMap::new()); - let target2 = target.clone(); - let active_subs = active_subscriptions.clone(); - subs.insert( - sub_id, - ActiveSub::Local(tokio::spawn(async move { - // await the subscription error and kill it if so - if let Err(e) = future.await { - error_message(&our, km_id, target2.clone(), e, &send_to_loop).await; - active_subs.entry(target2).and_modify(|sub_map| { - sub_map.remove(&km_id); - }); - } - })), - ); - } - Ok((None, Some(provider_node))) => { - // this is a remote sub - let mut subs = active_subscriptions - .entry(target.clone()) - .or_insert(HashMap::new()); - subs.insert(sub_id, ActiveSub::Remote(provider_node)); - } - Err(e) => { - error_message(&our, km_id, target.clone(), e, &send_to_loop).await; - } - _ => panic!(), - } -} - -async fn build_subscription( - our: String, - km_id: u64, - target: Address, - rsvp: Option
, - send_to_loop: MessageSender, - eth_action: &EthAction, - providers: Providers, - response_channels: Arc>, -) -> Result< - ( - // this is dumb, sorry - Option>>, - Option, - ), - EthError, -> { - println!("provider: build_subscription\r"); - let EthAction::SubscribeLogs { - sub_id, - chain_id, - kind, - params, - } = eth_action - else { - return Err(EthError::PermissionDenied); // will never hit - }; - let Some(mut aps) = providers.get_mut(&chain_id) else { - return Err(EthError::NoRpcForChain); - }; - // first, try any url providers we have for this chain, - // then if we have none or they all fail, go to node providers. - // finally, if no provider works, return an error. - for url_provider in &mut aps.urls { - let pubsub = match &url_provider.pubsub { - Some(pubsub) => pubsub, - None => { - if let Ok(()) = activate_url_provider(url_provider).await { - url_provider.pubsub.as_ref().unwrap() - } else { - continue; - } - } - }; - let kind = serde_json::to_value(&kind).unwrap(); - let params = serde_json::to_value(¶ms).unwrap(); - if let Ok(id) = pubsub - .inner() - .prepare("eth_subscribe", [kind, params]) - .await - { - let rx = pubsub.inner().get_raw_subscription(id).await; - return Ok(( - Some(maintain_subscription( - our, - *sub_id, - rx, - target, - rsvp, - send_to_loop, - )), - None, - )); - } - // this provider failed and needs to be reset - url_provider.pubsub = None; - } - // now we need a response channel - let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1); - response_channels.insert(km_id, sender); - for node_provider in &aps.nodes { - // in order, forward the request to each node provider - // until one sends back a satisfactory response - kernel_message( - &our, - rand::random(), - Address { - node: node_provider.name.clone(), - process: ETH_PROCESS_ID.clone(), - }, - None, - true, - Some(60), // TODO - serde_json::to_vec(ð_action).unwrap(), - &send_to_loop, - ) - .await; - let Some(response_km) = response_receiver.recv().await else { - // never hit this - continue; - }; - let Message::Response((resp, _context)) = response_km.message else { - // if we hit this, they spoofed a request with same id, ignore and possibly punish - continue; - }; - let Ok(eth_response) = serde_json::from_slice::(&resp.body) else { - // if we hit this, they sent a malformed response, ignore and possibly punish - continue; - }; - if let EthResponse::Response { .. } = ð_response { - // if we hit this, they sent a response instead of a subscription, ignore and possibly punish - continue; - } - if let EthResponse::Err(error) = ð_response { - // if we hit this, they sent an error, if it's an error that might - // not be our fault, we can try another provider - match error { - EthError::NoRpcForChain => continue, - EthError::PermissionDenied => continue, - _ => {} - } - } - kernel_message( - &our, - km_id, - target, - None, - false, - None, - EthResponse::Ok, - &send_to_loop, - ) - .await; - response_channels.remove(&km_id); - return Ok((None, Some(node_provider.name.clone()))); - } - return Err(EthError::NoRpcForChain); -} - -async fn maintain_subscription( - our: String, - sub_id: u64, - mut rx: RawSubscription, - target: Address, - rsvp: Option
, - send_to_loop: MessageSender, -) -> Result<(), EthError> { - println!("provider: maintain_subscription\r"); - loop { - let value = rx - .recv() - .await - .map_err(|_| EthError::SubscriptionClosed(sub_id))?; - let result: SubscriptionResult = - serde_json::from_str(value.get()).map_err(|_| EthError::SubscriptionClosed(sub_id))?; - kernel_message( - &our, - rand::random(), - target.clone(), - rsvp.clone(), - true, - None, - EthSubResult::Ok(EthSub { id: sub_id, result }), - &send_to_loop, - ) - .await; - } -} - -async fn fulfill_request( - our: &str, - send_to_loop: &MessageSender, - eth_action: EthAction, - providers: Providers, - mut remote_request_receiver: MessageReceiver, -) -> Result { - println!("provider: fulfill_request\r"); - let EthAction::Request { - chain_id, - ref method, - ref params, - } = eth_action - else { - return Err(EthError::PermissionDenied); // will never hit - }; - let Some(method) = to_static_str(&method) else { - return Err(EthError::InvalidMethod(method.to_string())); - }; - let Some(mut aps) = providers.get_mut(&chain_id) else { - return Err(EthError::NoRpcForChain); - }; - // first, try any url providers we have for this chain, - // then if we have none or they all fail, go to node provider. - // finally, if no provider works, return an error. - for url_provider in &mut aps.urls { - let pubsub = match &url_provider.pubsub { - Some(pubsub) => pubsub, - None => { - if let Ok(()) = activate_url_provider(url_provider).await { - url_provider.pubsub.as_ref().unwrap() - } else { - continue; - } - } - }; - let Ok(value) = pubsub.inner().prepare(method, params.clone()).await else { - // this provider failed and needs to be reset - url_provider.pubsub = None; - continue; - }; - return Ok(EthResponse::Response { value }); - } - for node_provider in &aps.nodes { - // in order, forward the request to each node provider - // until one sends back a satisfactory response - kernel_message( - our, - rand::random(), - Address { - node: node_provider.name.clone(), - process: ETH_PROCESS_ID.clone(), - }, - None, - true, - Some(60), // TODO - serde_json::to_vec(ð_action).unwrap(), - &send_to_loop, - ) - .await; - let Some(response_km) = remote_request_receiver.recv().await else { - // never hit this - continue; - }; - let Message::Response((resp, _context)) = response_km.message else { - // if we hit this, they spoofed a request with same id, ignore and possibly punish - continue; - }; - let Ok(eth_response) = serde_json::from_slice::(&resp.body) else { - // if we hit this, they sent a malformed response, ignore and possibly punish - continue; - }; - if let EthResponse::Err(error) = ð_response { - // if we hit this, they sent an error, if it's an error that might - // not be our fault, we can try another provider - match error { - EthError::NoRpcForChain => continue, - EthError::PermissionDenied => continue, - _ => {} - } - } - return Ok(eth_response); - } - Err(EthError::NoRpcForChain) -} - -async fn handle_eth_config_action( - our: &str, - access_settings: &mut AccessSettings, - caps_oracle: &CapMessageSender, - km: &KernelMessage, - eth_config_action: EthConfigAction, - providers: &mut Providers, -) -> EthConfigResponse { - println!("provider: handle_eth_config_action\r"); - if km.source.node != our { - return EthConfigResponse::PermissionDenied; - } - // check capabilities to ensure the sender is allowed to make this request - let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel(); - caps_oracle - .send(CapMessage::Has { - on: km.source.process.clone(), - cap: 
Capability { - issuer: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - params: serde_json::to_string(&serde_json::json!({ - "root": true, - })) - .unwrap(), - }, - responder: send_cap_bool, - }) - .await - .expect("eth: capability oracle died!"); - if !recv_cap_bool.await.unwrap_or(false) { - return EthConfigResponse::PermissionDenied; - } - - // modify our providers and access settings based on config action - match eth_config_action { - EthConfigAction::AddProvider(provider) => { - let mut aps = providers - .entry(provider.chain_id) - .or_insert(ActiveProviders { - urls: vec![], - nodes: vec![], - }); - aps.add_provider_config(provider); - } - EthConfigAction::RemoveProvider((chain_id, remove)) => { - if let Some(mut aps) = providers.get_mut(&chain_id) { - aps.remove_provider(&remove); - } - } - EthConfigAction::SetPublic => { - access_settings.public = true; - } - EthConfigAction::SetPrivate => { - access_settings.public = false; - } - EthConfigAction::AllowNode(node) => { - access_settings.allow.insert(node); - } - EthConfigAction::UnallowNode(node) => { - access_settings.allow.remove(&node); - } - EthConfigAction::DenyNode(node) => { - access_settings.deny.insert(node); - } - EthConfigAction::UndenyNode(node) => { - access_settings.deny.remove(&node); - } - EthConfigAction::SetProviders(new_providers) => { - let new_map = DashMap::new(); - for entry in new_providers { - let mut aps = new_map.entry(entry.chain_id).or_insert(ActiveProviders { - urls: vec![], - nodes: vec![], - }); - aps.add_provider_config(entry); - } - *providers = Arc::new(new_map); - } - EthConfigAction::GetProviders => { - return EthConfigResponse::Providers( - providers - .iter() - .map(|entry| { - entry - .urls - .iter() - .map(|url_provider| ProviderConfig { - chain_id: *entry.key(), - provider: NodeOrRpcUrl::RpcUrl(url_provider.url.clone()), - trusted: url_provider.trusted, - }) - .chain(entry.nodes.iter().map(|node_provider| ProviderConfig { - chain_id: *entry.key(), - provider: NodeOrRpcUrl::Node(KnsUpdate { - name: node_provider.name.clone(), - owner: "".to_string(), - node: "".to_string(), - public_key: "".to_string(), - ip: "".to_string(), - port: 0, - routers: vec![], - }), - trusted: node_provider.trusted, - })) - .collect::>() - }) - .flatten() - .collect(), - ); - } - EthConfigAction::GetAccessSettings => { - return EthConfigResponse::AccessSettings(access_settings.clone()); - } - } - EthConfigResponse::Ok -} - -async fn error_message( - our: &str, - km_id: u64, - target: Address, - error: EthError, - send_to_loop: &MessageSender, -) { - kernel_message( - our, - km_id, - target, - None, - false, - None, - EthResponse::Err(error), - send_to_loop, - ) - .await -} - -async fn kernel_message( - our: &str, - km_id: u64, - target: Address, - rsvp: Option
, - req: bool, - timeout: Option, - body: T, - send_to_loop: &MessageSender, -) { - let _ = send_to_loop - .send(KernelMessage { - id: km_id, - source: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - target, - rsvp, - message: if req { - Message::Request(Request { - inherit: false, - expects_response: timeout, - body: serde_json::to_vec(&body).unwrap(), - metadata: None, - capabilities: vec![], - }) - } else { - Message::Response(( - Response { - inherit: false, - body: serde_json::to_vec(&body).unwrap(), - metadata: None, - capabilities: vec![], - }, - None, - )) - }, - lazy_load_blob: None, - }) - .await; -} diff --git a/kinode/src/kernel/mod.rs b/kinode/src/kernel/mod.rs index fde3f7c30..4090bda82 100644 --- a/kinode/src/kernel/mod.rs +++ b/kinode/src/kernel/mod.rs @@ -20,11 +20,6 @@ const PROCESS_CHANNEL_CAPACITY: usize = 100; const DEFAULT_WIT_VERSION: u32 = 0; -type ProcessMessageSender = - tokio::sync::mpsc::Sender>; -type ProcessMessageReceiver = - tokio::sync::mpsc::Receiver>; - #[derive(Serialize, Deserialize)] struct StartProcessMetadata { source: t::Address, @@ -39,8 +34,11 @@ type Senders = HashMap; type ProcessHandles = HashMap>>; enum ProcessSender { - Runtime(t::MessageSender), - Userspace(ProcessMessageSender), + Runtime { + sender: t::MessageSender, + net_errors: Option, + }, + Userspace(t::ProcessMessageSender), } /// persist kernel's process_map state for next bootup @@ -671,7 +669,12 @@ pub async fn kernel( send_to_net: t::MessageSender, home_directory_path: String, contract_chain_and_address: (u64, String), - runtime_extensions: Vec<(t::ProcessId, t::MessageSender, bool)>, + runtime_extensions: Vec<( + t::ProcessId, + t::MessageSender, + Option, + bool, + )>, default_pki_entries: Vec, ) -> Result<()> { let mut config = Config::new(); @@ -689,10 +692,19 @@ pub async fn kernel( let mut senders: Senders = HashMap::new(); senders.insert( t::ProcessId::new(Some("net"), "distro", "sys"), - ProcessSender::Runtime(send_to_net.clone()), + ProcessSender::Runtime { + sender: send_to_net.clone(), + net_errors: None, // networking module does not accept net errors sent to it + }, ); - for (process_id, sender, _) in runtime_extensions { - senders.insert(process_id, ProcessSender::Runtime(sender)); + for (process_id, sender, net_error_sender, _) in runtime_extensions { + senders.insert( + process_id, + ProcessSender::Runtime { + sender, + net_errors: net_error_sender, + }, + ); } // each running process is stored in this map @@ -896,8 +908,8 @@ pub async fn kernel( Some(wrapped_network_error) = network_error_recv.recv() => { let _ = send_to_terminal.send( t::Printout { - verbosity: 2, - content: format!("event loop: got network error: {:?}", wrapped_network_error) + verbosity: 3, + content: format!("{wrapped_network_error:?}") } ).await; // forward the error to the relevant process @@ -905,10 +917,10 @@ pub async fn kernel( Some(ProcessSender::Userspace(sender)) => { let _ = sender.send(Err(wrapped_network_error)).await; } - Some(ProcessSender::Runtime(_sender)) => { - // TODO should runtime modules get these? no - // this will change if a runtime process ever makes - // a message directed to not-our-node + Some(ProcessSender::Runtime { net_errors, .. 
}) => { + if let Some(net_errors) = net_errors { + let _ = net_errors.send(wrapped_network_error).await; + } } None => { let _ = send_to_terminal @@ -1052,7 +1064,7 @@ pub async fn kernel( let _ = send_to_terminal.send( t::Printout { verbosity: 3, - content: format!("{}", kernel_message) + content: format!("{kernel_message}") } ).await; @@ -1082,7 +1094,7 @@ pub async fn kernel( Some(ProcessSender::Userspace(sender)) => { let _ = sender.send(Ok(kernel_message)).await; } - Some(ProcessSender::Runtime(sender)) => { + Some(ProcessSender::Runtime { sender, .. }) => { sender.send(kernel_message).await.expect("event loop: fatal: runtime module died"); } None => { diff --git a/kinode/src/kernel/process.rs b/kinode/src/kernel/process.rs index 1c1225d76..f5d7484f5 100644 --- a/kinode/src/kernel/process.rs +++ b/kinode/src/kernel/process.rs @@ -1,4 +1,3 @@ -use crate::kernel::{ProcessMessageReceiver, ProcessMessageSender}; use crate::KERNEL_PROCESS_ID; use anyhow::Result; use lib::types::core as t; @@ -29,9 +28,9 @@ pub struct ProcessState { /// information about ourself pub metadata: t::ProcessMetadata, /// pipe from which we get messages from the main event loop - pub recv_in_process: ProcessMessageReceiver, + pub recv_in_process: t::ProcessMessageReceiver, /// pipe to send messages to ourself (received in `recv_in_process`) - pub self_sender: ProcessMessageSender, + pub self_sender: t::ProcessMessageSender, /// pipe for sending messages to the main event loop pub send_to_loop: t::MessageSender, /// pipe for sending [`t::Printout`]s to the terminal @@ -488,8 +487,8 @@ pub async fn make_process_loop( metadata: t::ProcessMetadata, send_to_loop: t::MessageSender, send_to_terminal: t::PrintSender, - mut recv_in_process: ProcessMessageReceiver, - send_to_process: ProcessMessageSender, + mut recv_in_process: t::ProcessMessageReceiver, + send_to_process: t::ProcessMessageSender, wasm_bytes: Vec, caps_oracle: t::CapMessageSender, engine: Engine, diff --git a/kinode/src/main.rs b/kinode/src/main.rs index 99dd7335a..2c9d32c36 100644 --- a/kinode/src/main.rs +++ b/kinode/src/main.rs @@ -226,6 +226,8 @@ async fn main() { mpsc::channel(HTTP_CHANNEL_CAPACITY); let (eth_provider_sender, eth_provider_receiver): (MessageSender, MessageReceiver) = mpsc::channel(ETH_PROVIDER_CHANNEL_CAPACITY); + let (eth_net_error_sender, eth_net_error_receiver): (NetworkErrorSender, NetworkErrorReceiver) = + mpsc::channel(EVENT_LOOP_CHANNEL_CAPACITY); // http client performs http requests on behalf of processes let (http_client_sender, http_client_receiver): (MessageSender, MessageReceiver) = mpsc::channel(HTTP_CLIENT_CHANNEL_CAPACITY); @@ -393,41 +395,49 @@ async fn main() { ( ProcessId::new(Some("http_server"), "distro", "sys"), http_server_sender, + None, false, ), ( ProcessId::new(Some("http_client"), "distro", "sys"), http_client_sender, + None, false, ), ( ProcessId::new(Some("timer"), "distro", "sys"), timer_service_sender, + None, true, ), ( ProcessId::new(Some("eth"), "distro", "sys"), eth_provider_sender, + Some(eth_net_error_sender), false, ), ( ProcessId::new(Some("vfs"), "distro", "sys"), vfs_message_sender, + None, false, ), ( ProcessId::new(Some("state"), "distro", "sys"), state_sender, + None, false, ), ( ProcessId::new(Some("kv"), "distro", "sys"), kv_sender, + None, false, ), ( ProcessId::new(Some("sqlite"), "distro", "sys"), sqlite_sender, + None, false, ), ]; @@ -473,7 +483,7 @@ async fn main() { .clone() .into_iter() .filter_map(|config| { - if let lib::eth::NodeOrRpcUrl::Node(kns_update) = config.provider { + 
if let lib::eth::NodeOrRpcUrl::Node { kns_update, .. } = config.provider { Some(kns_update) } else { None @@ -548,11 +558,12 @@ async fn main() { timer_service_receiver, print_sender.clone(), )); - tasks.spawn(eth::provider::provider( + tasks.spawn(eth::provider( our.name.clone(), eth_provider_config, kernel_message_sender.clone(), eth_provider_receiver, + eth_net_error_receiver, caps_oracle_sender.clone(), print_sender.clone(), )); diff --git a/kinode/src/state.rs b/kinode/src/state.rs index 95ecc1b63..372248291 100644 --- a/kinode/src/state.rs +++ b/kinode/src/state.rs @@ -18,7 +18,7 @@ pub async fn load_state( our_name: String, keypair: Arc, home_directory_path: String, - runtime_extensions: Vec<(ProcessId, MessageSender, bool)>, + runtime_extensions: Vec<(ProcessId, MessageSender, Option, bool)>, ) -> Result<(ProcessMap, DB, ReverseCapIndex), StateError> { let state_path = format!("{}/kernel", &home_directory_path); @@ -72,7 +72,7 @@ pub async fn load_state( &our_name, keypair, home_directory_path.clone(), - runtime_extensions.clone(), + runtime_extensions, &mut process_map, &mut reverse_cap_index, ) @@ -307,7 +307,7 @@ async fn bootstrap( our_name: &str, keypair: Arc, home_directory_path: String, - runtime_extensions: Vec<(ProcessId, MessageSender, bool)>, + runtime_extensions: Vec<(ProcessId, MessageSender, Option, bool)>, process_map: &mut ProcessMap, reverse_cap_index: &mut ReverseCapIndex, ) -> Result<()> { @@ -382,7 +382,7 @@ async fn bootstrap( wit_version: None, on_exit: OnExit::Restart, capabilities: runtime_caps.clone(), - public: runtime_module.2, + public: runtime_module.3, }); current.capabilities.extend(runtime_caps.clone()); } diff --git a/lib/src/core.rs b/lib/src/core.rs index a3fc37695..b05a0f734 100644 --- a/lib/src/core.rs +++ b/lib/src/core.rs @@ -547,8 +547,8 @@ fn display_message(m: &Message, delimiter: &str) -> String { format!("expects_response: {:?},", request.expects_response), format!( "body: {},", - match serde_json::from_slice::(&request.body) { - Ok(json) => format!("{}", json), + match std::str::from_utf8(&request.body) { + Ok(str) => str.to_string(), Err(_) => format!("{:?}", request.body), } ), @@ -749,6 +749,10 @@ pub type DebugReceiver = tokio::sync::mpsc::Receiver; pub type CapMessageSender = tokio::sync::mpsc::Sender; pub type CapMessageReceiver = tokio::sync::mpsc::Receiver; +pub type ProcessMessageSender = tokio::sync::mpsc::Sender>; +pub type ProcessMessageReceiver = + tokio::sync::mpsc::Receiver>; + // // types used for onchain identity system // diff --git a/lib/src/eth.rs b/lib/src/eth.rs index c8cfd1e56..899276322 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -146,14 +146,17 @@ pub struct ProviderConfig { #[derive(Clone, Debug, Deserialize, Serialize)] pub enum NodeOrRpcUrl { - Node(crate::core::KnsUpdate), + Node { + kns_update: crate::core::KnsUpdate, + use_as_provider: bool, // for routers inside saved config + }, RpcUrl(String), } impl std::cmp::PartialEq for NodeOrRpcUrl { fn eq(&self, other: &str) -> bool { match self { - NodeOrRpcUrl::Node(kns) => kns.name == other, + NodeOrRpcUrl::Node { kns_update, .. 
} => kns_update.name == other, NodeOrRpcUrl::RpcUrl(url) => url == other, } } From 1b012d515f11c600dd99e5b14ef9c3fd31bdf9ad Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Wed, 28 Feb 2024 19:24:37 -0300 Subject: [PATCH 14/23] little fixes --- kinode/default_providers_testnet.json | 16 ++++- kinode/src/eth/mod.rs | 86 ++++++++++++++------------- kinode/src/kernel/mod.rs | 31 ++++++---- lib/src/eth.rs | 2 +- 4 files changed, 77 insertions(+), 58 deletions(-) diff --git a/kinode/default_providers_testnet.json b/kinode/default_providers_testnet.json index 0528870b9..30a16e7a9 100644 --- a/kinode/default_providers_testnet.json +++ b/kinode/default_providers_testnet.json @@ -1,4 +1,14 @@ [ + { + "chain_id": 1, + "trusted": false, + "provider": { + "RpcUrl": "wss://ethereum.publicnode.com" + }, + "public": false, + "allow": [], + "deny": [] + }, { "chain_id": 11155111, "trusted": true, @@ -13,9 +23,9 @@ "ip": "", "port": 0, "routers": [ - "default-router-1.os", - "default-router-2.os", - "default-router-3.os" + "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", + "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", + "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a" ] } } diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index f71ea0a08..1b54d6600 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -205,7 +205,7 @@ async fn handle_message( Ok(()) } Message::Request(req) => { - let timeout = *req.expects_response.as_ref().unwrap_or(&60); // TODO make this a config + let timeout = *req.expects_response.as_ref().unwrap_or(&600); // TODO make this a config let Ok(req) = serde_json::from_slice::(&req.body) else { return Err(EthError::MalformedRequest); }; @@ -278,17 +278,17 @@ async fn handle_eth_action( response_channels: &mut ResponseChannels, ) -> Result<(), EthError> { println!("provider: handle_eth_action: {eth_action:?}\r"); + println!("access settings: {access_settings:?}\r"); // check our access settings if the request is from a remote node if km.source.node != our { - if !access_settings.deny.contains(&km.source.node) { - if !access_settings.public { - if !access_settings.allow.contains(&km.source.node) { - return Err(EthError::PermissionDenied); - } - } - } else { + if access_settings.deny.contains(&km.source.node) { return Err(EthError::PermissionDenied); } + if !access_settings.public { + if !access_settings.allow.contains(&km.source.node) { + return Err(EthError::PermissionDenied); + } + } } // for each incoming action, we need to assign a provider from our map @@ -330,7 +330,7 @@ async fn handle_eth_action( }, None, true, - Some(60), // TODO + Some(600), // TODO serde_json::to_vec(ð_action).unwrap(), send_to_loop, ) @@ -346,36 +346,36 @@ async fn handle_eth_action( let send_to_loop = send_to_loop.clone(); let providers = providers.clone(); let response_channels = response_channels.clone(); - tokio::spawn(async move { - let res = tokio::time::timeout( - std::time::Duration::from_secs(timeout), - fulfill_request(&our, km.id, &send_to_loop, eth_action, providers, receiver), - ) - .await; - match res { - Ok(Ok(response)) => { - kernel_message( - &our, - km.id, - km.source, - km.rsvp, - false, - None, - response, - &send_to_loop, - ) + // tokio::spawn(async move { + let res = tokio::time::timeout( + std::time::Duration::from_secs(timeout), + fulfill_request(&our, km.id, &send_to_loop, eth_action, providers, receiver), + ) + .await; + match res { + Ok(Ok(response)) => { + kernel_message( + &our, + km.id, + km.source, + 
km.rsvp, + false, + None, + response, + &send_to_loop, + ) + .await; + } + Ok(Err(e)) => { + error_message(&our, km.id, km.source, e, &send_to_loop).await; + } + Err(_) => { + error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) .await; - } - Ok(Err(e)) => { - error_message(&our, km.id, km.source, e, &send_to_loop).await; - } - Err(_) => { - error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) - .await; - } } - response_channels.remove(&km.id); - }); + } + response_channels.remove(&km.id); + // }); } } Ok(()) @@ -538,7 +538,7 @@ async fn build_subscription( }, None, true, - Some(60), // TODO + Some(600), // TODO serde_json::to_vec(ð_action).unwrap(), &send_to_loop, ) @@ -664,7 +664,7 @@ async fn fulfill_request( return Ok(EthResponse::Response { value }); } for node_provider in &mut aps.nodes { - if !node_provider.usable { + if !node_provider.usable || node_provider.name == our { continue; } // in order, forward the request to each node provider @@ -678,8 +678,8 @@ async fn fulfill_request( }, None, true, - Some(60), // TODO - serde_json::to_vec(ð_action).unwrap(), + Some(600), // TODO + eth_action.clone(), &send_to_loop, ) .await; @@ -743,8 +743,10 @@ async fn handle_eth_config_action( .await .expect("eth: capability oracle died!"); if !recv_cap_bool.await.unwrap_or(false) { + println!("eth: capability oracle denied request, no cap\r"); return EthConfigResponse::PermissionDenied; } + println!("cap valid\r"); // modify our providers and access settings based on config action match eth_config_action { @@ -763,9 +765,11 @@ async fn handle_eth_config_action( } } EthConfigAction::SetPublic => { + println!("set public\r"); access_settings.public = true; } EthConfigAction::SetPrivate => { + println!("set private\r"); access_settings.public = false; } EthConfigAction::AllowNode(node) => { diff --git a/kinode/src/kernel/mod.rs b/kinode/src/kernel/mod.rs index 4090bda82..d0ccc2c38 100644 --- a/kinode/src/kernel/mod.rs +++ b/kinode/src/kernel/mod.rs @@ -453,13 +453,6 @@ async fn handle_kernel_request( // brutal and savage killing: aborting the task. 
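        // A minimal, self-contained sketch of this abort pattern follows.
        // The looping task is a placeholder, not code from this patch, and
        // the sketch assumes tokio = { version = "1", features = ["full"] }:
        //
        //     #[tokio::main]
        //     async fn main() {
        //         let handle = tokio::spawn(async {
        //             loop {
        //                 tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        //             }
        //         });
        //         handle.abort(); // cancels the task at its next .await point
        //         assert!(handle.await.unwrap_err().is_cancelled());
        //     }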
// do not do this to a process if you don't want to risk // dropped messages / un-replied-to-requests / revoked caps - caps_oracle - .send(t::CapMessage::RevokeAll { - on: process_id.clone(), - responder: tokio::sync::oneshot::channel().0, - }) - .await - .expect("event loop: fatal: sender died"); let _ = senders.remove(&process_id); let process_handle = match process_handles.remove(&process_id) { Some(ph) => ph, @@ -481,7 +474,13 @@ async fn handle_kernel_request( .await; process_handle.abort(); process_map.remove(&process_id); - let _ = persist_state(&our_name, &send_to_loop, process_map).await; + caps_oracle + .send(t::CapMessage::RevokeAll { + on: process_id.clone(), + responder: tokio::sync::oneshot::channel().0, + }) + .await + .expect("event loop: fatal: sender died"); if request.expects_response.is_none() { return; } @@ -1116,6 +1115,12 @@ pub async fn kernel( }, // capabilities oracle: handles all requests to add, drop, and check capabilities Some(cap_message) = caps_oracle_receiver.recv() => { + let _ = send_to_terminal.send( + t::Printout { + verbosity: 3, + content: format!("{cap_message:?}") + } + ).await; match cap_message { t::CapMessage::Add { on, caps, responder } => { // insert cap in process map @@ -1173,16 +1178,16 @@ pub async fn kernel( }, t::CapMessage::RevokeAll { on, responder } => { let Some(granter) = reverse_cap_index.get(&on) else { + let _ = persist_state(&our.name, &send_to_loop, &process_map).await; let _ = responder.send(true); continue; }; for (grantee, caps) in granter { - let Some(entry) = process_map.get_mut(&grantee) else { - continue; + if let Some(entry) = process_map.get_mut(&grantee) { + for cap in caps { + entry.capabilities.remove(&cap); + } }; - for cap in caps { - entry.capabilities.remove(&cap); - } } let _ = persist_state(&our.name, &send_to_loop, &process_map).await; let _ = responder.send(true); diff --git a/lib/src/eth.rs b/lib/src/eth.rs index 899276322..4a241d029 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -6,7 +6,7 @@ use std::collections::HashSet; /// capabilities can send this action to the eth provider. /// /// Will be serialized and deserialized using `serde_json::to_vec` and `serde_json::from_slice`. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub enum EthAction { /// Subscribe to logs with a custom filter. ID is to be used to unsubscribe. /// Logs come in as alloy_rpc_types::pubsub::SubscriptionResults From 67bf726a2d51e9861f9de37a8729117f586dfa6e Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Wed, 28 Feb 2024 19:49:02 -0300 Subject: [PATCH 15/23] node<>node subscriptions working --- kinode/src/eth/mod.rs | 73 ++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 39 deletions(-) diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index 1b54d6600..724573b59 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -297,7 +297,7 @@ async fn handle_eth_action( // before returning an error. match eth_action { EthAction::SubscribeLogs { sub_id, .. 
} => { - create_new_subscription( + tokio::spawn(create_new_subscription( our.to_string(), km.id, km.source.clone(), @@ -308,8 +308,7 @@ async fn handle_eth_action( providers.clone(), active_subscriptions.clone(), response_channels.clone(), - ) - .await; + )); } EthAction::UnsubscribeLogs(sub_id) => { let mut sub_map = active_subscriptions @@ -346,36 +345,36 @@ async fn handle_eth_action( let send_to_loop = send_to_loop.clone(); let providers = providers.clone(); let response_channels = response_channels.clone(); - // tokio::spawn(async move { - let res = tokio::time::timeout( - std::time::Duration::from_secs(timeout), - fulfill_request(&our, km.id, &send_to_loop, eth_action, providers, receiver), - ) - .await; - match res { - Ok(Ok(response)) => { - kernel_message( - &our, - km.id, - km.source, - km.rsvp, - false, - None, - response, - &send_to_loop, - ) - .await; - } - Ok(Err(e)) => { - error_message(&our, km.id, km.source, e, &send_to_loop).await; - } - Err(_) => { - error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) + tokio::spawn(async move { + let res = tokio::time::timeout( + std::time::Duration::from_secs(timeout), + fulfill_request(&our, km.id, &send_to_loop, eth_action, providers, receiver), + ) + .await; + match res { + Ok(Ok(response)) => { + kernel_message( + &our, + km.id, + km.source, + km.rsvp, + false, + None, + response, + &send_to_loop, + ) .await; + } + Ok(Err(e)) => { + error_message(&our, km.id, km.source, e, &send_to_loop).await; + } + Err(_) => { + error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) + .await; + } } - } - response_channels.remove(&km.id); - // }); + response_channels.remove(&km.id); + }); } } Ok(()) @@ -408,7 +407,7 @@ async fn create_new_subscription( .await { Ok((Some(future), None)) => { - // this is a local sub + // this is a local sub, as in, we connect to the rpc endpt // send a response to the target that the subscription was successful kernel_message( &our, @@ -539,7 +538,7 @@ async fn build_subscription( None, true, Some(600), // TODO - serde_json::to_vec(ð_action).unwrap(), + eth_action, &send_to_loop, ) .await; @@ -563,14 +562,10 @@ async fn build_subscription( node_provider.usable = false; continue; } - if let EthResponse::Err(error) = ð_response { + if let EthResponse::Err(_error) = ð_response { // if we hit this, they sent an error, if it's an error that might // not be our fault, we can try another provider - match error { - EthError::NoRpcForChain => continue, - EthError::PermissionDenied => continue, - _ => {} - } + continue; } kernel_message( &our, From 8d21d5693f451be040a5b3a2dcbf3f58a67b0aad Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Thu, 29 Feb 2024 02:08:27 -0300 Subject: [PATCH 16/23] fix some sub bugs --- kinode/src/eth/mod.rs | 82 ++++++++++++++++++++++++++++++------------- 1 file changed, 58 insertions(+), 24 deletions(-) diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index 724573b59..00b13a0e2 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -119,7 +119,7 @@ pub async fn provider( mut recv_in_client: MessageReceiver, mut net_error_recv: NetworkErrorReceiver, caps_oracle: CapMessageSender, - _print_tx: PrintSender, + print_tx: PrintSender, ) -> Result<()> { let our = Arc::new(our); @@ -149,6 +149,23 @@ pub async fn provider( loop { tokio::select! 
{ Some(wrapped_error) = net_error_recv.recv() => { + let _ = print_tx.send( + Printout { verbosity: 2, content: "eth: got network error".to_string() } + ).await; + // if we hold active subscriptions for the remote node that this error refers to, + // close them here -- they will need to resubscribe + if let Some(sub_map) = active_subscriptions.get(&wrapped_error.source) { + for (_sub_id, sub) in sub_map.iter() { + if let ActiveSub::Local(handle) = sub { + let _ = print_tx.send( + Printout { + verbosity: 2, + content: "eth: closing remote sub in response to network error".to_string() + }).await; + handle.abort(); + } + } + } // we got an error from a remote node provider -- // forward it to response channel if it exists if let Some(chan) = response_channels.get(&wrapped_error.id) { @@ -205,7 +222,7 @@ async fn handle_message( Ok(()) } Message::Request(req) => { - let timeout = *req.expects_response.as_ref().unwrap_or(&600); // TODO make this a config + let timeout = *req.expects_response.as_ref().unwrap_or(&60); // TODO make this a config let Ok(req) = serde_json::from_slice::(&req.body) else { return Err(EthError::MalformedRequest); }; @@ -247,18 +264,36 @@ async fn handle_message( Ok(()) } IncomingReq::EthSubResult(eth_sub_result) => { - // forward this to rsvp - kernel_message( - our, - km.id, - km.source.clone(), - km.rsvp.clone(), - true, - None, - eth_sub_result, - send_to_loop, - ) - .await; + println!("eth: got eth_sub_result\r"); + // forward this to rsvp, if we have the sub id in our active subs + let Some(rsvp) = km.rsvp else { + return Ok(()); // no rsvp, no need to forward + }; + let sub_id = match ð_sub_result { + Ok(EthSub { id, .. }) => id, + Err(EthSubError { id, .. }) => id, + }; + if let Some(sub_map) = active_subscriptions.get(&rsvp) { + if let Some(sub) = sub_map.get(sub_id) { + if let ActiveSub::Remote(node_provider) = sub { + if node_provider == &km.source.node { + kernel_message( + our, + km.id, + rsvp, + None, + true, + None, + eth_sub_result, + send_to_loop, + ) + .await; + return Ok(()); + } + } + } + } + println!("eth: got eth_sub_result but no matching sub found\r"); Ok(()) } } @@ -329,7 +364,7 @@ async fn handle_eth_action( }, None, true, - Some(600), // TODO + Some(60), // TODO serde_json::to_vec(ð_action).unwrap(), send_to_loop, ) @@ -406,7 +441,7 @@ async fn create_new_subscription( ) .await { - Ok((Some(future), None)) => { + Ok((Some(maintain_subscription), None)) => { // this is a local sub, as in, we connect to the rpc endpt // send a response to the target that the subscription was successful kernel_message( @@ -423,15 +458,14 @@ async fn create_new_subscription( let mut subs = active_subscriptions .entry(target.clone()) .or_insert(HashMap::new()); - let target2 = target.clone(); - let active_subs = active_subscriptions.clone(); + let active_subscriptions = active_subscriptions.clone(); subs.insert( sub_id, ActiveSub::Local(tokio::spawn(async move { // await the subscription error and kill it if so - if let Err(e) = future.await { - error_message(&our, km_id, target2.clone(), e, &send_to_loop).await; - active_subs.entry(target2).and_modify(|sub_map| { + if let Err(e) = maintain_subscription.await { + error_message(&our, km_id, target.clone(), e, &send_to_loop).await; + active_subscriptions.entry(target).and_modify(|sub_map| { sub_map.remove(&km_id); }); } @@ -535,9 +569,9 @@ async fn build_subscription( node: node_provider.name.clone(), process: ETH_PROCESS_ID.clone(), }, - None, + rsvp.clone(), true, - Some(600), // TODO + Some(60), // TODO eth_action, 
&send_to_loop, ) @@ -673,7 +707,7 @@ async fn fulfill_request( }, None, true, - Some(600), // TODO + Some(60), // TODO eth_action.clone(), &send_to_loop, ) From ca756d537e5cbf2f03172a7b6b04e3b136035bb3 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Thu, 29 Feb 2024 03:22:37 -0300 Subject: [PATCH 17/23] adjust defaults, nits --- kinode/default_providers_mainnet.json | 6 ++--- kinode/default_providers_testnet.json | 37 +++++---------------------- kinode/src/eth/mod.rs | 6 ++++- kinode/src/main.rs | 18 ++++++------- 4 files changed, 22 insertions(+), 45 deletions(-) diff --git a/kinode/default_providers_mainnet.json b/kinode/default_providers_mainnet.json index 8a04c72af..b83f72e7a 100644 --- a/kinode/default_providers_mainnet.json +++ b/kinode/default_providers_mainnet.json @@ -8,7 +8,7 @@ }, { "chain_id": 10, - "trusted": true, + "trusted": false, "provider": { "Node": { "use_as_provider": true, @@ -26,7 +26,7 @@ }, { "chain_id": 10, - "trusted": true, + "trusted": false, "provider": { "Node": { "use_as_provider": true, @@ -44,7 +44,7 @@ }, { "chain_id": 10, - "trusted": true, + "trusted": false, "provider": { "Node": { "use_as_provider": true, diff --git a/kinode/default_providers_testnet.json b/kinode/default_providers_testnet.json index 30a16e7a9..528abfd41 100644 --- a/kinode/default_providers_testnet.json +++ b/kinode/default_providers_testnet.json @@ -4,39 +4,14 @@ "trusted": false, "provider": { "RpcUrl": "wss://ethereum.publicnode.com" - }, - "public": false, - "allow": [], - "deny": [] - }, - { - "chain_id": 11155111, - "trusted": true, - "provider": { - "Node": { - "use_as_provider": true, - "kns_update": { - "name": "sepoliarocks.os", - "owner": "", - "node": "0x2b2e9479333c5f94b62a242d75298ce98d13ad0af95070bc0b8d35aacdbddfa7", - "public_key": "0x958a3f43aee848826db2c0b36545e1e775bf310b003f0d7abf72ab8697a1b72c", - "ip": "", - "port": 0, - "routers": [ - "0xb35eb347deb896bc3fb6132a07fca1601f83462385ed11e835c24c33ba4ef73d", - "0xd827ae579fafa604af79fbed977e8abe048497f10885c6473dfd343a3b7b4458", - "0x96e36331c8f0882f2c0c46c13b15d812def04fe8606d503bc0e2be39db26486a" - ] - } - } } }, { "chain_id": 11155111, - "trusted": true, + "trusted": false, "provider": { "Node": { - "use_as_provider": false, + "use_as_provider": true, "kns_update": { "name": "default-router-1.os", "owner": "", @@ -51,10 +26,10 @@ }, { "chain_id": 11155111, - "trusted": true, + "trusted": false, "provider": { "Node": { - "use_as_provider": false, + "use_as_provider": true, "kns_update": { "name": "default-router-2.os", "owner": "", @@ -69,10 +44,10 @@ }, { "chain_id": 11155111, - "trusted": true, + "trusted": false, "provider": { "Node": { - "use_as_provider": false, + "use_as_provider": true, "kns_update": { "name": "default-router-3.os", "owner": "", diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index 00b13a0e2..c272630d9 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -222,7 +222,11 @@ async fn handle_message( Ok(()) } Message::Request(req) => { - let timeout = *req.expects_response.as_ref().unwrap_or(&60); // TODO make this a config + let Some(timeout) = req.expects_response else { + // if they don't want a response, we don't need to do anything + // might as well throw it away + return Err(EthError::MalformedRequest); + }; let Ok(req) = serde_json::from_slice::(&req.body) else { return Err(EthError::MalformedRequest); }; diff --git a/kinode/src/main.rs b/kinode/src/main.rs index 641ad753b..f731599e6 100644 --- a/kinode/src/main.rs +++ b/kinode/src/main.rs @@ -37,12 +37,6 @@ const 
SQLITE_CHANNEL_CAPACITY: usize = 1_000; const VERSION: &str = env!("CARGO_PKG_VERSION"); -/// This can and should be an environment variable / setting. It configures networking -/// such that indirect nodes always use routers, even when target is a direct node, -/// such that only their routers can ever see their physical networking details. -#[cfg(not(feature = "simulation-mode"))] -const REVEAL_IP: bool = true; - /// default routers as a eth-provider fallback const DEFAULT_PROVIDERS_TESTNET: &str = include_str!("../default_providers_testnet.json"); const DEFAULT_PROVIDERS_MAINNET: &str = include_str!("../default_providers_mainnet.json"); @@ -112,6 +106,11 @@ async fn main() { arg!(--verbosity "Verbosity level: higher is more verbose") .default_value("0") .value_parser(value_parser!(u8)), + ) + .arg( + arg!(--"reveal-ip" "If set to false, as an indirect node, always use routers to connect to other nodes.") + .default_value("true") + .value_parser(value_parser!(bool)), ); #[cfg(feature = "simulation-mode")] @@ -190,9 +189,9 @@ async fn main() { // default eth providers/routers let eth_provider_config: lib::eth::SavedConfigs = - match fs::read_to_string(format!("{}/.saved_providers", home_directory_path)).await { + match fs::read_to_string(format!("{}/.eth_providers", home_directory_path)).await { Ok(contents) => { - println!("loaded saved providers\r"); + println!("loaded saved eth providers\r"); serde_json::from_str(&contents).unwrap() } Err(_) => match on_testnet { @@ -508,7 +507,7 @@ async fn main() { net_message_sender, net_message_receiver, contract_chain_and_address.1, - REVEAL_IP, + *matches.get_one::("reveal-ip").unwrap_or(&true), )); #[cfg(feature = "simulation-mode")] tasks.spawn(net::mock_client( @@ -644,5 +643,4 @@ async fn main() { crossterm::terminal::SetTitle(""), ); println!("\r\n\x1b[38;5;196m{}\x1b[0m", quit_msg); - return; } From 425a3995b5bd6bebfb227ba97aa75d32a20cb5c9 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Fri, 1 Mar 2024 11:56:59 -0300 Subject: [PATCH 18/23] eth: make error messages requests, when necessary --- kinode/src/eth/mod.rs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index c272630d9..f9ec10f5e 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -189,7 +189,7 @@ pub async fn provider( ) .await { - error_message(&our, km_id, response_target, e, &send_to_loop).await; + error_message(&our, km_id, response_target, false, e, &send_to_loop).await; }; } } @@ -405,11 +405,18 @@ async fn handle_eth_action( .await; } Ok(Err(e)) => { - error_message(&our, km.id, km.source, e, &send_to_loop).await; + error_message(&our, km.id, km.source, false, e, &send_to_loop).await; } Err(_) => { - error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) - .await; + error_message( + &our, + km.id, + km.source, + false, + EthError::RpcTimeout, + &send_to_loop, + ) + .await; } } response_channels.remove(&km.id); @@ -468,7 +475,7 @@ async fn create_new_subscription( ActiveSub::Local(tokio::spawn(async move { // await the subscription error and kill it if so if let Err(e) = maintain_subscription.await { - error_message(&our, km_id, target.clone(), e, &send_to_loop).await; + error_message(&our, km_id, target.clone(), true, e, &send_to_loop).await; active_subscriptions.entry(target).and_modify(|sub_map| { sub_map.remove(&km_id); }); @@ -484,7 +491,7 @@ async fn create_new_subscription( subs.insert(sub_id, ActiveSub::Remote(provider_node)); } Err(e) => { - 
error_message(&our, km_id, target.clone(), e, &send_to_loop).await; + error_message(&our, km_id, target.clone(), false, e, &send_to_loop).await; } _ => panic!(), } @@ -874,6 +881,7 @@ async fn error_message( our: &str, km_id: u64, target: Address, + req: bool, error: EthError, send_to_loop: &MessageSender, ) { @@ -883,7 +891,7 @@ async fn error_message( km_id, target, None, - false, + req, None, EthResponse::Err(error), send_to_loop, From a2286ad3c46a068cf16a3bdbcf7a77f1067a64a8 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Fri, 1 Mar 2024 18:58:50 -0300 Subject: [PATCH 19/23] refactor to reduce crazy repetition --- kinode/src/eth/mod.rs | 701 ++++++++++++++++++++++-------------------- lib/src/eth.rs | 2 +- 2 files changed, 371 insertions(+), 332 deletions(-) diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index f9ec10f5e..1ef0c2880 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -88,8 +88,24 @@ impl ActiveProviders { } } +struct ModuleState { + /// the name of this node + our: Arc, + /// the access settings for this provider + access_settings: AccessSettings, + /// the set of providers we have available for all chains + providers: Providers, + /// the set of active subscriptions we are currently maintaining + active_subscriptions: ActiveSubscriptions, + /// the set of response channels we have open for outstanding request tasks + response_channels: ResponseChannels, + /// our sender for kernel event loop + send_to_loop: MessageSender, + /// our sender for terminal prints + print_tx: PrintSender, +} + async fn activate_url_provider(provider: &mut UrlProvider) -> Result<()> { - println!("provider: activate_url_provider\r"); match Url::parse(&provider.url)?.scheme() { "ws" | "wss" => { let connector = WsConnect { @@ -111,7 +127,8 @@ async fn activate_url_provider(provider: &mut UrlProvider) -> Result<()> { } /// The ETH provider runtime process is responsible for connecting to one or more ETH RPC providers -/// and using them to service indexing requests from other apps. +/// and using them to service indexing requests from other apps. This is the runtime entry point +/// for the entire module. pub async fn provider( our: String, configs: SavedConfigs, @@ -121,105 +138,118 @@ pub async fn provider( caps_oracle: CapMessageSender, print_tx: PrintSender, ) -> Result<()> { - let our = Arc::new(our); - - let mut access_settings = AccessSettings { - public: false, - allow: HashSet::new(), - deny: HashSet::new(), + let mut state = ModuleState { + our: Arc::new(our), + access_settings: AccessSettings { + public: false, + allow: HashSet::new(), + deny: HashSet::new(), + }, + providers: Arc::new(DashMap::new()), + active_subscriptions: Arc::new(DashMap::new()), + response_channels: Arc::new(DashMap::new()), + send_to_loop, + print_tx, }; // convert saved configs into data structure that we will use to route queries - let mut providers: Providers = Arc::new(DashMap::new()); for entry in configs { - let mut ap = providers.entry(entry.chain_id).or_insert(ActiveProviders { - urls: vec![], - nodes: vec![], - }); + let mut ap = state + .providers + .entry(entry.chain_id) + .or_insert(ActiveProviders { + urls: vec![], + nodes: vec![], + }); ap.add_provider_config(entry); } - // handles of longrunning subscriptions. 
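// A minimal, self-contained sketch of the DashMap entry-API grouping that the
// reworked provider() setup below uses to bucket saved configs by chain id.
// The (u64, String) pairs stand in for SavedConfigs entries, and the URLs
// other than the publicnode one are placeholders, not data from this patch.
// Assumes dashmap = "5" in Cargo.toml.
use dashmap::DashMap;

fn main() {
    let by_chain: DashMap<u64, Vec<String>> = DashMap::new();
    let configs = [
        (1u64, "wss://ethereum.publicnode.com".to_string()),
        (1, "wss://some-other-mainnet-rpc.example".to_string()),
        (11155111, "wss://some-sepolia-rpc.example".to_string()),
    ];
    for (chain_id, url) in configs {
        // entry().or_insert_with() creates the per-chain bucket on first use,
        // like providers.entry(entry.chain_id).or_insert(ActiveProviders { .. })
        by_chain.entry(chain_id).or_insert_with(Vec::new).push(url);
    }
    assert_eq!(by_chain.get(&1).unwrap().len(), 2);
}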
- let mut active_subscriptions: ActiveSubscriptions = Arc::new(DashMap::new()); - - // channels to pass incoming responses to outstanding requests - // keyed by KM ID - let mut response_channels: Arc> = Arc::new(DashMap::new()); + verbose_print(&state.print_tx, "eth: provider initialized").await; loop { tokio::select! { Some(wrapped_error) = net_error_recv.recv() => { - let _ = print_tx.send( - Printout { verbosity: 2, content: "eth: got network error".to_string() } + handle_network_error( + wrapped_error, + &state.active_subscriptions, + &state.response_channels, + &state.print_tx ).await; - // if we hold active subscriptions for the remote node that this error refers to, - // close them here -- they will need to resubscribe - if let Some(sub_map) = active_subscriptions.get(&wrapped_error.source) { - for (_sub_id, sub) in sub_map.iter() { - if let ActiveSub::Local(handle) = sub { - let _ = print_tx.send( - Printout { - verbosity: 2, - content: "eth: closing remote sub in response to network error".to_string() - }).await; - handle.abort(); - } - } - } - // we got an error from a remote node provider -- - // forward it to response channel if it exists - if let Some(chan) = response_channels.get(&wrapped_error.id) { - // can't close channel here, as response may be an error - // and fulfill_request may wish to try other providers. - let _ = chan.send(Err(wrapped_error)).await; - } } Some(km) = recv_in_client.recv() => { let km_id = km.id; let response_target = km.rsvp.as_ref().unwrap_or(&km.source).clone(); if let Err(e) = handle_message( - &our, - &mut access_settings, + &mut state, km, - &send_to_loop, &caps_oracle, - &mut providers, - &mut active_subscriptions, - &mut response_channels, ) .await { - error_message(&our, km_id, response_target, false, e, &send_to_loop).await; + error_message( + &state.our, + km_id, + response_target, + e, + &state.send_to_loop + ).await; }; } } } } +async fn handle_network_error( + wrapped_error: WrappedSendError, + active_subscriptions: &ActiveSubscriptions, + response_channels: &ResponseChannels, + print_tx: &PrintSender, +) { + verbose_print(&print_tx, "eth: got network error").await; + // if we hold active subscriptions for the remote node that this error refers to, + // close them here -- they will need to resubscribe + if let Some(sub_map) = active_subscriptions.get(&wrapped_error.source) { + for (_sub_id, sub) in sub_map.iter() { + if let ActiveSub::Local(handle) = sub { + verbose_print( + &print_tx, + "eth: closing local sub in response to network error", + ) + .await; + handle.abort(); + } + } + } + // we got an error from a remote node provider -- + // forward it to response channel if it exists + if let Some(chan) = response_channels.get(&wrapped_error.id) { + // can't close channel here, as response may be an error + // and fulfill_request may wish to try other providers. + let _ = chan.send(Err(wrapped_error)).await; + } +} + /// handle incoming requests, namely [`EthAction`] and [`EthConfigAction`]. /// also handle responses that are passthroughs from remote provider nodes. 
async fn handle_message( - our: &str, - access_settings: &mut AccessSettings, + state: &mut ModuleState, km: KernelMessage, - send_to_loop: &MessageSender, caps_oracle: &CapMessageSender, - providers: &mut Providers, - active_subscriptions: &mut ActiveSubscriptions, - response_channels: &mut ResponseChannels, ) -> Result<(), EthError> { - println!("provider: handle_message\r"); match &km.message { Message::Response(_) => { // map response to the correct channel - if let Some(chan) = response_channels.get(&km.id) { + if let Some(chan) = state.response_channels.get(&km.id) { // can't close channel here, as response may be an error // and fulfill_request may wish to try other providers. let _ = chan.send(Ok(km)).await; } else { - println!("eth: got weird response!!\r"); + verbose_print( + &state.print_tx, + "eth: got response but no matching channel found", + ) + .await; } - Ok(()) } Message::Request(req) => { let Some(timeout) = req.expects_response else { @@ -232,43 +262,22 @@ async fn handle_message( }; match req { IncomingReq::EthAction(eth_action) => { - handle_eth_action( - our, - access_settings, - send_to_loop, - km, - timeout, - eth_action, - providers, - active_subscriptions, - response_channels, - ) - .await + return handle_eth_action(state, km, timeout, eth_action).await; } IncomingReq::EthConfigAction(eth_config_action) => { kernel_message( - our, + &state.our.clone(), km.id, - km.source.clone(), - km.rsvp.clone(), + km.rsvp.as_ref().unwrap_or(&km.source).clone(), + None, false, None, - handle_eth_config_action( - our, - access_settings, - caps_oracle, - &km, - eth_config_action, - providers, - ) - .await, - send_to_loop, + handle_eth_config_action(state, caps_oracle, &km, eth_config_action).await, + &state.send_to_loop, ) .await; - Ok(()) } IncomingReq::EthSubResult(eth_sub_result) => { - println!("eth: got eth_sub_result\r"); // forward this to rsvp, if we have the sub id in our active subs let Some(rsvp) = km.rsvp else { return Ok(()); // no rsvp, no need to forward @@ -277,19 +286,19 @@ async fn handle_message( Ok(EthSub { id, .. }) => id, Err(EthSubError { id, .. 
}) => id, }; - if let Some(sub_map) = active_subscriptions.get(&rsvp) { + if let Some(sub_map) = state.active_subscriptions.get(&rsvp) { if let Some(sub) = sub_map.get(sub_id) { if let ActiveSub::Remote(node_provider) = sub { if node_provider == &km.source.node { kernel_message( - our, + &state.our, km.id, rsvp, None, true, None, eth_sub_result, - send_to_loop, + &state.send_to_loop, ) .await; return Ok(()); @@ -297,34 +306,31 @@ async fn handle_message( } } } - println!("eth: got eth_sub_result but no matching sub found\r"); - Ok(()) + verbose_print( + &state.print_tx, + "eth: got eth_sub_result but no matching sub found", + ) + .await; } } } } + Ok(()) } async fn handle_eth_action( - our: &str, - access_settings: &mut AccessSettings, - send_to_loop: &MessageSender, + state: &mut ModuleState, km: KernelMessage, timeout: u64, eth_action: EthAction, - providers: &mut Providers, - active_subscriptions: &mut ActiveSubscriptions, - response_channels: &mut ResponseChannels, ) -> Result<(), EthError> { - println!("provider: handle_eth_action: {eth_action:?}\r"); - println!("access settings: {access_settings:?}\r"); // check our access settings if the request is from a remote node - if km.source.node != our { - if access_settings.deny.contains(&km.source.node) { + if km.source.node != *state.our { + if state.access_settings.deny.contains(&km.source.node) { return Err(EthError::PermissionDenied); } - if !access_settings.public { - if !access_settings.allow.contains(&km.source.node) { + if !state.access_settings.public { + if !state.access_settings.allow.contains(&km.source.node) { return Err(EthError::PermissionDenied); } } @@ -337,20 +343,22 @@ async fn handle_eth_action( match eth_action { EthAction::SubscribeLogs { sub_id, .. } => { tokio::spawn(create_new_subscription( - our.to_string(), + state.our.to_string(), km.id, km.source.clone(), km.rsvp, - send_to_loop.clone(), + state.send_to_loop.clone(), sub_id, eth_action, - providers.clone(), - active_subscriptions.clone(), - response_channels.clone(), + state.providers.clone(), + state.active_subscriptions.clone(), + state.response_channels.clone(), + state.print_tx.clone(), )); } EthAction::UnsubscribeLogs(sub_id) => { - let mut sub_map = active_subscriptions + let mut sub_map = state + .active_subscriptions .entry(km.source) .or_insert(HashMap::new()); if let Some(sub) = sub_map.remove(&sub_id) { @@ -360,7 +368,7 @@ async fn handle_eth_action( } ActiveSub::Remote(node) => { kernel_message( - our, + &state.our, rand::random(), Address { node: node.clone(), @@ -370,7 +378,7 @@ async fn handle_eth_action( true, Some(60), // TODO serde_json::to_vec(ð_action).unwrap(), - send_to_loop, + &state.send_to_loop, ) .await; } @@ -379,24 +387,33 @@ async fn handle_eth_action( } EthAction::Request { .. 
} => { let (sender, receiver) = tokio::sync::mpsc::channel(1); - response_channels.insert(km.id, sender); - let our = our.to_string(); - let send_to_loop = send_to_loop.clone(); - let providers = providers.clone(); - let response_channels = response_channels.clone(); + state.response_channels.insert(km.id, sender); + let our = state.our.to_string(); + let send_to_loop = state.send_to_loop.clone(); + let providers = state.providers.clone(); + let response_channels = state.response_channels.clone(); + let print_tx = state.print_tx.clone(); tokio::spawn(async move { - let res = tokio::time::timeout( + match tokio::time::timeout( std::time::Duration::from_secs(timeout), - fulfill_request(&our, km.id, &send_to_loop, eth_action, providers, receiver), + fulfill_request( + &our, + km.id, + &send_to_loop, + eth_action, + providers, + receiver, + &print_tx, + ), ) - .await; - match res { - Ok(Ok(response)) => { + .await + { + Ok(response) => { kernel_message( &our, km.id, - km.source, - km.rsvp, + km.rsvp.unwrap_or(km.source), + None, false, None, response, @@ -404,19 +421,10 @@ async fn handle_eth_action( ) .await; } - Ok(Err(e)) => { - error_message(&our, km.id, km.source, false, e, &send_to_loop).await; - } Err(_) => { - error_message( - &our, - km.id, - km.source, - false, - EthError::RpcTimeout, - &send_to_loop, - ) - .await; + // task timeout + error_message(&our, km.id, km.source, EthError::RpcTimeout, &send_to_loop) + .await; } } response_channels.remove(&km.id); @@ -438,17 +446,19 @@ async fn create_new_subscription( providers: Providers, active_subscriptions: ActiveSubscriptions, response_channels: ResponseChannels, + print_tx: PrintSender, ) { - println!("provider: create_new_subscription\r"); + verbose_print(&print_tx, "eth: creating new subscription").await; match build_subscription( - our.clone(), + &our, km_id, - target.clone(), - rsvp.clone(), - send_to_loop.clone(), + &target, + &rsvp, + &send_to_loop, ð_action, - providers, - response_channels.clone(), + &providers, + &response_channels, + &print_tx, ) .await { @@ -475,7 +485,20 @@ async fn create_new_subscription( ActiveSub::Local(tokio::spawn(async move { // await the subscription error and kill it if so if let Err(e) = maintain_subscription.await { - error_message(&our, km_id, target.clone(), true, e, &send_to_loop).await; + kernel_message( + &our, + rand::random(), + target.clone(), + None, + true, + None, + EthSubResult::Err(EthSubError { + id: sub_id, + error: e, + }), + &send_to_loop, + ) + .await; active_subscriptions.entry(target).and_modify(|sub_map| { sub_map.remove(&km_id); }); @@ -491,30 +514,30 @@ async fn create_new_subscription( subs.insert(sub_id, ActiveSub::Remote(provider_node)); } Err(e) => { - error_message(&our, km_id, target.clone(), false, e, &send_to_loop).await; + error_message(&our, km_id, target.clone(), e, &send_to_loop).await; } _ => panic!(), } } async fn build_subscription( - our: String, + our: &str, km_id: u64, - target: Address, - rsvp: Option
<Address>,
-    send_to_loop: MessageSender,
+    target: &Address,
+    rsvp: &Option<Address>
, + send_to_loop: &MessageSender, eth_action: &EthAction, - providers: Providers, - response_channels: ResponseChannels, + providers: &Providers, + response_channels: &ResponseChannels, + print_tx: &PrintSender, ) -> Result< ( // this is dumb, sorry - Option>>, + Option>>, Option, ), EthError, > { - println!("provider: build_subscription\r"); let EthAction::SubscribeLogs { sub_id, chain_id, @@ -535,6 +558,7 @@ async fn build_subscription( Some(pubsub) => pubsub, None => { if let Ok(()) = activate_url_provider(url_provider).await { + verbose_print(print_tx, "eth: activated a url provider").await; url_provider.pubsub.as_ref().unwrap() } else { continue; @@ -551,12 +575,12 @@ async fn build_subscription( let rx = pubsub.inner().get_raw_subscription(id).await; return Ok(( Some(maintain_subscription( - our, + our.to_string(), *sub_id, rx, - target, - rsvp, - send_to_loop, + target.clone(), + rsvp.clone(), + send_to_loop.clone(), )), None, )); @@ -568,63 +592,42 @@ async fn build_subscription( let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1); response_channels.insert(km_id, sender); for node_provider in &mut aps.nodes { - if !node_provider.usable { - continue; - } - // in order, forward the request to each node provider - // until one sends back a satisfactory response - kernel_message( + match forward_to_node_provider( &our, km_id, - Address { - node: node_provider.name.clone(), - process: ETH_PROCESS_ID.clone(), - }, rsvp.clone(), - true, - Some(60), // TODO - eth_action, + node_provider, + eth_action.clone(), &send_to_loop, + &mut response_receiver, ) - .await; - let Some(Ok(response_km)) = response_receiver.recv().await else { - // our message timed out or receiver was offline - println!("provider: build_subscription: response_receiver timed out / is offline\r"); - continue; - }; - let Message::Response((resp, _context)) = response_km.message else { - // if we hit this, they spoofed a request with same id, ignore and possibly punish - node_provider.usable = false; - continue; - }; - let Ok(eth_response) = serde_json::from_slice::(&resp.body) else { - // if we hit this, they sent a malformed response, ignore and possibly punish - node_provider.usable = false; - continue; - }; - if let EthResponse::Response { .. } = ð_response { - // if we hit this, they sent a response instead of a subscription, ignore and possibly punish - node_provider.usable = false; - continue; - } - if let EthResponse::Err(_error) = ð_response { - // if we hit this, they sent an error, if it's an error that might - // not be our fault, we can try another provider - continue; + .await + { + EthResponse::Ok => { + kernel_message( + &our, + km_id, + target.clone(), + None, + false, + None, + EthResponse::Ok, + &send_to_loop, + ) + .await; + response_channels.remove(&km_id); + return Ok((None, Some(node_provider.name.clone()))); + } + EthResponse::Response { .. } => { + // the response to a SubscribeLogs request must be an 'ok' + node_provider.usable = false; + } + EthResponse::Err(e) => { + if e == EthError::RpcMalformedResponse { + node_provider.usable = false; + } + } } - kernel_message( - &our, - km_id, - target, - None, - false, - None, - EthResponse::Ok, - &send_to_loop, - ) - .await; - response_channels.remove(&km_id); - return Ok((None, Some(node_provider.name.clone()))); } return Err(EthError::NoRpcForChain); } @@ -636,15 +639,11 @@ async fn maintain_subscription( target: Address, rsvp: Option
, send_to_loop: MessageSender, -) -> Result<(), EthError> { - println!("provider: maintain_subscription\r"); +) -> Result<(), String> { loop { - let value = rx - .recv() - .await - .map_err(|_| EthError::SubscriptionClosed(sub_id))?; + let value = rx.recv().await.map_err(|e| e.to_string())?; let result: SubscriptionResult = - serde_json::from_str(value.get()).map_err(|_| EthError::SubscriptionClosed(sub_id))?; + serde_json::from_str(value.get()).map_err(|e| e.to_string())?; kernel_message( &our, rand::random(), @@ -666,21 +665,21 @@ async fn fulfill_request( eth_action: EthAction, providers: Providers, mut remote_request_receiver: ProcessMessageReceiver, -) -> Result { - println!("provider: fulfill_request\r"); + print_tx: &PrintSender, +) -> EthResponse { let EthAction::Request { chain_id, ref method, ref params, } = eth_action else { - return Err(EthError::PermissionDenied); // will never hit + return EthResponse::Err(EthError::PermissionDenied); // will never hit }; let Some(method) = to_static_str(&method) else { - return Err(EthError::InvalidMethod(method.to_string())); + return EthResponse::Err(EthError::InvalidMethod(method.to_string())); }; let Some(mut aps) = providers.get_mut(&chain_id) else { - return Err(EthError::NoRpcForChain); + return EthResponse::Err(EthError::NoRpcForChain); }; // first, try any url providers we have for this chain, // then if we have none or they all fail, go to node provider. @@ -690,6 +689,7 @@ async fn fulfill_request( Some(pubsub) => pubsub, None => { if let Ok(()) = activate_url_provider(url_provider).await { + verbose_print(print_tx, "eth: activated a url provider").await; url_provider.pubsub.as_ref().unwrap() } else { continue; @@ -701,97 +701,103 @@ async fn fulfill_request( url_provider.pubsub = None; continue; }; - return Ok(EthResponse::Response { value }); + return EthResponse::Response { value }; } for node_provider in &mut aps.nodes { - if !node_provider.usable || node_provider.name == our { - continue; - } - // in order, forward the request to each node provider - // until one sends back a satisfactory response - kernel_message( + let response = forward_to_node_provider( our, km_id, - Address { - node: node_provider.name.clone(), - process: ETH_PROCESS_ID.clone(), - }, - None, - true, - Some(60), // TODO + None, // no rsvp needed for a discrete request + node_provider, eth_action.clone(), - &send_to_loop, + send_to_loop, + &mut remote_request_receiver, ) .await; - let Some(Ok(response_km)) = remote_request_receiver.recv().await else { - println!("provider: fulfill_request: remote_request_receiver timed out / is offline\r"); - continue; - }; - let Message::Response((resp, _context)) = response_km.message else { - // if we hit this, they spoofed a request with same id, ignore and possibly punish - node_provider.usable = false; - continue; - }; - let Ok(eth_response) = serde_json::from_slice::(&resp.body) else { - // if we hit this, they sent a malformed response, ignore and possibly punish - node_provider.usable = false; - continue; - }; - if let EthResponse::Err(error) = ð_response { - // if we hit this, they sent an error, if it's an error that might - // not be our fault, we can try another provider - match error { - EthError::NoRpcForChain => continue, - EthError::PermissionDenied => continue, - _ => {} + if let EthResponse::Err(e) = response { + if e == EthError::RpcMalformedResponse { + node_provider.usable = false; } + } else { + return response; } - return Ok(eth_response); } - Err(EthError::NoRpcForChain) + 
EthResponse::Err(EthError::NoRpcForChain)
 }
 
-async fn handle_eth_config_action(
+/// take an EthAction and send it to a node provider, then await a response.
+async fn forward_to_node_provider(
     our: &str,
-    access_settings: &mut AccessSettings,
+    km_id: u64,
+    rsvp: Option<Address>
, + node_provider: &NodeProvider, + eth_action: EthAction, + send_to_loop: &MessageSender, + receiver: &mut ProcessMessageReceiver, +) -> EthResponse { + if !node_provider.usable || node_provider.name == our { + return EthResponse::Err(EthError::PermissionDenied); + } + // in order, forward the request to each node provider + // until one sends back a satisfactory response + kernel_message( + our, + km_id, + Address { + node: node_provider.name.clone(), + process: ETH_PROCESS_ID.clone(), + }, + rsvp, + true, + Some(60), // TODO + eth_action.clone(), + &send_to_loop, + ) + .await; + let Some(Ok(response_km)) = receiver.recv().await else { + return EthResponse::Err(EthError::RpcTimeout); + }; + let Message::Response((resp, _context)) = response_km.message else { + // if we hit this, they spoofed a request with same id, ignore and possibly punish + return EthResponse::Err(EthError::RpcMalformedResponse); + }; + let Ok(eth_response) = serde_json::from_slice::(&resp.body) else { + // if we hit this, they sent a malformed response, ignore and possibly punish + return EthResponse::Err(EthError::RpcMalformedResponse); + }; + eth_response +} + +async fn handle_eth_config_action( + state: &mut ModuleState, caps_oracle: &CapMessageSender, km: &KernelMessage, eth_config_action: EthConfigAction, - providers: &mut Providers, ) -> EthConfigResponse { - println!("provider: handle_eth_config_action\r"); - if km.source.node != our { + if km.source.node != *state.our { + verbose_print( + &state.print_tx, + "eth: got eth_config_action from unauthorized remote source", + ) + .await; return EthConfigResponse::PermissionDenied; } + // check capabilities to ensure the sender is allowed to make this request - let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel(); - caps_oracle - .send(CapMessage::Has { - on: km.source.process.clone(), - cap: Capability { - issuer: Address { - node: our.to_string(), - process: ETH_PROCESS_ID.clone(), - }, - params: serde_json::to_string(&serde_json::json!({ - "root": true, - })) - .unwrap(), - }, - responder: send_cap_bool, - }) - .await - .expect("eth: capability oracle died!"); - if !recv_cap_bool.await.unwrap_or(false) { - println!("eth: capability oracle denied request, no cap\r"); + if !check_for_root_cap(&state.our, &km.source.process, caps_oracle).await { + verbose_print( + &state.print_tx, + "eth: got eth_config_action from unauthorized local source", + ) + .await; return EthConfigResponse::PermissionDenied; } - println!("cap valid\r"); // modify our providers and access settings based on config action match eth_config_action { EthConfigAction::AddProvider(provider) => { - let mut aps = providers + let mut aps = state + .providers .entry(provider.chain_id) .or_insert(ActiveProviders { urls: vec![], @@ -800,29 +806,27 @@ async fn handle_eth_config_action( aps.add_provider_config(provider); } EthConfigAction::RemoveProvider((chain_id, remove)) => { - if let Some(mut aps) = providers.get_mut(&chain_id) { + if let Some(mut aps) = state.providers.get_mut(&chain_id) { aps.remove_provider(&remove); } } EthConfigAction::SetPublic => { - println!("set public\r"); - access_settings.public = true; + state.access_settings.public = true; } EthConfigAction::SetPrivate => { - println!("set private\r"); - access_settings.public = false; + state.access_settings.public = false; } EthConfigAction::AllowNode(node) => { - access_settings.allow.insert(node); + state.access_settings.allow.insert(node); } EthConfigAction::UnallowNode(node) => { - access_settings.allow.remove(&node); 
+ state.access_settings.allow.remove(&node); } EthConfigAction::DenyNode(node) => { - access_settings.deny.insert(node); + state.access_settings.deny.insert(node); } EthConfigAction::UndenyNode(node) => { - access_settings.deny.remove(&node); + state.access_settings.deny.remove(&node); } EthConfigAction::SetProviders(new_providers) => { let new_map = DashMap::new(); @@ -833,65 +837,100 @@ async fn handle_eth_config_action( }); aps.add_provider_config(entry); } - *providers = Arc::new(new_map); + state.providers = Arc::new(new_map); } EthConfigAction::GetProviders => { - return EthConfigResponse::Providers( - providers - .iter() - .map(|entry| { - entry - .urls - .iter() - .map(|url_provider| ProviderConfig { - chain_id: *entry.key(), - provider: NodeOrRpcUrl::RpcUrl(url_provider.url.clone()), - trusted: url_provider.trusted, - }) - .chain(entry.nodes.iter().map(|node_provider| ProviderConfig { - chain_id: *entry.key(), - provider: NodeOrRpcUrl::Node { - kns_update: KnsUpdate { - name: node_provider.name.clone(), - owner: "".to_string(), - node: "".to_string(), - public_key: "".to_string(), - ip: "".to_string(), - port: 0, - routers: vec![], - }, - use_as_provider: node_provider.usable, - }, - trusted: node_provider.trusted, - })) - .collect::>() - }) - .flatten() - .collect(), - ); + return EthConfigResponse::Providers(providers_to_saved_configs(&state.providers)); } EthConfigAction::GetAccessSettings => { - return EthConfigResponse::AccessSettings(access_settings.clone()); + return EthConfigResponse::AccessSettings(state.access_settings.clone()); } } EthConfigResponse::Ok } +fn providers_to_saved_configs(providers: &Providers) -> SavedConfigs { + providers + .iter() + .map(|entry| { + entry + .urls + .iter() + .map(|url_provider| ProviderConfig { + chain_id: *entry.key(), + provider: NodeOrRpcUrl::RpcUrl(url_provider.url.clone()), + trusted: url_provider.trusted, + }) + .chain(entry.nodes.iter().map(|node_provider| ProviderConfig { + chain_id: *entry.key(), + provider: NodeOrRpcUrl::Node { + kns_update: KnsUpdate { + name: node_provider.name.clone(), + owner: "".to_string(), + node: "".to_string(), + public_key: "".to_string(), + ip: "".to_string(), + port: 0, + routers: vec![], + }, + use_as_provider: node_provider.usable, + }, + trusted: node_provider.trusted, + })) + .collect::>() + }) + .flatten() + .collect() +} + +async fn check_for_root_cap( + our: &str, + process: &ProcessId, + caps_oracle: &CapMessageSender, +) -> bool { + let (send_cap_bool, recv_cap_bool) = tokio::sync::oneshot::channel(); + caps_oracle + .send(CapMessage::Has { + on: process.clone(), + cap: Capability { + issuer: Address { + node: our.to_string(), + process: ETH_PROCESS_ID.clone(), + }, + params: serde_json::to_string(&serde_json::json!({ + "root": true, + })) + .unwrap(), + }, + responder: send_cap_bool, + }) + .await + .expect("eth: capability oracle died!"); + recv_cap_bool.await.unwrap_or(false) +} + +async fn verbose_print(print_tx: &PrintSender, content: &str) { + let _ = print_tx + .send(Printout { + verbosity: 2, + content: content.to_string(), + }) + .await; +} + async fn error_message( our: &str, km_id: u64, target: Address, - req: bool, error: EthError, send_to_loop: &MessageSender, ) { - println!("EthError: {error:?}\r"); kernel_message( our, km_id, target, None, - req, + false, None, EthResponse::Err(error), send_to_loop, diff --git a/lib/src/eth.rs b/lib/src/eth.rs index 4a241d029..a66900d86 100644 --- a/lib/src/eth.rs +++ b/lib/src/eth.rs @@ -59,7 +59,7 @@ pub enum EthResponse { Err(EthError), } 
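// PartialEq is derived on EthError below so that handlers can compare errors
// by value instead of matching exhaustively -- e.g. the provider module in
// this same series marks a node-provider unusable on a malformed response:
//
//     EthResponse::Err(e) => {
//         if e == EthError::RpcMalformedResponse {
//             node_provider.usable = false;
//         }
//     }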
-#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, PartialEq)] pub enum EthError { /// provider module cannot parse message MalformedRequest, From 44555ba954b7396cecbcfb55de882cc150f605dd Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 4 Mar 2024 00:03:15 -0300 Subject: [PATCH 20/23] kns_indexer bugfix, refactor eth again --- .../kns_indexer/kns_indexer/src/lib.rs | 38 +- kinode/src/eth/mod.rs | 372 +++++------------- kinode/src/eth/subscription.rs | 288 ++++++++++++++ kinode/src/kernel/process.rs | 4 +- 4 files changed, 409 insertions(+), 293 deletions(-) create mode 100644 kinode/src/eth/subscription.rs diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index 346aa5f3e..1f3945bfc 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -1,7 +1,6 @@ use alloy_sol_types::{sol, SolEvent}; use kinode_process_lib::{ - await_message, eth, get_typed_state, print_to_terminal, println, set_state, Address, Message, - Request, Response, + await_message, eth, get_typed_state, println, set_state, Address, Message, Request, Response, }; use serde::{Deserialize, Serialize}; use std::collections::{ @@ -28,7 +27,7 @@ struct State { // human readable name to most recent on-chain routing information as json // NOTE: not every namehash will have a node registered nodes: HashMap, - // last block we read from + // last block we have an update from block: u64, } @@ -95,9 +94,9 @@ sol! { event RoutingUpdate(bytes32 indexed node, bytes32[] routers); } -fn subscribe_to_logs(eth_provider: ð::Provider, filter: eth::Filter) { +fn subscribe_to_logs(eth_provider: ð::Provider, from_block: u64, filter: eth::Filter) { loop { - match eth_provider.subscribe(1, filter.clone()) { + match eth_provider.subscribe(1, filter.clone().from_block(from_block)) { Ok(()) => break, Err(_) => { println!("kns_indexer: failed to subscribe to chain! trying again in 5s..."); @@ -147,6 +146,10 @@ impl Guest for Component { block: 1, } } else { + println!( + "kns_indexer: loading in {} persisted PKI entries", + s.nodes.len() + ); s } } @@ -221,7 +224,7 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { set_state(&bincode::serialize(&state)?); - subscribe_to_logs(ð_provider, filter.clone()); + subscribe_to_logs(ð_provider, state.block - 1, filter.clone()); let mut pending_requests: BTreeMap> = BTreeMap::new(); @@ -301,7 +304,7 @@ fn handle_eth_message( } Err(e) => { println!("kns_indexer: got sub error, resubscribing.. {:?}", e.error); - subscribe_to_logs(ð_provider, filter.clone()); + subscribe_to_logs(ð_provider, state.block - 1, filter.clone()); } } @@ -340,8 +343,6 @@ fn handle_eth_message( } fn handle_log(our: &Address, state: &mut State, log: ð::Log) -> anyhow::Result<()> { - state.block = log.block_number.expect("expect").to::(); - let node_id = log.topics[1]; let name = match state.names.entry(node_id.to_string()) { @@ -403,18 +404,23 @@ fn handle_log(our: &Address, state: &mut State, log: ð::Log) -> anyhow::Resul && ((node.ip != "" && node.port != 0) || node.routers.len() > 0) && send { - print_to_terminal( - 1, - &format!( - "kns_indexer: sending ID to net: {node:?} (blocknum {})", - state.block - ), - ); Request::new() .target((&our.node, "net", "distro", "sys")) .try_body(NetActions::KnsUpdate(node.clone()))? 
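                // (the chained .send() just below fires this KnsUpdate off to
                // net:distro:sys without awaiting a response. note that
                // throughout this file, subscriptions are (re)opened with
                // `from_block = state.block - 1` -- one block behind the last
                // checkpoint -- so a log at the boundary may be delivered
                // twice. handle_log rewrites the same entries on replay, so
                // the at-least-once overlap should be harmless.)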
.send()?; } + + // if new block is > 100 from last block, save state + let block = log.block_number.expect("expect").to::(); + if block > state.block + 100 { + println!( + "kns_indexer: persisting {} PKI entries at block {}", + state.nodes.len(), + block + ); + state.block = block; + set_state(&bincode::serialize(state)?); + } Ok(()) } diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index 1ef0c2880..f8597cb6b 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -1,11 +1,9 @@ use alloy_providers::provider::Provider; -use alloy_pubsub::{PubSubFrontend, RawSubscription}; +use alloy_pubsub::PubSubFrontend; use alloy_rpc_client::ClientBuilder; -use alloy_rpc_types::pubsub::SubscriptionResult; use alloy_transport_ws::WsConnect; use anyhow::Result; use dashmap::DashMap; -use futures::Future; use lib::types::core::*; use lib::types::eth::*; use serde::{Deserialize, Serialize}; @@ -14,6 +12,8 @@ use std::sync::Arc; use tokio::task::JoinHandle; use url::Url; +mod subscription; + /// meta-type for all incoming requests we need to handle #[derive(Debug, Serialize, Deserialize)] #[serde(untagged)] @@ -48,17 +48,6 @@ struct NodeProvider { pub name: String, } -/// existing subscriptions held by local OR remote processes -type ActiveSubscriptions = Arc>>; - -type ResponseChannels = Arc>; - -#[derive(Debug)] -enum ActiveSub { - Local(JoinHandle<()>), - Remote(String), // name of node providing this subscription for us -} - impl ActiveProviders { fn add_provider_config(&mut self, new: ProviderConfig) { match new.provider { @@ -88,6 +77,53 @@ impl ActiveProviders { } } +/// existing subscriptions held by local OR remote processes +type ActiveSubscriptions = Arc>>; + +type ResponseChannels = Arc>; + +#[derive(Debug)] +enum ActiveSub { + Local(JoinHandle<()>), + Remote { + provider_node: String, + handle: JoinHandle<()>, + sender: tokio::sync::mpsc::Sender, + }, +} + +impl ActiveSub { + async fn close(&self, sub_id: u64, state: &ModuleState) { + match self { + ActiveSub::Local(handle) => { + handle.abort(); + } + ActiveSub::Remote { + provider_node, + handle, + .. + } => { + // tell provider node we don't need their services anymore + kernel_message( + &state.our, + rand::random(), + Address { + node: provider_node.clone(), + process: ETH_PROCESS_ID.clone(), + }, + None, + true, + None, + EthAction::UnsubscribeLogs(sub_id), + &state.send_to_loop, + ) + .await; + handle.abort(); + } + } + } +} + struct ModuleState { /// the name of this node our: Arc, @@ -208,6 +244,7 @@ async fn handle_network_error( verbose_print(&print_tx, "eth: got network error").await; // if we hold active subscriptions for the remote node that this error refers to, // close them here -- they will need to resubscribe + // TODO is this necessary? if let Some(sub_map) = active_subscriptions.get(&wrapped_error.source) { for (_sub_id, sub) in sub_map.iter() { if let ActiveSub::Local(handle) = sub { @@ -282,35 +319,42 @@ async fn handle_message( let Some(rsvp) = km.rsvp else { return Ok(()); // no rsvp, no need to forward }; - let sub_id = match ð_sub_result { + let sub_id = match eth_sub_result { Ok(EthSub { id, .. }) => id, Err(EthSubError { id, .. 
}) => id, }; if let Some(sub_map) = state.active_subscriptions.get(&rsvp) { - if let Some(sub) = sub_map.get(sub_id) { - if let ActiveSub::Remote(node_provider) = sub { - if node_provider == &km.source.node { - kernel_message( - &state.our, - km.id, - rsvp, - None, - true, - None, - eth_sub_result, - &state.send_to_loop, - ) - .await; + if let Some(ActiveSub::Remote { + provider_node, + sender, + .. + }) = sub_map.get(&sub_id) + { + if provider_node == &km.source.node { + if let Ok(()) = sender.send(eth_sub_result).await { return Ok(()); } } } } + // tell the remote provider that we don't have this sub + // so they can stop sending us updates verbose_print( &state.print_tx, "eth: got eth_sub_result but no matching sub found", ) .await; + kernel_message( + &state.our.clone(), + km.id, + km.source.clone(), + None, + true, + None, + EthAction::UnsubscribeLogs(sub_id), + &state.send_to_loop, + ) + .await; } } } @@ -327,22 +371,38 @@ async fn handle_eth_action( // check our access settings if the request is from a remote node if km.source.node != *state.our { if state.access_settings.deny.contains(&km.source.node) { + verbose_print( + &state.print_tx, + "eth: got eth_action from unauthorized remote source", + ) + .await; return Err(EthError::PermissionDenied); } if !state.access_settings.public { if !state.access_settings.allow.contains(&km.source.node) { + verbose_print( + &state.print_tx, + "eth: got eth_action from unauthorized remote source", + ) + .await; return Err(EthError::PermissionDenied); } } } + verbose_print( + &state.print_tx, + &format!("eth: handling eth_action {eth_action:?}"), + ) + .await; + // for each incoming action, we need to assign a provider from our map // based on the chain id. once we assign a provider, we can use it for // this request. if the provider is not usable, cycle through options // before returning an error. match eth_action { EthAction::SubscribeLogs { sub_id, .. } => { - tokio::spawn(create_new_subscription( + tokio::spawn(subscription::create_new_subscription( state.our.to_string(), km.id, km.source.clone(), @@ -362,27 +422,7 @@ async fn handle_eth_action( .entry(km.source) .or_insert(HashMap::new()); if let Some(sub) = sub_map.remove(&sub_id) { - match sub { - ActiveSub::Local(handle) => { - handle.abort(); - } - ActiveSub::Remote(node) => { - kernel_message( - &state.our, - rand::random(), - Address { - node: node.clone(), - process: ETH_PROCESS_ID.clone(), - }, - None, - true, - Some(60), // TODO - serde_json::to_vec(ð_action).unwrap(), - &state.send_to_loop, - ) - .await; - } - } + sub.close(sub_id, state).await; } } EthAction::Request { .. } => { @@ -434,230 +474,6 @@ async fn handle_eth_action( Ok(()) } -/// cleans itself up when the subscription is closed or fails. -async fn create_new_subscription( - our: String, - km_id: u64, - target: Address, - rsvp: Option
, - send_to_loop: MessageSender, - sub_id: u64, - eth_action: EthAction, - providers: Providers, - active_subscriptions: ActiveSubscriptions, - response_channels: ResponseChannels, - print_tx: PrintSender, -) { - verbose_print(&print_tx, "eth: creating new subscription").await; - match build_subscription( - &our, - km_id, - &target, - &rsvp, - &send_to_loop, - ð_action, - &providers, - &response_channels, - &print_tx, - ) - .await - { - Ok((Some(maintain_subscription), None)) => { - // this is a local sub, as in, we connect to the rpc endpt - // send a response to the target that the subscription was successful - kernel_message( - &our, - km_id, - target.clone(), - rsvp.clone(), - false, - None, - EthResponse::Ok, - &send_to_loop, - ) - .await; - let mut subs = active_subscriptions - .entry(target.clone()) - .or_insert(HashMap::new()); - let active_subscriptions = active_subscriptions.clone(); - subs.insert( - sub_id, - ActiveSub::Local(tokio::spawn(async move { - // await the subscription error and kill it if so - if let Err(e) = maintain_subscription.await { - kernel_message( - &our, - rand::random(), - target.clone(), - None, - true, - None, - EthSubResult::Err(EthSubError { - id: sub_id, - error: e, - }), - &send_to_loop, - ) - .await; - active_subscriptions.entry(target).and_modify(|sub_map| { - sub_map.remove(&km_id); - }); - } - })), - ); - } - Ok((None, Some(provider_node))) => { - // this is a remote sub - let mut subs = active_subscriptions - .entry(target.clone()) - .or_insert(HashMap::new()); - subs.insert(sub_id, ActiveSub::Remote(provider_node)); - } - Err(e) => { - error_message(&our, km_id, target.clone(), e, &send_to_loop).await; - } - _ => panic!(), - } -} - -async fn build_subscription( - our: &str, - km_id: u64, - target: &Address, - rsvp: &Option
, - send_to_loop: &MessageSender, - eth_action: &EthAction, - providers: &Providers, - response_channels: &ResponseChannels, - print_tx: &PrintSender, -) -> Result< - ( - // this is dumb, sorry - Option>>, - Option, - ), - EthError, -> { - let EthAction::SubscribeLogs { - sub_id, - chain_id, - kind, - params, - } = eth_action - else { - return Err(EthError::PermissionDenied); // will never hit - }; - let Some(mut aps) = providers.get_mut(&chain_id) else { - return Err(EthError::NoRpcForChain); - }; - // first, try any url providers we have for this chain, - // then if we have none or they all fail, go to node providers. - // finally, if no provider works, return an error. - for url_provider in &mut aps.urls { - let pubsub = match &url_provider.pubsub { - Some(pubsub) => pubsub, - None => { - if let Ok(()) = activate_url_provider(url_provider).await { - verbose_print(print_tx, "eth: activated a url provider").await; - url_provider.pubsub.as_ref().unwrap() - } else { - continue; - } - } - }; - let kind = serde_json::to_value(&kind).unwrap(); - let params = serde_json::to_value(¶ms).unwrap(); - if let Ok(id) = pubsub - .inner() - .prepare("eth_subscribe", [kind, params]) - .await - { - let rx = pubsub.inner().get_raw_subscription(id).await; - return Ok(( - Some(maintain_subscription( - our.to_string(), - *sub_id, - rx, - target.clone(), - rsvp.clone(), - send_to_loop.clone(), - )), - None, - )); - } - // this provider failed and needs to be reset - url_provider.pubsub = None; - } - // now we need a response channel - let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1); - response_channels.insert(km_id, sender); - for node_provider in &mut aps.nodes { - match forward_to_node_provider( - &our, - km_id, - rsvp.clone(), - node_provider, - eth_action.clone(), - &send_to_loop, - &mut response_receiver, - ) - .await - { - EthResponse::Ok => { - kernel_message( - &our, - km_id, - target.clone(), - None, - false, - None, - EthResponse::Ok, - &send_to_loop, - ) - .await; - response_channels.remove(&km_id); - return Ok((None, Some(node_provider.name.clone()))); - } - EthResponse::Response { .. } => { - // the response to a SubscribeLogs request must be an 'ok' - node_provider.usable = false; - } - EthResponse::Err(e) => { - if e == EthError::RpcMalformedResponse { - node_provider.usable = false; - } - } - } - } - return Err(EthError::NoRpcForChain); -} - -async fn maintain_subscription( - our: String, - sub_id: u64, - mut rx: RawSubscription, - target: Address, - rsvp: Option
, - send_to_loop: MessageSender, -) -> Result<(), String> { - loop { - let value = rx.recv().await.map_err(|e| e.to_string())?; - let result: SubscriptionResult = - serde_json::from_str(value.get()).map_err(|e| e.to_string())?; - kernel_message( - &our, - rand::random(), - target.clone(), - rsvp.clone(), - true, - None, - EthSubResult::Ok(EthSub { id: sub_id, result }), - &send_to_loop, - ) - .await; - } -} - async fn fulfill_request( our: &str, km_id: u64, @@ -793,6 +609,12 @@ async fn handle_eth_config_action( return EthConfigResponse::PermissionDenied; } + verbose_print( + &state.print_tx, + &format!("eth: handling eth_config_action {eth_config_action:?}"), + ) + .await; + // modify our providers and access settings based on config action match eth_config_action { EthConfigAction::AddProvider(provider) => { diff --git a/kinode/src/eth/subscription.rs b/kinode/src/eth/subscription.rs new file mode 100644 index 000000000..3b919a47a --- /dev/null +++ b/kinode/src/eth/subscription.rs @@ -0,0 +1,288 @@ +use crate::eth::*; +use alloy_pubsub::RawSubscription; +use alloy_rpc_types::pubsub::SubscriptionResult; +use anyhow::Result; +use std::collections::HashMap; + +/// cleans itself up when the subscription is closed or fails. +pub async fn create_new_subscription( + our: String, + km_id: u64, + target: Address, + rsvp: Option
, + send_to_loop: MessageSender, + sub_id: u64, + eth_action: EthAction, + providers: Providers, + active_subscriptions: ActiveSubscriptions, + response_channels: ResponseChannels, + print_tx: PrintSender, +) { + verbose_print(&print_tx, "eth: creating new subscription").await; + match build_subscription( + &our, + km_id, + &target, + &rsvp, + &send_to_loop, + ð_action, + &providers, + &response_channels, + &print_tx, + ) + .await + { + Ok(maybe_raw_sub) => { + // send a response to the target that the subscription was successful + kernel_message( + &our, + km_id, + target.clone(), + rsvp.clone(), + false, + None, + EthResponse::Ok, + &send_to_loop, + ) + .await; + let mut subs = active_subscriptions + .entry(target.clone()) + .or_insert(HashMap::new()); + let active_subscriptions = active_subscriptions.clone(); + subs.insert( + sub_id, + match maybe_raw_sub { + Ok(rx) => { + // this is a local sub, as in, we connect to the rpc endpt + ActiveSub::Local(tokio::spawn(async move { + // await the subscription error and kill it if so + if let Err(e) = maintain_local_subscription( + &our, + sub_id, + rx, + &target, + &rsvp, + &send_to_loop, + ) + .await + { + kernel_message( + &our, + rand::random(), + target.clone(), + None, + true, + None, + EthSubResult::Err(e), + &send_to_loop, + ) + .await; + active_subscriptions.entry(target).and_modify(|sub_map| { + sub_map.remove(&km_id); + }); + } + })) + } + Err(provider_node) => { + // this is a remote sub, given by a relay node + let (sender, rx) = tokio::sync::mpsc::channel(10); + ActiveSub::Remote { + provider_node, + handle: tokio::spawn(async move { + if let Err(e) = maintain_remote_subscription( + &our, + sub_id, + rx, + &target, + &send_to_loop, + ) + .await + { + kernel_message( + &our, + rand::random(), + target.clone(), + None, + true, + None, + EthSubResult::Err(e), + &send_to_loop, + ) + .await; + active_subscriptions.entry(target).and_modify(|sub_map| { + sub_map.remove(&km_id); + }); + } + }), + sender, + } + } + }, + ); + } + Err(e) => { + error_message(&our, km_id, target.clone(), e, &send_to_loop).await; + } + } +} + +/// terrible abuse of result in return type, yes, sorry +async fn build_subscription( + our: &str, + km_id: u64, + target: &Address, + rsvp: &Option
, + send_to_loop: &MessageSender, + eth_action: &EthAction, + providers: &Providers, + response_channels: &ResponseChannels, + print_tx: &PrintSender, +) -> Result, EthError> { + let EthAction::SubscribeLogs { + chain_id, + kind, + params, + .. + } = eth_action + else { + return Err(EthError::PermissionDenied); // will never hit + }; + let Some(mut aps) = providers.get_mut(&chain_id) else { + return Err(EthError::NoRpcForChain); + }; + // first, try any url providers we have for this chain, + // then if we have none or they all fail, go to node providers. + // finally, if no provider works, return an error. + for url_provider in &mut aps.urls { + let pubsub = match &url_provider.pubsub { + Some(pubsub) => pubsub, + None => { + if let Ok(()) = activate_url_provider(url_provider).await { + verbose_print(print_tx, "eth: activated a url provider").await; + url_provider.pubsub.as_ref().unwrap() + } else { + continue; + } + } + }; + let kind = serde_json::to_value(&kind).unwrap(); + let params = serde_json::to_value(¶ms).unwrap(); + if let Ok(id) = pubsub + .inner() + .prepare("eth_subscribe", [kind, params]) + .await + { + let rx = pubsub.inner().get_raw_subscription(id).await; + return Ok(Ok(rx)); + } + // this provider failed and needs to be reset + url_provider.pubsub = None; + } + // now we need a response channel + let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1); + response_channels.insert(km_id, sender); + for node_provider in &mut aps.nodes { + match forward_to_node_provider( + &our, + km_id, + rsvp.clone(), + node_provider, + eth_action.clone(), + &send_to_loop, + &mut response_receiver, + ) + .await + { + EthResponse::Ok => { + kernel_message( + &our, + km_id, + target.clone(), + None, + false, + None, + EthResponse::Ok, + &send_to_loop, + ) + .await; + response_channels.remove(&km_id); + return Ok(Err(node_provider.name.clone())); + } + EthResponse::Response { .. } => { + // the response to a SubscribeLogs request must be an 'ok' + node_provider.usable = false; + } + EthResponse::Err(e) => { + if e == EthError::RpcMalformedResponse { + node_provider.usable = false; + } + } + } + } + return Err(EthError::NoRpcForChain); +} + +async fn maintain_local_subscription( + our: &str, + sub_id: u64, + mut rx: RawSubscription, + target: &Address, + rsvp: &Option
, + send_to_loop: &MessageSender, +) -> Result<(), EthSubError> { + while let Ok(value) = rx.recv().await { + let result: SubscriptionResult = + serde_json::from_str(value.get()).map_err(|e| EthSubError { + id: sub_id, + error: e.to_string(), + })?; + kernel_message( + our, + rand::random(), + target.clone(), + rsvp.clone(), + true, + None, + EthSubResult::Ok(EthSub { id: sub_id, result }), + &send_to_loop, + ) + .await; + } + Err(EthSubError { + id: sub_id, + error: "subscription closed unexpectedly".to_string(), + }) +} + +async fn maintain_remote_subscription( + our: &str, + sub_id: u64, + mut rx: tokio::sync::mpsc::Receiver, + target: &Address, + send_to_loop: &MessageSender, +) -> Result<(), EthSubError> { + while let Some(incoming) = rx.recv().await { + match incoming { + EthSubResult::Ok(_) => { + kernel_message( + &our, + rand::random(), + target.clone(), + None, + true, + None, + incoming, + &send_to_loop, + ) + .await; + } + EthSubResult::Err(e) => { + return Err(e); + } + } + } + Err(EthSubError { + id: sub_id, + error: "subscription closed unexpectedly".to_string(), + }) +} diff --git a/kinode/src/kernel/process.rs b/kinode/src/kernel/process.rs index 85dbfbf50..cd25a06b4 100644 --- a/kinode/src/kernel/process.rs +++ b/kinode/src/kernel/process.rs @@ -548,12 +548,12 @@ pub async fn make_process_loop( metadata.our.process.publisher() ); if let Err(e) = fs::create_dir_all(&tmp_path).await { - panic!("failed creating tmp dir! {:?}", e); + panic!("failed creating tmp dir! {:?}", e); // TODO REMOVE } let Ok(wasi_tempdir) = Dir::open_ambient_dir(tmp_path.clone(), wasmtime_wasi::sync::ambient_authority()) else { - panic!("failed to open ambient tmp dir!"); + panic!("failed to open ambient tmp dir!"); // TODO REMOVE }; let wasi = WasiCtxBuilder::new() .preopened_dir( From 76b8d5266b02da2314b0944a336e4183fb7eafa4 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 4 Mar 2024 15:32:10 -0300 Subject: [PATCH 21/23] reconnect to node-provider fully working --- .../packages/app_store/app_store/src/lib.rs | 14 ++- .../kns_indexer/kns_indexer/src/lib.rs | 5 +- kinode/src/eth/mod.rs | 25 ++-- kinode/src/eth/subscription.rs | 114 ++++++++++++++---- 4 files changed, 121 insertions(+), 37 deletions(-) diff --git a/kinode/packages/app_store/app_store/src/lib.rs b/kinode/packages/app_store/app_store/src/lib.rs index 9267b66f7..771f558f5 100644 --- a/kinode/packages/app_store/app_store/src/lib.rs +++ b/kinode/packages/app_store/app_store/src/lib.rs @@ -92,6 +92,7 @@ fn subscribe_to_logs(eth_provider: ð::Provider, filter: eth::Filter) { } } } + println!("app store: subscribed to logs successfully"); } call_init!(init); @@ -134,7 +135,7 @@ fn init(our: Address) { // create new provider for sepolia with request-timeout of 60s // can change, log requests can take quite a long time. - let eth_provider = eth::Provider::new(CHAIN_ID, 30); + let eth_provider = eth::Provider::new(CHAIN_ID, 60); let mut requested_packages: HashMap = HashMap::new(); @@ -228,7 +229,16 @@ fn handle_message( if let Ok(eth::EthSub { result, .. 
}) = eth_result { handle_eth_sub_event(our, &mut state, result)?; } else { - println!("app store: got eth sub error: {eth_result:?}"); + println!("app store: got eth subscription error"); + // attempt to resubscribe + subscribe_to_logs( + ð_provider, + eth::Filter::new() + .address(eth::Address::from_str(&state.contract_address).unwrap()) + .from_block(state.last_saved_block - 1) + .to_block(eth::BlockNumberOrTag::Latest) + .events(EVENTS), + ); } } Req::Http(incoming) => { diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index 1f3945bfc..b460784c9 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -105,6 +105,7 @@ fn subscribe_to_logs(eth_provider: ð::Provider, from_block: u64, filter: eth: } } } + println!("kns_indexer: subscribed to logs successfully"); } struct Component; @@ -194,7 +195,7 @@ fn main(our: Address, mut state: State) -> anyhow::Result<()> { // 60s timeout -- these calls can take a long time // if they do time out, we try them again - let eth_provider = eth::Provider::new(state.chain_id, 20); + let eth_provider = eth::Provider::new(state.chain_id, 60); // if block in state is < current_block, get logs from that part. if state.block < eth_provider.get_block_number().unwrap_or(u64::MAX) { @@ -303,7 +304,7 @@ fn handle_eth_message( } } Err(e) => { - println!("kns_indexer: got sub error, resubscribing.. {:?}", e.error); + println!("kns_indexer: got eth subscription error"); subscribe_to_logs(ð_provider, state.block - 1, filter.clone()); } } diff --git a/kinode/src/eth/mod.rs b/kinode/src/eth/mod.rs index f8597cb6b..4e9a3296e 100644 --- a/kinode/src/eth/mod.rs +++ b/kinode/src/eth/mod.rs @@ -21,6 +21,7 @@ enum IncomingReq { EthAction(EthAction), EthConfigAction(EthConfigAction), EthSubResult(EthSubResult), + SubKeepalive(u64), } /// mapping of chain id to ordered lists of providers @@ -262,6 +263,7 @@ async fn handle_network_error( if let Some(chan) = response_channels.get(&wrapped_error.id) { // can't close channel here, as response may be an error // and fulfill_request may wish to try other providers. + verbose_print(&print_tx, "eth: sent network error to response channel").await; let _ = chan.send(Err(wrapped_error)).await; } } @@ -289,11 +291,7 @@ async fn handle_message( } } Message::Request(req) => { - let Some(timeout) = req.expects_response else { - // if they don't want a response, we don't need to do anything - // might as well throw it away - return Err(EthError::MalformedRequest); - }; + let timeout = req.expects_response.unwrap_or(60); let Ok(req) = serde_json::from_slice::(&req.body) else { return Err(EthError::MalformedRequest); }; @@ -356,6 +354,15 @@ async fn handle_message( ) .await; } + IncomingReq::SubKeepalive(sub_id) => { + // source expects that we have a local sub for them with this id + // if we do, no action required, otherwise, throw them an error. + if let Some(sub_map) = state.active_subscriptions.get(&km.source) { + if sub_map.contains_key(&sub_id) { + return Ok(()); + } + } + } } } } @@ -523,7 +530,6 @@ async fn fulfill_request( let response = forward_to_node_provider( our, km_id, - None, // no rsvp needed for a discrete request node_provider, eth_action.clone(), send_to_loop, @@ -545,7 +551,6 @@ async fn fulfill_request( async fn forward_to_node_provider( our: &str, km_id: u64, - rsvp: Option
, node_provider: &NodeProvider, eth_action: EthAction, send_to_loop: &MessageSender, @@ -563,14 +568,16 @@ async fn forward_to_node_provider( node: node_provider.name.clone(), process: ETH_PROCESS_ID.clone(), }, - rsvp, + None, true, Some(60), // TODO eth_action.clone(), &send_to_loop, ) .await; - let Some(Ok(response_km)) = receiver.recv().await else { + let Ok(Some(Ok(response_km))) = + tokio::time::timeout(std::time::Duration::from_secs(30), receiver.recv()).await + else { return EthResponse::Err(EthError::RpcTimeout); }; let Message::Response((resp, _context)) = response_km.message else { diff --git a/kinode/src/eth/subscription.rs b/kinode/src/eth/subscription.rs index 3b919a47a..66d22f0bb 100644 --- a/kinode/src/eth/subscription.rs +++ b/kinode/src/eth/subscription.rs @@ -23,7 +23,6 @@ pub async fn create_new_subscription( &our, km_id, &target, - &rsvp, &send_to_loop, ð_action, &providers, @@ -66,6 +65,11 @@ pub async fn create_new_subscription( ) .await { + verbose_print( + &print_tx, + "eth: closed local subscription due to error", + ) + .await; kernel_message( &our, rand::random(), @@ -83,21 +87,34 @@ pub async fn create_new_subscription( } })) } - Err(provider_node) => { + Err((provider_node, remote_sub_id)) => { // this is a remote sub, given by a relay node let (sender, rx) = tokio::sync::mpsc::channel(10); + let keepalive_km_id = rand::random(); + let (keepalive_err_sender, keepalive_err_receiver) = + tokio::sync::mpsc::channel(1); + response_channels.insert(keepalive_km_id, keepalive_err_sender); ActiveSub::Remote { - provider_node, + provider_node: provider_node.clone(), handle: tokio::spawn(async move { if let Err(e) = maintain_remote_subscription( &our, + &provider_node, + remote_sub_id, sub_id, + keepalive_km_id, rx, + keepalive_err_receiver, &target, &send_to_loop, ) .await { + verbose_print( + &print_tx, + "eth: closed subscription with provider node due to error", + ) + .await; kernel_message( &our, rand::random(), @@ -110,8 +127,9 @@ pub async fn create_new_subscription( ) .await; active_subscriptions.entry(target).and_modify(|sub_map| { - sub_map.remove(&km_id); + sub_map.remove(&sub_id); }); + response_channels.remove(&keepalive_km_id); } }), sender, @@ -131,13 +149,12 @@ async fn build_subscription( our: &str, km_id: u64, target: &Address, - rsvp: &Option
, send_to_loop: &MessageSender, eth_action: &EthAction, providers: &Providers, response_channels: &ResponseChannels, print_tx: &PrintSender, -) -> Result, EthError> { +) -> Result, EthError> { let EthAction::SubscribeLogs { chain_id, kind, @@ -181,13 +198,20 @@ async fn build_subscription( // now we need a response channel let (sender, mut response_receiver) = tokio::sync::mpsc::channel(1); response_channels.insert(km_id, sender); + // we need to create our own unique sub id because in the remote provider node, + // all subs will be identified under our process address. + let remote_sub_id = rand::random(); for node_provider in &mut aps.nodes { match forward_to_node_provider( &our, km_id, - rsvp.clone(), node_provider, - eth_action.clone(), + EthAction::SubscribeLogs { + sub_id: remote_sub_id, + chain_id: chain_id.clone(), + kind: kind.clone(), + params: params.clone(), + }, &send_to_loop, &mut response_receiver, ) @@ -206,7 +230,7 @@ async fn build_subscription( ) .await; response_channels.remove(&km_id); - return Ok(Err(node_provider.name.clone())); + return Ok(Err((node_provider.name.clone(), remote_sub_id))); } EthResponse::Response { .. } => { // the response to a SubscribeLogs request must be an 'ok' @@ -254,35 +278,77 @@ async fn maintain_local_subscription( }) } +/// handle the subscription updates from a remote provider, +/// and also perform keepalive checks on that provider. +/// current keepalive is 30s, this can be adjusted as desired async fn maintain_remote_subscription( our: &str, + provider_node: &str, + remote_sub_id: u64, sub_id: u64, + keepalive_km_id: u64, mut rx: tokio::sync::mpsc::Receiver, + mut net_error_rx: ProcessMessageReceiver, target: &Address, send_to_loop: &MessageSender, ) -> Result<(), EthSubError> { - while let Some(incoming) = rx.recv().await { - match incoming { - EthSubResult::Ok(_) => { + let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(30)); + loop { + tokio::select! { + incoming = rx.recv() => { + match incoming { + Some(EthSubResult::Ok(upd)) => { + kernel_message( + &our, + rand::random(), + target.clone(), + None, + true, + None, + EthSubResult::Ok(EthSub { + id: sub_id, + result: upd.result, + }), + &send_to_loop, + ) + .await; + } + Some(EthSubResult::Err(e)) => { + return Err(EthSubError { + id: sub_id, + error: e.error, + }); + } + None => { + return Err(EthSubError { + id: sub_id, + error: "subscription closed unexpectedly".to_string(), + }); + + } + } + } + _ = interval.tick() => { + // perform keepalive kernel_message( &our, - rand::random(), - target.clone(), + keepalive_km_id, + Address { node: provider_node.to_string(), process: ETH_PROCESS_ID.clone() }, None, true, - None, - incoming, + Some(30), + IncomingReq::SubKeepalive(remote_sub_id), &send_to_loop, - ) - .await; + ).await; } - EthSubResult::Err(e) => { - return Err(e); + incoming = net_error_rx.recv() => { + if let Some(Err(_net_error)) = incoming { + return Err(EthSubError { + id: sub_id, + error: "subscription node-provider failed keepalive".to_string(), + }); + } } } } - Err(EthSubError { - id: sub_id, - error: "subscription closed unexpectedly".to_string(), - }) } From 96750b77df196065ba3db74d16f9723018bbd6e8 Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 4 Mar 2024 16:21:09 -0300 Subject: [PATCH 22/23] update process_lib deps, add timeout to wasi tempdir creation (hitting this, weirdly?) 
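Everything now pins process_lib at the v0.6.0-alpha.2 tag instead of three
different git revs, which also collapses the duplicate alloy-* trees in
Cargo.lock. The tempdir change makes process startup degrade gracefully instead
of panicking when the filesystem misbehaves: creating/opening the tmp dir is
bounded by a timeout and, on failure, the process simply runs without a
preopened TEMP_DIR. A minimal sketch of the bounding pattern (same shape as the
process.rs change below; the 5s bound is the value used there):

    // bound a possibly-hanging fs call so a stuck disk can't stall startup
    match tokio::time::timeout(
        std::time::Duration::from_secs(5),
        tokio::fs::create_dir_all(&tmp_path),
    )
    .await
    {
        Ok(Ok(())) => { /* preopen the dir for WASI and set TEMP_DIR */ }
        Ok(Err(_)) | Err(_) => { /* io error or timeout: skip the preopen */ }
    }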
--- Cargo.lock | 150 +++++------------- .../packages/app_store/app_store/Cargo.toml | 2 +- .../packages/app_store/app_store/src/types.rs | 3 + kinode/packages/app_store/download/Cargo.toml | 2 +- .../packages/app_store/ft_worker/Cargo.toml | 2 +- kinode/packages/app_store/install/Cargo.toml | 2 +- .../packages/app_store/uninstall/Cargo.toml | 2 +- kinode/packages/chess/chess/Cargo.toml | 2 +- kinode/packages/homepage/homepage/Cargo.toml | 2 +- .../packages/kns_indexer/get_block/Cargo.toml | 2 +- .../kns_indexer/kns_indexer/Cargo.toml | 2 +- .../kns_indexer/kns_indexer/src/lib.rs | 2 +- kinode/packages/terminal/alias/Cargo.toml | 2 +- kinode/packages/terminal/cat/Cargo.toml | 2 +- kinode/packages/terminal/echo/Cargo.toml | 2 +- kinode/packages/terminal/hi/Cargo.toml | 2 +- kinode/packages/terminal/m/Cargo.toml | 2 +- kinode/packages/terminal/m/src/lib.rs | 2 +- kinode/packages/terminal/terminal/Cargo.toml | 2 +- kinode/packages/terminal/terminal/src/lib.rs | 1 - kinode/packages/terminal/top/Cargo.toml | 2 +- kinode/packages/tester/test_runner/Cargo.toml | 2 +- kinode/packages/tester/test_runner/src/lib.rs | 2 +- kinode/packages/tester/tester/Cargo.toml | 2 +- kinode/src/kernel/process.rs | 40 +++-- 25 files changed, 83 insertions(+), 153 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 80c791a4c..872514bc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -78,7 +78,7 @@ name = "alias" version = "0.1.0" dependencies = [ "anyhow", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -101,17 +101,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "alloy-json-rpc" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" -dependencies = [ - "alloy-primitives", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "alloy-json-rpc" version = "0.1.0" @@ -129,7 +118,7 @@ version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ "alloy-eips", - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-json-rpc", "alloy-primitives", "alloy-rlp", "serde", @@ -166,8 +155,8 @@ dependencies = [ "alloy-primitives", "alloy-rpc-client", "alloy-rpc-trace-types", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-rpc-types", + "alloy-transport", "alloy-transport-http", "async-trait", "auto_impl", @@ -181,9 +170,9 @@ name = "alloy-pubsub" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-json-rpc", "alloy-primitives", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-transport", "bimap", "futures", "serde", @@ -220,10 +209,10 @@ name = "alloy-rpc-client" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-json-rpc", "alloy-primitives", "alloy-pubsub", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-transport", 
"alloy-transport-http", "alloy-transport-ws", "futures", @@ -242,24 +231,11 @@ version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-rpc-types", "serde", "serde_json", ] -[[package]] -name = "alloy-rpc-types" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" -dependencies = [ - "alloy-primitives", - "alloy-rlp", - "itertools 0.12.1", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "alloy-rpc-types" version = "0.1.0" @@ -303,28 +279,12 @@ dependencies = [ "serde", ] -[[package]] -name = "alloy-transport" -version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=098ad56#098ad5657d55bbc5fe9469ede2a9ca79def738f2" -dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", - "base64 0.21.7", - "serde", - "serde_json", - "thiserror", - "tokio", - "tower", - "url", - "wasm-bindgen-futures", -] - [[package]] name = "alloy-transport" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-json-rpc", "base64 0.21.7", "futures-util", "serde", @@ -341,8 +301,8 @@ name = "alloy-transport-http" version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-json-rpc", + "alloy-transport", "reqwest", "serde_json", "tower", @@ -355,7 +315,7 @@ version = "0.1.0" source = "git+https://github.com/alloy-rs/alloy?rev=6f8ebb4#6f8ebb45afca1a201a11d421ec46db0f7a1d8d08" dependencies = [ "alloy-pubsub", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-transport", "futures", "http 0.2.11", "serde_json", @@ -448,7 +408,7 @@ dependencies = [ "alloy-sol-types", "anyhow", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=9838f5d)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "rand 0.8.5", "serde", "serde_json", @@ -935,7 +895,7 @@ name = "cat" version = "0.1.0" dependencies = [ "anyhow", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -997,7 +957,7 @@ dependencies = [ "anyhow", "base64 0.13.1", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=3232423)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "pleco", "serde", "serde_json", @@ -1632,7 +1592,7 @@ name = "download" version = "0.1.0" dependencies = [ "anyhow", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -1663,7 +1623,7 @@ name = "echo" version = "0.1.0" dependencies = [ "anyhow", - "kinode_process_lib 0.6.0 
(git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -1863,7 +1823,7 @@ version = "0.2.0" dependencies = [ "anyhow", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "rand 0.8.5", "serde", "serde_json", @@ -2008,7 +1968,7 @@ dependencies = [ name = "get_block" version = "0.1.0" dependencies = [ - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=9838f5d)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -2183,7 +2143,7 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" name = "hi" version = "0.1.0" dependencies = [ - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -2213,7 +2173,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -2439,7 +2399,7 @@ name = "install" version = "0.1.0" dependencies = [ "anyhow", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -2604,7 +2564,7 @@ dependencies = [ "alloy-providers", "alloy-pubsub", "alloy-rpc-client", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-rpc-types", "alloy-transport-ws", "anyhow", "async-trait", @@ -2675,50 +2635,12 @@ dependencies = [ [[package]] name = "kinode_process_lib" version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib?rev=12bf9ee#12bf9eefeb9237db5e5165647fa91b437b05e169" -dependencies = [ - "anyhow", - "bincode", - "http 1.0.0", - "mime_guess", - "rand 0.8.5", - "serde", - "serde_json", - "thiserror", - "url", - "wit-bindgen", -] - -[[package]] -name = "kinode_process_lib" -version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib?rev=3232423#323242399efdcdad02e7f31bb6a9cc5eec048610" -dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", - "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", - "alloy-transport 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=098ad56)", - "anyhow", - "bincode", - "http 1.0.0", - "mime_guess", - "rand 0.8.5", - "serde", - "serde_json", - "thiserror", - "url", - "wit-bindgen", -] - -[[package]] -name = "kinode_process_lib" -version = "0.6.0" -source = "git+https://github.com/kinode-dao/process_lib?rev=9838f5d#9838f5d1cb0b2b6d63ad4d82c9628ff3e8d33dff" +source = "git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2#f6a2bdab370da88488b210f4dc92b715b9c0e4b2" dependencies = [ - "alloy-json-rpc 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-json-rpc", "alloy-primitives", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", - "alloy-transport 0.1.0 
(git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-rpc-types", + "alloy-transport", "anyhow", "bincode", "http 1.0.0", @@ -2786,7 +2708,7 @@ dependencies = [ "anyhow", "bincode", "hex", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=9838f5d)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "rmp-serde", "serde", "serde_json", @@ -2816,7 +2738,7 @@ name = "lib" version = "0.6.0" dependencies = [ "alloy-pubsub", - "alloy-rpc-types 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=6f8ebb4)", + "alloy-rpc-types", "lazy_static", "rand 0.8.5", "reqwest", @@ -2967,7 +2889,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "regex", "serde", "serde_json", @@ -4653,7 +4575,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "rand 0.8.5", "regex", "serde", @@ -4667,7 +4589,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "thiserror", @@ -4681,7 +4603,7 @@ dependencies = [ "anyhow", "bincode", "indexmap", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "thiserror", @@ -4899,7 +4821,7 @@ name = "top" version = "0.1.0" dependencies = [ "anyhow", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", @@ -5130,7 +5052,7 @@ name = "uninstall" version = "0.1.0" dependencies = [ "anyhow", - "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?rev=12bf9ee)", + "kinode_process_lib 0.6.0 (git+https://github.com/kinode-dao/process_lib?tag=v0.6.0-alpha.2)", "serde", "serde_json", "wit-bindgen", diff --git a/kinode/packages/app_store/app_store/Cargo.toml b/kinode/packages/app_store/app_store/Cargo.toml index a215763fd..13f182c46 100644 --- a/kinode/packages/app_store/app_store/Cargo.toml +++ b/kinode/packages/app_store/app_store/Cargo.toml @@ -9,7 +9,7 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" anyhow = "1.0" bincode = "1.3.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "9838f5d" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/app_store/app_store/src/types.rs b/kinode/packages/app_store/app_store/src/types.rs index 1d6adea24..f4a49e47e 100644 --- a/kinode/packages/app_store/app_store/src/types.rs +++ b/kinode/packages/app_store/app_store/src/types.rs @@ -206,6 +206,7 @@ impl State { let manifest_file = vfs::File { path: format!("/{}/pkg/manifest.json", package_id), + timeout: 5, }; let manifest_bytes = manifest_file.read()?; let manifest_hash = 
generate_metadata_hash(&manifest_bytes); @@ -284,6 +285,7 @@ impl State { if entry.file_type == vfs::FileType::Directory { let zip_file = vfs::File { path: format!("/{}/pkg/{}.zip", package_id, package_id), + timeout: 5, }; let Ok(zip_file_bytes) = zip_file.read() else { continue; @@ -293,6 +295,7 @@ impl State { let our_version = generate_version_hash(&zip_file_bytes); let manifest_file = vfs::File { path: format!("/{}/pkg/manifest.json", package_id), + timeout: 5, }; let manifest_bytes = manifest_file.read()?; // the user will need to turn mirroring and auto-update back on if they diff --git a/kinode/packages/app_store/download/Cargo.toml b/kinode/packages/app_store/download/Cargo.toml index 07bb716df..75e7ca1f5 100644 --- a/kinode/packages/app_store/download/Cargo.toml +++ b/kinode/packages/app_store/download/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] anyhow = "1.0" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/app_store/ft_worker/Cargo.toml b/kinode/packages/app_store/ft_worker/Cargo.toml index 3171678b8..7ec470781 100644 --- a/kinode/packages/app_store/ft_worker/Cargo.toml +++ b/kinode/packages/app_store/ft_worker/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] anyhow = "1.0" bincode = "1.3.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/app_store/install/Cargo.toml b/kinode/packages/app_store/install/Cargo.toml index 28f5e6e15..f12bf75d5 100644 --- a/kinode/packages/app_store/install/Cargo.toml +++ b/kinode/packages/app_store/install/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] anyhow = "1.0" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/app_store/uninstall/Cargo.toml b/kinode/packages/app_store/uninstall/Cargo.toml index 020dc4917..f7ebb6b54 100644 --- a/kinode/packages/app_store/uninstall/Cargo.toml +++ b/kinode/packages/app_store/uninstall/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] anyhow = "1.0" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/chess/chess/Cargo.toml b/kinode/packages/chess/chess/Cargo.toml index 385da11d5..255dd1754 100644 --- a/kinode/packages/chess/chess/Cargo.toml +++ b/kinode/packages/chess/chess/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" anyhow = "1.0" base64 = "0.13" bincode = "1.3.3" -kinode_process_lib = { git = 
"https://github.com/kinode-dao/process_lib", rev = "3232423" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } pleco = "0.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/homepage/homepage/Cargo.toml b/kinode/packages/homepage/homepage/Cargo.toml index bb9a8e409..d9d0d1c4f 100644 --- a/kinode/packages/homepage/homepage/Cargo.toml +++ b/kinode/packages/homepage/homepage/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] anyhow = "1.0" bincode = "1.3.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/kns_indexer/get_block/Cargo.toml b/kinode/packages/kns_indexer/get_block/Cargo.toml index 330327c51..8ae30de57 100644 --- a/kinode/packages/kns_indexer/get_block/Cargo.toml +++ b/kinode/packages/kns_indexer/get_block/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "9838f5d" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml index b0aae5876..8edb58bde 100644 --- a/kinode/packages/kns_indexer/kns_indexer/Cargo.toml +++ b/kinode/packages/kns_indexer/kns_indexer/Cargo.toml @@ -10,7 +10,7 @@ alloy-primitives = "0.6.2" alloy-sol-types = "0.6.2" bincode = "1.3.3" hex = "0.4.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "9838f5d" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } rmp-serde = "1.1.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs index b460784c9..58b4cdf41 100644 --- a/kinode/packages/kns_indexer/kns_indexer/src/lib.rs +++ b/kinode/packages/kns_indexer/kns_indexer/src/lib.rs @@ -303,7 +303,7 @@ fn handle_eth_message( handle_log(our, state, &log)?; } } - Err(e) => { + Err(_e) => { println!("kns_indexer: got eth subscription error"); subscribe_to_logs(ð_provider, state.block - 1, filter.clone()); } diff --git a/kinode/packages/terminal/alias/Cargo.toml b/kinode/packages/terminal/alias/Cargo.toml index 7213f3e2a..318269cc7 100644 --- a/kinode/packages/terminal/alias/Cargo.toml +++ b/kinode/packages/terminal/alias/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] anyhow = "1.0" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/terminal/cat/Cargo.toml b/kinode/packages/terminal/cat/Cargo.toml index f52ebb846..a5dbc8113 100644 --- a/kinode/packages/terminal/cat/Cargo.toml +++ 
b/kinode/packages/terminal/cat/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] anyhow = "1.0" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/terminal/echo/Cargo.toml b/kinode/packages/terminal/echo/Cargo.toml index 79c526172..36f9a32a1 100644 --- a/kinode/packages/terminal/echo/Cargo.toml +++ b/kinode/packages/terminal/echo/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] anyhow = "1.0" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/terminal/hi/Cargo.toml b/kinode/packages/terminal/hi/Cargo.toml index 2fcb9145b..7bcec5c3d 100644 --- a/kinode/packages/terminal/hi/Cargo.toml +++ b/kinode/packages/terminal/hi/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" wit-bindgen = { git = "https://github.com/bytecodealliance/wit-bindgen", rev = "21a46c7" } diff --git a/kinode/packages/terminal/m/Cargo.toml b/kinode/packages/terminal/m/Cargo.toml index a0f3080a8..372e286b5 100644 --- a/kinode/packages/terminal/m/Cargo.toml +++ b/kinode/packages/terminal/m/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] anyhow = "1.0" clap = "4.4.18" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } regex = "1.10.3" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/kinode/packages/terminal/m/src/lib.rs b/kinode/packages/terminal/m/src/lib.rs index 4a0d8db0b..c202d64cb 100644 --- a/kinode/packages/terminal/m/src/lib.rs +++ b/kinode/packages/terminal/m/src/lib.rs @@ -1,6 +1,6 @@ use clap::{Arg, Command}; use kinode_process_lib::{ - await_next_request_body, call_init, println, Address, Request, Response, SendErrorKind, + await_next_request_body, call_init, println, Address, Request, SendErrorKind, }; use regex::Regex; diff --git a/kinode/packages/terminal/terminal/Cargo.toml b/kinode/packages/terminal/terminal/Cargo.toml index 1980a8004..a29805a35 100644 --- a/kinode/packages/terminal/terminal/Cargo.toml +++ b/kinode/packages/terminal/terminal/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] anyhow = "1.0" bincode = "1.3.3" -kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" } +kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" } rand = "0.8" regex = "1.10.3" serde = { version = "1.0", features = ["derive"] } diff --git a/kinode/packages/terminal/terminal/src/lib.rs b/kinode/packages/terminal/terminal/src/lib.rs index 6c924b925..03994985e 100644 --- a/kinode/packages/terminal/terminal/src/lib.rs +++ 
diff --git a/kinode/packages/tester/tester/Cargo.toml b/kinode/packages/tester/tester/Cargo.toml
index 49e2095ee..69ce7280c 100644
--- a/kinode/packages/tester/tester/Cargo.toml
+++ b/kinode/packages/tester/tester/Cargo.toml
@@ -8,7 +8,7 @@ edition = "2021"
 anyhow = "1.0"
 bincode = "1.3.3"
 indexmap = "2.1"
-kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", rev = "12bf9ee" }
+kinode_process_lib = { git = "https://github.com/kinode-dao/process_lib", tag = "v0.6.0-alpha.2" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 thiserror = "1.0"
diff --git a/kinode/src/kernel/process.rs b/kinode/src/kernel/process.rs
index cd25a06b4..66e7147ed 100644
--- a/kinode/src/kernel/process.rs
+++ b/kinode/src/kernel/process.rs
@@ -547,24 +547,30 @@ pub async fn make_process_loop(
         metadata.our.process.package(),
         metadata.our.process.publisher()
     );
-    if let Err(e) = fs::create_dir_all(&tmp_path).await {
-        panic!("failed creating tmp dir! {:?}", e); // TODO REMOVE
-    }
+
+    let mut wasi = WasiCtxBuilder::new();
+
+    // TODO make guarantees about this
+    if let Ok(Ok(())) = tokio::time::timeout(
+        std::time::Duration::from_secs(5),
+        fs::create_dir_all(&tmp_path),
+    )
+    .await
+    {
+        if let Ok(wasi_tempdir) =
+            Dir::open_ambient_dir(tmp_path.clone(), wasmtime_wasi::sync::ambient_authority())
+        {
+            wasi.preopened_dir(
+                wasi_tempdir,
+                DirPerms::all(),
+                FilePerms::all(),
+                tmp_path.clone(),
+            )
+            .env("TEMP_DIR", tmp_path);
+        }
+    }
-    let Ok(wasi_tempdir) =
-        Dir::open_ambient_dir(tmp_path.clone(), wasmtime_wasi::sync::ambient_authority())
-    else {
-        panic!("failed to open ambient tmp dir!"); // TODO REMOVE
-    };
-    let wasi = WasiCtxBuilder::new()
-        .preopened_dir(
-            wasi_tempdir,
-            DirPerms::all(),
-            FilePerms::all(),
-            tmp_path.clone(),
-        )
-        .env("TEMP_DIR", tmp_path)
-        .stderr(wasi_stderr.clone())
-        .build();
+
+    let wasi = wasi.stderr(wasi_stderr.clone()).build();
 
     wasmtime_wasi::preview2::command::add_to_linker(&mut linker).unwrap();
 
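The kernel change above replaces two hard panics with graceful degradation: tmp-dir creation is bounded by a five-second timeout, and the WASI preopen is simply skipped when either step fails, so a process can still start without a tmp dir. A self-contained sketch of that timeout-plus-fallback pattern, using tokio directly (illustrative only, not the kernel's actual wiring; `try_create_tmp_dir` is a hypothetical helper):

```rust
use std::time::Duration;

/// Try to create a scratch directory without letting a slow or failing
/// filesystem block startup: bound the call with a timeout and treat any
/// failure as "run without a tmp dir".
async fn try_create_tmp_dir(tmp_path: &str) -> Option<String> {
    match tokio::time::timeout(
        Duration::from_secs(5),
        tokio::fs::create_dir_all(tmp_path),
    )
    .await
    {
        Ok(Ok(())) => Some(tmp_path.to_string()),
        // Err(_) means the timeout elapsed; Ok(Err(_)) is an I/O error.
        _ => None,
    }
}
```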
{:?}", e); // TODO REMOVE + + let mut wasi = WasiCtxBuilder::new(); + + // TODO make guarantees about this + if let Ok(Ok(())) = tokio::time::timeout( + std::time::Duration::from_secs(5), + fs::create_dir_all(&tmp_path), + ) + .await + { + if let Ok(wasi_tempdir) = + Dir::open_ambient_dir(tmp_path.clone(), wasmtime_wasi::sync::ambient_authority()) + { + wasi.preopened_dir( + wasi_tempdir, + DirPerms::all(), + FilePerms::all(), + tmp_path.clone(), + ) + .env("TEMP_DIR", tmp_path); + } } - let Ok(wasi_tempdir) = - Dir::open_ambient_dir(tmp_path.clone(), wasmtime_wasi::sync::ambient_authority()) - else { - panic!("failed to open ambient tmp dir!"); // TODO REMOVE - }; - let wasi = WasiCtxBuilder::new() - .preopened_dir( - wasi_tempdir, - DirPerms::all(), - FilePerms::all(), - tmp_path.clone(), - ) - .env("TEMP_DIR", tmp_path) - .stderr(wasi_stderr.clone()) - .build(); + + let wasi = wasi.stderr(wasi_stderr.clone()).build(); wasmtime_wasi::preview2::command::add_to_linker(&mut linker).unwrap(); From 1506979bdac7c7da8727ea8c9f28fa802af04e0b Mon Sep 17 00:00:00 2001 From: dr-frmr Date: Mon, 4 Mar 2024 16:21:38 -0300 Subject: [PATCH 23/23] remove unused import --- kinode/packages/terminal/cat/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kinode/packages/terminal/cat/src/lib.rs b/kinode/packages/terminal/cat/src/lib.rs index 373a2b79f..e538dddc4 100644 --- a/kinode/packages/terminal/cat/src/lib.rs +++ b/kinode/packages/terminal/cat/src/lib.rs @@ -1,5 +1,5 @@ use kinode_process_lib::{ - await_next_request_body, call_init, get_blob, println, vfs, Address, Request, Response, + await_next_request_body, call_init, get_blob, println, vfs, Address, Request, }; wit_bindgen::generate!({