Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
ce31302
First UI pieces
dolled-possum Sep 15, 2025
033c61e
Adding to custom and reset, plumbing thru mint
dolled-possum Sep 15, 2025
2fdb183
debug output, setting routers before mint
dolled-possum Sep 16, 2025
9b22763
Passing routers in the boot payload
dolled-possum Sep 17, 2025
62e224c
cleaning up console logging
dolled-possum Sep 17, 2025
3c93c96
missed some console logging
dolled-possum Sep 17, 2025
68fea1b
Merge branch 'develop' into dw/router-designation
dolled-possum Sep 17, 2025
1a34d7b
page 2 advanced settings
dolled-possum Sep 24, 2025
a6bfa84
printout of data read from hypermap vfs
dolled-possum Sep 24, 2025
f840cb4
Merge branch 'develop' into dw/router-designation
dolled-possum Sep 24, 2025
2cf90d9
Merge branch 'dw/router-designation' into dw/other-overrides
dolled-possum Sep 24, 2025
137dfb4
Merge branch 'develop' into dw/router-designation
dolled-possum Sep 30, 2025
d3fe4ef
Merge branch 'dw/router-designation' into dw/other-overrides
dolled-possum Sep 30, 2025
bbe1283
set Base L2 Access & Cache Source (boot/login)
dolled-possum Sep 30, 2025
30ca184
Format Rust code using rustfmt
github-actions[bot] Sep 30, 2025
dfdd761
disambiguate Set-Password/Login Advanced Options
dolled-possum Sep 30, 2025
437e4f1
prepopulating cache srcs and base L2 access srcs
dolled-possum Oct 2, 2025
7fde502
Format Rust code using rustfmt
github-actions[bot] Oct 2, 2025
915ad0a
bi-directional data flow for cache and base L2 src
dolled-possum Oct 5, 2025
c7cabc8
Format Rust code using rustfmt
github-actions[bot] Oct 5, 2025
e091c91
remove options_config file and helper code
dolled-possum Oct 5, 2025
b514bec
purge options_config, data.txt -> cache_sources
dolled-possum Oct 5, 2025
1708d53
front end validations
dolled-possum Oct 5, 2025
ee29cb3
Format Rust code using rustfmt
github-actions[bot] Oct 5, 2025
063c6ca
initial state, leave checked/unchecked recommend
dolled-possum Oct 7, 2025
4edd112
UI improvements
dolled-possum Oct 7, 2025
33f32b1
don’t autoclear cache-sources on toggle
dolled-possum Oct 7, 2025
a7bb7ea
Format Rust code using rustfmt
github-actions[bot] Oct 7, 2025
00aae21
trivial cleanup
dolled-possum Oct 7, 2025
1f2d7e8
extraneous character
dolled-possum Oct 7, 2025
721c91d
Format Rust code using rustfmt
github-actions[bot] Oct 7, 2025
907cf3f
Merge branch 'develop' into dw/router-designation
dolled-possum Oct 8, 2025
361c8e8
UI changes as per PR review request
dolled-possum Oct 8, 2025
2ca2404
Merge branch 'develop' into dw/router-designation
dolled-possum Oct 8, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions hyperdrive/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ chrono = "0.4.31"
clap = { version = "4.4", features = ["derive"] }
crossterm = { version = "0.27.0", features = ["event-stream", "bracketed-paste"] }
dashmap = "5.5.3"
dirs = "5.0"
futures = "0.3"
generic-array = "0.14.7"
hex = "0.4.3"
Expand Down
62 changes: 57 additions & 5 deletions hyperdrive/packages/hns-indexer/hns-indexer/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,10 @@ use alloy_sol_types::SolEvent;
use hyperware::process::standard::clear_state;
use hyperware_process_lib::logging::{debug, error, info, init_logging, warn, Level};
use hyperware_process_lib::{
await_message, call_init, eth, get_state, hypermap, net, set_state, timer, Address, Capability,
Message, Request, Response,
await_message, call_init, eth, get_state, hypermap, net, our, set_state, timer, vfs, Address,
Capability, Message, Request, Response,
};
use std::sync::{Mutex, OnceLock};
use std::{
collections::{BTreeMap, HashMap, HashSet},
net::{IpAddr, Ipv4Addr, Ipv6Addr},
Expand All @@ -31,15 +32,17 @@ const SUBSCRIPTION_TIMEOUT_S: u64 = 60;
const DELAY_MS: u64 = 2_000;
const CHECKPOINT_MS: u64 = 5 * 60 * 1_000; // 5 minutes

static NODES: OnceLock<Mutex<Vec<String>>> = OnceLock::new();

#[cfg(not(feature = "simulation-mode"))]
const DEFAULT_NODES: &[&str] = &[
const DEFAULT_NODES_FALLBACK: &[&str] = &[
"us-cacher-1.hypr",
"eu-cacher-1.hypr",
"nick.hypr",
"nick1udwig.os",
];
#[cfg(feature = "simulation-mode")]
const DEFAULT_NODES: &[&str] = &["fake.os"];
const DEFAULT_NODES_FALLBACK: &[&str] = &["fake.os"];

type PendingNotes = BTreeMap<u64, Vec<(String, String, eth::Bytes, u8)>>;

Expand Down Expand Up @@ -707,6 +710,55 @@ impl From<StateV1> for WitState {
}
}

// Returns the cache-source node list (replaces direct access to DEFAULT_NODES),
// loading it once and memoizing it in the NODES static. Precedence: a non-empty
// JSON string array in `<initfiles drive>/cache_sources`, otherwise
// DEFAULT_NODES_FALLBACK.
fn get_nodes() -> Vec<String> {
    NODES
        .get_or_init(|| {
            let nodes = load_custom_nodes().unwrap_or_else(|| {
                // Fallback to default nodes
                DEFAULT_NODES_FALLBACK
                    .iter()
                    .map(|s| s.to_string())
                    .collect()
            });
            Mutex::new(nodes)
        })
        .lock()
        .unwrap()
        .clone()
}

// Tries to read a non-empty node list from `<initfiles drive>/cache_sources`.
// Returns None on any failure (drive creation, open, read, JSON parse, or an
// empty list), logging the reason so boot diagnostics stay visible.
fn load_custom_nodes() -> Option<Vec<String>> {
    let drive_path = match vfs::create_drive(our().package_id(), "initfiles", None) {
        Ok(path) => path,
        Err(e) => {
            info!("Failed to create drive: {}, using defaults", e);
            return None;
        }
    };
    let file = match vfs::open_file(&format!("{}/cache_sources", drive_path), false, None) {
        Ok(file) => file,
        Err(e) => {
            info!("Failed to open cache_sources: {}, using defaults", e);
            return None;
        }
    };
    let contents = match file.read() {
        Ok(contents) => contents,
        Err(e) => {
            info!("Failed to read cache_sources: {}, using defaults", e);
            return None;
        }
    };
    let content_str = String::from_utf8_lossy(&contents);
    info!("Contents of cache_sources: {}", content_str);

    // Parse the JSON to get the vector of node names
    match serde_json::from_str::<Vec<String>>(&content_str) {
        Ok(custom_nodes) if !custom_nodes.is_empty() => {
            info!("Loading custom nodes: {:?}", custom_nodes);
            Some(custom_nodes)
        }
        Ok(_) => {
            info!("Custom nodes list is empty, using defaults");
            None
        }
        Err(e) => {
            info!(
                "Failed to parse cache_sources as JSON: {}, using defaults",
                e
            );
            None
        }
    }
}

fn make_filters() -> (eth::Filter, eth::Filter) {
let hypermap_address = eth::Address::from_str(hypermap::HYPERMAP_ADDRESS).unwrap();
// sub_id: 1
Expand Down Expand Up @@ -805,7 +857,7 @@ fn main(our: &Address, state: &mut StateV1) -> anyhow::Result<()> {
// if block in state is < current_block, get logs from that part.
info!("syncing old logs from block: {}", state.last_block);

let nodes: HashSet<String> = DEFAULT_NODES.iter().map(|s| s.to_string()).collect();
let nodes: HashSet<String> = get_nodes().iter().map(|s| s.to_string()).collect();
state.fetch_and_process_logs(nodes);

// set a timer tick so any pending logs will be processed
Expand Down
101 changes: 100 additions & 1 deletion hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -558,6 +558,55 @@ impl State {

// Try to bootstrap from other hypermap-cacher nodes
fn try_bootstrap_from_nodes(&mut self) -> anyhow::Result<()> {
// Create alternate drive for initfiles and read the test data
let alt_drive_path = vfs::create_drive(our().package_id(), "initfiles", None).unwrap();

// Try to read the cache_sources file from the initfiles drive
match vfs::open_file(&format!("{}/cache_sources", alt_drive_path), false, None) {
Ok(file) => {
match file.read() {
Ok(contents) => {
let content_str = String::from_utf8_lossy(&contents);
info!("Contents of cache_sources: {}", content_str);

// Parse the JSON to get the vector of node names
match serde_json::from_str::<Vec<String>>(&content_str) {
Ok(custom_cache_nodes) => {
if !custom_cache_nodes.is_empty() {
info!(
"Loading custom cache source nodes: {:?}",
custom_cache_nodes
);
// Clear existing nodes and add custom ones
self.nodes.clear();
for node_name in custom_cache_nodes {
self.nodes.push(node_name.clone());
}
} else {
info!("Custom cache nodes list is empty, keeping existing node configuration");
}
}
Err(e) => {
info!("Failed to parse cache_sources as JSON: {}, keeping existing node configuration", e);
}
}
}
Err(e) => {
info!(
"Failed to read cache_sources: {}, keeping existing node configuration",
e
);
}
}
}
Err(e) => {
info!(
"Failed to open cache_sources: {}, keeping existing node configuration",
e
);
}
}

if self.nodes.is_empty() {
info!("No nodes configured for bootstrap, will fallback to RPC");
return Err(anyhow::anyhow!("No nodes configured for bootstrap"));
Expand Down Expand Up @@ -693,6 +742,33 @@ impl State {
Err(anyhow::anyhow!("Failed to bootstrap from any node"))
}

// Helper function to persist the current node list as JSON to
// `<initfiles drive>/cache_sources`, so custom cache sources survive restarts
// and are readable by other processes that consult the same file.
fn write_nodes_to_file(&self) -> anyhow::Result<()> {
    let alt_drive_path = vfs::create_drive(our().package_id(), "initfiles", None)?;
    let nodes_json = serde_json::to_string(&self.nodes)?;
    let file_path = format!("{}/cache_sources", alt_drive_path);

    // Open file in write mode which should truncate, but to be safe we'll
    // write exact bytes and then pin the length.
    let mut file = vfs::open_file(&file_path, true, None)?;
    let bytes = nodes_json.as_bytes();
    file.write_all(bytes)?;

    // Explicitly set the file length to the exact size of what we wrote.
    // This ensures any old content beyond this point is truncated.
    file.set_len(bytes.len() as u64)?;

    info!("Updated cache_sources with {} nodes", self.nodes.len());
    Ok(())
}

// Process received log caches and write them to VFS
fn process_received_log_caches(
&mut self,
Expand Down Expand Up @@ -1189,6 +1265,9 @@ fn handle_request(
}
state.nodes = new_nodes;
state.save();
if let Err(e) = state.write_nodes_to_file() {
error!("Failed to write nodes to cache_sources: {:?}", e);
}
info!("Nodes updated to: {:?}", state.nodes);
CacherResponse::SetNodes(Ok("Nodes updated successfully".to_string()))
}
Expand All @@ -1215,7 +1294,9 @@ fn handle_request(
*state = State::new(&state.drive_path);
state.nodes = nodes;
state.save();

if let Err(e) = state.write_nodes_to_file() {
error!("Failed to write nodes to cache_sources: {:?}", e);
}
info!(
"Hypermap-cacher reset complete. New nodes: {:?}",
state.nodes
Expand Down Expand Up @@ -1343,6 +1424,24 @@ fn init(our: Address) {
info!("Hypermap Cacher process starting...");

let drive_path = vfs::create_drive(our.package_id(), "cache", None).unwrap();
// Create alternate drive for initfiles and read the test data
let alt_drive_path = vfs::create_drive(our.package_id(), "initfiles", None).unwrap();

// Try to read the cache_sources file from the initfiles drive
match vfs::open_file(&format!("{}/cache_sources", alt_drive_path), false, None) {
Ok(file) => match file.read() {
Ok(contents) => {
let content_str = String::from_utf8_lossy(&contents);
info!("Contents of cache_sources: {}", content_str);
}
Err(e) => {
info!("Failed to read cache_sources: {}", e);
}
},
Err(e) => {
info!("Failed to open cache_sources: {}", e);
}
}

let bind_config = http::server::HttpBindingConfig::default().authenticated(false);
let mut server = http::server::HttpServer::new(5);
Expand Down
4 changes: 3 additions & 1 deletion hyperdrive/src/eth/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1460,9 +1460,11 @@ async fn handle_eth_config_action(
};
}
if save_providers {
let saved_configs = providers_to_saved_configs(&state.providers);

if let Ok(()) = tokio::fs::write(
state.home_directory_path.join(".eth_providers"),
serde_json::to_string(&providers_to_saved_configs(&state.providers)).unwrap(),
serde_json::to_string(&saved_configs).unwrap(),
)
.await
{
Expand Down
71 changes: 71 additions & 0 deletions hyperdrive/src/eth_config_utils.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
use lib::eth::{ProviderConfig, SavedConfigs};

/// Insert `new_provider` at the head of `eth_provider_config`, first removing
/// any existing entry that refers to the same endpoint (same RPC URL, or same
/// node name) so the list never accumulates duplicates.
pub fn add_provider_to_config(
    eth_provider_config: &mut SavedConfigs,
    new_provider: ProviderConfig,
) {
    // Keep an existing entry only if its identity differs from the incoming
    // provider's; entries of a different variant kind never collide.
    eth_provider_config.0.retain(|existing| {
        match (&new_provider.provider, &existing.provider) {
            (
                lib::eth::NodeOrRpcUrl::RpcUrl { url, .. },
                lib::eth::NodeOrRpcUrl::RpcUrl {
                    url: existing_url, ..
                },
            ) => existing_url != url,
            (
                lib::eth::NodeOrRpcUrl::Node { hns_update, .. },
                lib::eth::NodeOrRpcUrl::Node {
                    hns_update: existing_update,
                    ..
                },
            ) => existing_update.name != hns_update.name,
            _ => true,
        }
    });

    // Newest provider takes highest priority (front of the list).
    eth_provider_config.0.insert(0, new_provider);
}

/// Extract RPC-URL providers (including their auth, if any) from `SavedConfigs`,
/// restricted to the default chain; node providers are skipped.
pub fn extract_rpc_url_providers_for_default_chain(
    saved_configs: &lib::eth::SavedConfigs,
) -> Vec<lib::eth::NodeOrRpcUrl> {
    // CHAIN_ID is already cfg-gated where it is defined (8453 for mainnet,
    // 31337 for simulation), so no per-element #[cfg] split is needed here.
    saved_configs
        .0
        .iter()
        .filter(|provider_config| provider_config.chain_id == crate::CHAIN_ID)
        .filter_map(|provider_config| match &provider_config.provider {
            lib::eth::NodeOrRpcUrl::RpcUrl { url, auth } => {
                // Return the full RpcUrl enum variant with both url and auth
                Some(lib::eth::NodeOrRpcUrl::RpcUrl {
                    url: url.clone(),
                    auth: auth.clone(),
                })
            }
            lib::eth::NodeOrRpcUrl::Node { .. } => None, // Skip node providers
        })
        .collect()
}
Loading