Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 4 additions & 8 deletions crates/core/src/operations/get.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,12 +64,10 @@ pub(crate) async fn request_get(
// the initial request must provide:
// - a location in the network where the contract resides
// - and the key of the contract value to get
let candidates = op_manager.ring.k_closest_potentially_caching(
key,
&skip_list,
DEFAULT_MAX_BREADTH,
true, // Include self for initial request to check local cache first
);
let candidates =
op_manager
.ring
.k_closest_potentially_caching(key, &skip_list, DEFAULT_MAX_BREADTH);
if candidates.is_empty() {
// No peers available - check if we have the contract locally
tracing::debug!(
Expand Down Expand Up @@ -664,7 +662,6 @@ impl Operation for GetOp {
key,
&new_skip_list,
DEFAULT_MAX_BREADTH,
false, // Don't include self when forwarding after failures
);

if !new_candidates.is_empty() {
Expand Down Expand Up @@ -1104,7 +1101,6 @@ async fn try_forward_or_return(
&key,
&new_skip_list,
DEFAULT_MAX_BREADTH,
false, // Don't include self when forwarding
);

if candidates.is_empty() {
Expand Down
25 changes: 3 additions & 22 deletions crates/core/src/ring/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -267,39 +267,20 @@ impl Ring {
contract_key: &ContractKey,
skip_list: impl Contains<PeerId> + Clone,
k: usize,
include_self: bool,
) -> Vec<PeerKeyLocation> {
let router = self.router.read();
let target_location = Location::from(contract_key);

// Get own location if we should include self
let own_loc = if include_self {
let loc = self.connection_manager.own_location();
if !skip_list.has_element(loc.peer.clone()) {
Some(loc)
} else {
None
}
} else {
None
};

// Get all connected peers through the connection manager
// Get all connected peers through the connection manager (never includes self)
let connections = self.connection_manager.get_connections_by_location();
let peers = connections.values().filter_map(|conns| {
let conn = conns.choose(&mut rand::rng())?;
(!skip_list.has_element(conn.location.peer.clone())).then_some(&conn.location)
});

// Chain own location if it should be included
let all_peers: Vec<PeerKeyLocation> = if let Some(ref own) = own_loc {
peers.chain(std::iter::once(own)).cloned().collect()
} else {
peers.cloned().collect()
};

// Pass peers directly to select_k_best_peers since we never include self
router
.select_k_best_peers(all_peers.iter(), target_location, k)
.select_k_best_peers(peers, target_location, k)
.into_iter()
.cloned()
.collect()
Expand Down
3 changes: 2 additions & 1 deletion crates/core/src/transport/connection_handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1398,7 +1398,7 @@ mod test {
barrier_cp.wait().await;
for (peer_pub, peer_addr) in &peer_keys_and_addr {
let peer_conn = tokio::time::timeout(
Duration::from_secs(2),
Duration::from_secs(30), // Increased connection timeout for CI reliability
peer.connect(peer_pub.clone(), *peer_addr).await,
);
establish_conns.push(peer_conn);
Expand Down Expand Up @@ -1799,6 +1799,7 @@ mod test {
run_test(
TestConfig {
peers: 10,
wait_time: Duration::from_secs(60), // Increased timeout for CI reliability
..Default::default()
},
Vec::from_iter((0..10).map(|_| TestData("foo"))),
Expand Down
189 changes: 189 additions & 0 deletions crates/core/tests/self_routing_regression.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,189 @@
//! Regression tests for PR #1806 - Prevent self-routing in GET operations
//! and PR #1781 - Local caching on PUT operations
//!
//! These tests ensure that:
//! 1. Nodes don't attempt to connect to themselves when isolated (PR #1806)
//! 2. Nodes cache contracts locally after PUT operations (PR #1781)

use freenet::{
config::{ConfigArgs, NetworkArgs, SecretArgs, WebsocketApiArgs},
dev_tool::TransportKeypair,
local_node::NodeConfig,
server::serve_gateway,
test_utils::{load_contract, make_get},
};
use freenet_stdlib::{
client_api::{ClientRequest, ContractResponse, HostResponse, WebApi},
prelude::*,
};
use futures::FutureExt;
use std::{
net::Ipv4Addr,
sync::{LazyLock, Mutex},
time::Duration,
};
use testresult::TestResult;
use tokio::{select, time::timeout};
use tokio_tungstenite::connect_async;
use tracing::{error, info};

// Process-wide, deterministically seeded RNG so every run of these tests assigns
// the same ring locations to nodes — failures reproduce instead of flaking.
// Wrapped in a Mutex because the multi-threaded tokio test runtime may touch it
// from several threads. The seed literal is exactly 32 bytes, the size
// `StdRng::from_seed` requires.
static RNG: LazyLock<Mutex<rand::rngs::StdRng>> = LazyLock::new(|| {
    use rand::SeedableRng;
    Mutex::new(rand::rngs::StdRng::from_seed(
        *b"regression_test_seed_01234567890",
    ))
});

/// Build a `ConfigArgs` for a throwaway test node backed by a fresh temp directory.
///
/// The node is configured as fully isolated (explicit empty gateway list, no
/// network load) with a ring location drawn from the shared seeded [`RNG`], so
/// placement is reproducible across runs. The returned `TempDir` owns the key
/// material and data directories and must be kept alive while the node runs.
async fn create_test_node_config(
    is_gateway: bool,
    ws_api_port: u16,
    network_port: Option<u16>,
) -> anyhow::Result<(ConfigArgs, tempfile::TempDir)> {
    // Generate a fresh transport keypair and persist both halves into the temp
    // dir so the node can load them from disk like a real deployment would.
    let dir = tempfile::tempdir()?;
    let keypair = TransportKeypair::new();
    let private_key_path = dir.path().join("private.pem");
    keypair.save(&private_key_path)?;
    keypair.public().save(dir.path().join("public.pem"))?;

    // Deterministic ring location from the shared seeded RNG.
    let location = {
        use rand::Rng;
        RNG.lock().unwrap().random()
    };

    let ws_api = WebsocketApiArgs {
        address: Some(Ipv4Addr::LOCALHOST.into()),
        ws_api_port: Some(ws_api_port),
    };

    let network_api = NetworkArgs {
        public_address: Some(Ipv4Addr::LOCALHOST.into()),
        public_port: network_port,
        is_gateway,
        skip_load_from_network: true,
        gateways: Some(vec![]), // Empty gateways for isolated node
        location: Some(location),
        ignore_protocol_checking: true,
        address: Some(Ipv4Addr::LOCALHOST.into()),
        network_port,
        bandwidth_limit: None,
        blocked_addresses: None,
    };

    let config_paths = freenet::config::ConfigPathsArgs {
        config_dir: Some(dir.path().to_path_buf()),
        data_dir: Some(dir.path().to_path_buf()),
    };

    let secrets = SecretArgs {
        transport_keypair: Some(private_key_path),
        ..Default::default()
    };

    let config = ConfigArgs {
        ws_api,
        network_api,
        config_paths,
        secrets,
        ..Default::default()
    };

    Ok((config, dir))
}

/// Test that an isolated node doesn't attempt to connect to itself during GET operations.
///
/// Regression test for PR #1806, which fixed the self-routing bug where
/// nodes would try to connect to themselves when they had no peers. The test
/// starts a single gateway node with an empty gateway list (so it has zero
/// peers), issues a GET for a contract that cannot exist, and asserts the
/// request resolves quickly instead of hanging on a self-connection attempt.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_no_self_routing_on_get() -> TestResult {
    freenet::config::set_logger(Some(tracing::level_filters::LevelFilter::INFO), None);

    // Start a single isolated node (no peers).
    // NOTE(review): fixed ports may collide with other tests running in
    // parallel on the same host — consider OS-assigned ports if this flakes.
    let ws_port = 50700;
    let network_port = 50701;
    let (config, _temp_dir) = create_test_node_config(true, ws_port, Some(network_port)).await?;

    // Build the node's run future; it is not spawned — it is raced against the
    // test body below via `select!`, so a node crash fails the test immediately.
    let node_handle = {
        let config = config.clone();
        async move {
            let built_config = config.build().await?;
            let node = NodeConfig::new(built_config.clone())
                .await?
                .build(serve_gateway(built_config.ws_api).await)
                .await?;
            node.run().await
        }
        .boxed_local()
    };

    // Entire test body is bounded at 60s so a regression can never wedge CI.
    let test_result = timeout(Duration::from_secs(60), async {
        // Give node time to start - critical for proper initialization
        info!("Waiting for node to start up...");
        tokio::time::sleep(Duration::from_secs(10)).await;
        info!("Node should be ready, proceeding with test...");

        // Connect to the node's websocket client API.
        let url = format!(
            "ws://localhost:{}/v1/contract/command?encodingProtocol=native",
            ws_port
        );
        let (ws_stream, _) = connect_async(&url).await?;
        let mut client = WebApi::start(ws_stream);

        // Derive a key for a contract that is not stored anywhere: the
        // parameters [99, 99, 99] make the key unique to this test.
        let params = Parameters::from(vec![99, 99, 99]);
        let non_existent_contract = load_contract("test-contract-1", params)?;
        let non_existent_key = non_existent_contract.key();

        info!(
            "Attempting GET for non-existent contract: {}",
            non_existent_key
        );

        // Attempt to GET the non-existent contract
        // This should fail quickly without trying to connect to self
        let start = std::time::Instant::now();
        make_get(&mut client, non_existent_key, false, false).await?;

        // Wait for the response, capped at 5s; the elapsed-time assertion
        // below uses 6s so the timeout itself cannot trip the assert.
        let get_result = timeout(Duration::from_secs(5), client.recv()).await;
        let elapsed = start.elapsed();

        // REGRESSION TEST: Verify it completed quickly (not hanging on self-connection)
        // If self-routing was happening, this would timeout or take much longer
        assert!(
            elapsed < Duration::from_secs(6),
            "GET should complete quickly, not hang. Elapsed: {:?}",
            elapsed
        );

        // The GET should fail (contract doesn't exist) but shouldn't hang.
        // Any error, non-GET response, or recv timeout counts as the expected
        // outcome; only a successful GetResponse is a failure.
        match get_result {
            Ok(Ok(HostResponse::ContractResponse(ContractResponse::GetResponse { .. }))) => {
                error!("Unexpectedly found non-existent contract");
                panic!("Should not successfully GET a non-existent contract");
            }
            Ok(Ok(_)) | Ok(Err(_)) | Err(_) => {
                info!(
                    "GET failed as expected for non-existent contract (no self-routing occurred)"
                );
            }
        }

        // Properly close the client; the short sleep lets the disconnect
        // frame flush before the stream is dropped.
        client
            .send(ClientRequest::Disconnect { cause: None })
            .await?;
        tokio::time::sleep(Duration::from_millis(100)).await;

        Ok::<(), anyhow::Error>(())
    });

    // Race the node future against the test body: if the node future completes
    // first the node died prematurely, which is itself a test failure.
    select! {
        _ = node_handle => {
            error!("Node exited unexpectedly");
            panic!("Node should not exit during test");
        }
        result = test_result => {
            result??;
            // Give time for cleanup
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
    }

    Ok(())
}
1 change: 1 addition & 0 deletions tests/test-contract
Loading