feat(network): introduce VerificationKind to use either ways
RolandSherwin committed Dec 11, 2023
1 parent b09b47d commit 518a7ac
Showing 5 changed files with 56 additions and 27 deletions.
19 changes: 15 additions & 4 deletions sn_client/src/api.rs
@@ -25,7 +25,7 @@ use prometheus_client::registry::Registry;
use rand::{thread_rng, Rng};
use sn_networking::{
multiaddr_is_global, Error as NetworkError, GetRecordCfg, GetRecordError, NetworkBuilder,
NetworkEvent, PutRecordCfg, CLOSE_GROUP_SIZE,
NetworkEvent, PutRecordCfg, VerificationKind, CLOSE_GROUP_SIZE,
};
use sn_protocol::{
error::Error as ProtocolError,
@@ -407,7 +407,18 @@ impl Client {
// The `ChunkWithPayment` is only used to send out via PutRecord.
// The holders shall only hold the `Chunk` copies.
// Hence the fetched copies shall only be a `Chunk`
Some((RecordKind::Chunk, verification_cfg))

let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk)?.to_vec();
let random_nonce = thread_rng().gen::<u64>();
let expected_proof = ChunkProof::new(&stored_on_node, random_nonce);

Some((
VerificationKind::ChunkProof {
expected_proof,
nonce: random_nonce,
},
verification_cfg,
))
} else {
None
};
@@ -465,7 +476,7 @@ impl Client {
if let Err(err) = self
.network
.verify_chunk_existence(
&address,
address.clone(),
random_nonce,
expected_proof,
Quorum::N(NonZeroUsize::new(2).ok_or(Error::NonZeroUsizeWasInitialisedAsZero)?),
Expand Down Expand Up @@ -527,7 +538,7 @@ impl Client {
let put_cfg = PutRecordCfg {
put_quorum: Quorum::All,
re_attempt: true,
verification: Some((record_kind, verification_cfg)),
verification: Some((VerificationKind::Network, verification_cfg)),
};
Ok(self.network.put_record(record, &put_cfg).await?)
}
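Editor's note: pulling the api.rs hunks together, the chunk upload path now pre-computes the proof it will later demand from the storing nodes. The sketch below is not code from the commit: the module paths for `try_serialize_record` and `Chunk`, the boxed error type, and the quorum value are assumptions, while every other name appears in this diff (the `PutRecordCfg` literal mirrors the field set of the literal shown above).

use libp2p::kad::Quorum;
use rand::{thread_rng, Rng};
use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind};
use sn_protocol::{
    messages::ChunkProof,
    storage::{try_serialize_record, Chunk, RecordKind},
};

/// Build a chunk PUT config whose verification step is a nonce-bound ChunkProof
/// challenge rather than a plain KAD GET.
fn chunk_put_cfg(
    chunk: &Chunk,
    verification_cfg: GetRecordCfg,
) -> Result<PutRecordCfg, Box<dyn std::error::Error>> {
    // Serialize the chunk exactly as the storing nodes will hold it.
    let stored_on_node = try_serialize_record(chunk, RecordKind::Chunk)?.to_vec();
    // Draw a fresh nonce per upload so an old proof cannot be replayed.
    let random_nonce = thread_rng().gen::<u64>();
    let expected_proof = ChunkProof::new(&stored_on_node, random_nonce);

    Ok(PutRecordCfg {
        put_quorum: Quorum::All, // illustrative; api.rs chooses its own quorum
        re_attempt: true,
        verification: Some((
            VerificationKind::ChunkProof {
                expected_proof,
                nonce: random_nonce,
            },
            verification_cfg,
        )),
    })
}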
4 changes: 2 additions & 2 deletions sn_client/src/register.rs
@@ -10,7 +10,7 @@ use crate::{Client, Error, Result, WalletClient};

use bls::PublicKey;
use libp2p::kad::{Quorum, Record};
use sn_networking::{GetRecordCfg, PutRecordCfg};
use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind};
use sn_protocol::{
error::Error as ProtocolError,
messages::RegisterCmd,
@@ -398,7 +398,7 @@ impl ClientRegister {
let put_cfg = PutRecordCfg {
put_quorum: Quorum::All,
re_attempt: true,
verification: Some((RecordKind::Register, verification_cfg)),
verification: Some((VerificationKind::Network, verification_cfg)),
};

// Register edits might exist so we cannot be sure that just because we get a record back that this should fail
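Editor's note: the register path (and the non-chunk call site in api.rs above) keeps the previous behaviour by passing `VerificationKind::Network`, meaning the record is simply read back with the supplied `GetRecordCfg`. A minimal sketch using only names that appear in this diff:

use libp2p::kad::Quorum;
use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind};

/// PUT config for records that are verified by fetching them back over KAD;
/// the accompanying GetRecordCfg drives that follow-up GET.
fn network_verified_put_cfg(verification_cfg: GetRecordCfg) -> PutRecordCfg {
    PutRecordCfg {
        put_quorum: Quorum::All,
        re_attempt: true,
        verification: Some((VerificationKind::Network, verification_cfg)),
    }
}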
19 changes: 15 additions & 4 deletions sn_networking/src/driver.rs
@@ -48,8 +48,7 @@ use libp2p::{
#[cfg(feature = "open-metrics")]
use prometheus_client::registry::Registry;
use sn_protocol::{
messages::{Request, Response},
storage::RecordKind,
messages::{ChunkProof, Nonce, Request, Response},
NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey,
};
use std::{
@@ -159,8 +158,20 @@ pub struct PutRecordCfg {
pub put_quorum: Quorum,
/// If set to true, we retry up to PUT_RETRY_ATTEMPTS times
pub re_attempt: bool,
/// Enables verification after writing. The RecordKind is used to determine the verification delay.
pub verification: Option<(RecordKind, GetRecordCfg)>,
/// Enables verification after writing. The VerificationKind is used to determine the method to use.
pub verification: Option<(VerificationKind, GetRecordCfg)>,
}

/// The methods by which verification on a PUT can be carried out.
#[derive(Debug, Clone)]
pub enum VerificationKind {
/// Uses the default KAD GET to perform verification.
Network,
/// Uses hash-based verification for chunks.
ChunkProof {
expected_proof: ChunkProof,
nonce: Nonce,
},
}

/// NodeBehaviour struct
36 changes: 19 additions & 17 deletions sn_networking/src/lib.rs
@@ -29,7 +29,7 @@ mod transfers;
use self::{cmd::SwarmCmd, error::Result};
pub use self::{
cmd::SwarmLocalState,
driver::{GetRecordCfg, NetworkBuilder, PutRecordCfg, SwarmDriver},
driver::{GetRecordCfg, NetworkBuilder, PutRecordCfg, SwarmDriver, VerificationKind},
error::{Error, GetRecordError},
event::{MsgResponder, NetworkEvent},
record_store::NodeRecordStore,
@@ -44,11 +44,11 @@ use libp2p::{
multiaddr::Protocol,
Multiaddr, PeerId,
};
use rand::{thread_rng, Rng};
use rand::Rng;
use sn_protocol::{
error::Error as ProtocolError,
messages::{ChunkProof, Nonce, Query, QueryResponse, Request, Response},
storage::{RecordKind, RecordType},
storage::RecordType,
NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey,
};
use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote};
@@ -257,7 +257,7 @@ impl Network {
/// Get the Chunk existence proof from the close nodes to the provided chunk address.
pub async fn verify_chunk_existence(
&self,
chunk_address: &NetworkAddress,
chunk_address: NetworkAddress,
nonce: Nonce,
expected_proof: ChunkProof,
quorum: Quorum,
@@ -282,7 +282,7 @@ impl Network {
// The close_nodes don't change often and the previous set of close_nodes might be taking a while to write
// the Chunk, so query them again in case of a failure.
if retry_attempts % 2 == 0 {
close_nodes = self.get_closest_peers(chunk_address, true).await?;
close_nodes = self.get_closest_peers(&chunk_address, true).await?;
}
let request = Request::Query(Query::GetChunkExistenceProof {
key: chunk_address.clone(),
@@ -293,16 +293,19 @@ impl Network {
.await;
let n_verified = responses
.into_iter()
.filter_map(|(_peer, resp)| {
.filter_map(|(peer, resp)| {
if let Ok(Response::Query(QueryResponse::GetChunkExistenceProof(Ok(proof)))) =
resp
{
if expected_proof.verify(&proof) {
debug!("Got a valid ChunkProof from {peer:?}");
Some(())
} else {
warn!("Failed to verify the ChunkProof from {peer:?}. The chunk might have been tampered?");
None
}
} else {
debug!("Did not get a valid response for the ChunkProof from {peer:?}");
None
}
})
@@ -312,7 +315,7 @@ impl Network {
if n_verified >= expected_n_verified {
return Ok(());
}
debug!("The obtained {n_verified} verified proofs did not match the expected {expected_n_verified} verified proofs");
warn!("The obtained {n_verified} verified proofs did not match the expected {expected_n_verified} verified proofs");
}

Err(Error::FailedToVerifyChunkProof(chunk_address.clone()))
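Editor's note: the verification loop above boils down to counting how many close-group peers return a proof matching the one computed locally, then comparing that count with the required quorum. Below is a condensed, self-contained sketch of that check; the types here are simplified stand-ins, not the sn_protocol ones, and equality plays the role of `ChunkProof::verify`.

/// Stand-in for sn_protocol's ChunkProof.
#[derive(Clone, PartialEq, Eq, Debug)]
struct ChunkProof(u64);

/// Count the peers whose proof matches ours and check the count against the
/// quorum, mirroring the shape of `Network::verify_chunk_existence` above.
fn quorum_of_valid_proofs(
    expected: &ChunkProof,
    responses: &[(String, Option<ChunkProof>)], // (peer, proof if the query succeeded)
    expected_n_verified: usize,
) -> bool {
    let n_verified = responses
        .iter()
        .filter(|(_peer, proof)| proof.as_ref() == Some(expected))
        .count();
    n_verified >= expected_n_verified
}

fn main() {
    let expected = ChunkProof(42);
    let responses = vec![
        ("peer-a".to_string(), Some(ChunkProof(42))),
        ("peer-b".to_string(), Some(ChunkProof(7))), // stale or tampered copy
        ("peer-c".to_string(), None),                // query failed
        ("peer-d".to_string(), Some(ChunkProof(42))),
    ];
    // Quorum::N(2) on the client side maps to needing two matching proofs here.
    assert!(quorum_of_valid_proofs(&expected, &responses, 2));
}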
@@ -543,7 +546,7 @@ impl Network {
})?;
let response = receiver.await?;

if let Some((record_kind, get_cfg)) = &cfg.verification {
if let Some((verification_kind, get_cfg)) = &cfg.verification {
// Generate a random duration between MAX_WAIT_BEFORE_READING_A_PUT and MIN_WAIT_BEFORE_READING_A_PUT
let wait_duration = rand::thread_rng()
.gen_range(MIN_WAIT_BEFORE_READING_A_PUT..MAX_WAIT_BEFORE_READING_A_PUT);
@@ -553,16 +556,15 @@ impl Network {
debug!("Attempting to verify {pretty_key:?} after we've slept for {wait_duration:?}");

// Verify the record is stored, requiring re-attempts
if let RecordKind::Chunk = record_kind {
// use ChunkProof when we are trying to verify a Chunk
let address = NetworkAddress::from_record_key(&record_key);
let random_nonce = thread_rng().gen::<u64>();

let expected_proof = ChunkProof::new(&record.value, random_nonce);
if let VerificationKind::ChunkProof {
expected_proof,
nonce,
} = verification_kind
{
self.verify_chunk_existence(
&address,
random_nonce,
expected_proof,
NetworkAddress::from_record_key(&record_key),
*nonce,
expected_proof.clone(),
get_cfg.get_quorum,
get_cfg.re_attempt,
)
5 changes: 5 additions & 0 deletions sn_node/src/node.rs
@@ -508,7 +508,12 @@ impl Node {
let mut result = Err(ProtocolError::ChunkDoesNotExist(key.clone()));
if let Ok(Some(record)) = network.get_local_record(&key.to_record_key()).await {
let proof = ChunkProof::new(&record.value, nonce);
trace!("Chunk proof for {key:?} is {proof:?}");
result = Ok(proof)
} else {
trace!(
"Could not get ChunkProof for {key:?} as we don't have the record locally."
);
}

QueryResponse::GetChunkExistenceProof(result)
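Editor's note: the node-side half of the exchange is a possession challenge: hash the locally stored record value together with the caller's nonce and return the digest, which the client compares with the proof it computed before uploading. The real ChunkProof lives in sn_protocol and is not shown in this diff, so the toy below only illustrates the shape of the exchange, with std's DefaultHasher standing in for whatever cryptographic hash the real type uses.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Toy stand-in for sn_protocol's ChunkProof.
#[derive(Clone, PartialEq, Eq, Debug)]
struct ToyChunkProof(u64);

impl ToyChunkProof {
    /// Bind the stored bytes to a caller-chosen nonce.
    fn new(stored_bytes: &[u8], nonce: u64) -> Self {
        let mut hasher = DefaultHasher::new();
        stored_bytes.hash(&mut hasher);
        nonce.hash(&mut hasher);
        Self(hasher.finish())
    }

    fn verify(&self, other: &Self) -> bool {
        self == other
    }
}

fn main() {
    let chunk_bytes = b"serialized chunk record";
    let nonce = 0xdead_beef_u64;

    // Client side: compute the expected proof before uploading.
    let expected = ToyChunkProof::new(chunk_bytes, nonce);
    // Node side: answer the GetChunkExistenceProof query from the local record.
    let answer = ToyChunkProof::new(chunk_bytes, nonce);
    assert!(expected.verify(&answer));

    // A node that does not hold the exact bytes cannot answer a fresh nonce.
    let bogus = ToyChunkProof::new(b"some other bytes", nonce);
    assert!(!expected.verify(&bogus));
}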
