Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
152 changes: 152 additions & 0 deletions src/toxcore/dht_new/codec.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
/*
Copyright (C) 2013 Tox project All Rights Reserved.
Copyright © 2016-2017 Zetok Zalbavar <zexavexxe@gmail.com>
Copyright © 2018 Namsoo CHO <nscho66@gmail.com>

This file is part of Tox.

Tox is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Tox is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Tox. If not, see <http://www.gnu.org/licenses/>.
*/

/*! Codec for encoding/decoding DHT Packets & DHT Request packets using tokio-io
*/

use toxcore::dht_new::packet::*;
use toxcore::binary_io_new::*;

use std::io;
use std::io::{Error, ErrorKind};
use tokio_core::net::UdpCodec;
use std::net::SocketAddr;

/// Type representing outgoing DHT UDP packets: the destination address
/// paired with the packet to be serialized by [`DhtCodec::encode`].
pub type DhtUdpPacket = (SocketAddr, DhtBase);

/// Type representing received DHT UDP packets: the source address paired
/// with the parsed packet. The `Option` allows representing a datagram
/// that could not be parsed as a valid `DhtBase` packet.
pub type DhtRecvUdpPacket = (SocketAddr, Option<DhtBase>);

/** Maximum size (in bytes) of a serialized DHT packet.

`SendNodes` is the largest of the DHT-related packets, so its maximum
serialized size bounds the buffer needed for any `DhtPacket`:

```text
size     | description
---------|------------------------
       1 | packet type
      32 | public key
      24 | nonce
       1 | number of response nodes
[39,204] | packed nodes
       8 | Request Id (Ping Id)
```

270 bytes maximum. Because `SendNodes` is the largest among DHT-related
packets, 512 bytes is enough for any `DhtPacket`.
*/
pub const MAX_DHT_PACKET_SIZE: usize = 512;

/// Stateless codec for {de-,}serializing DHT UDP packets; implements
/// tokio-core's `UdpCodec` (one datagram per `decode`/`encode` call).
pub struct DhtCodec;

impl UdpCodec for DhtCodec {
type In = DhtRecvUdpPacket;
type Out = DhtUdpPacket;

fn decode(&mut self, src: &SocketAddr, buf: &[u8]) -> io::Result<Self::In>
{
match DhtBase::from_bytes(buf) {
IResult::Incomplete(_) => {
Err(Error::new(ErrorKind::Other,
"DhtBase packet should not be incomplete"))
},
IResult::Error(e) => {
Err(Error::new(ErrorKind::Other,
format!("deserialize DhtBase packet error: {:?}", e)))
},
IResult::Done(_, encrypted_packet) => {
Ok((*src, Some(encrypted_packet)))
}
}
}

fn encode(&mut self, (addr, dp): Self::Out, into: &mut Vec<u8>) -> SocketAddr {
let mut buf = [0; MAX_DHT_PACKET_SIZE];
if let Ok((_, size)) = dp.to_bytes((&mut buf, 0)) {
into.extend(&buf[..size]);
} else {
// TODO: move from tokio-core to tokio and return error instead of panic
panic!("DhtBase to_bytes error {:?}", dp);
}
addr
}
}

#[cfg(test)]
mod test {
    use tokio_core::net::UdpCodec;
    use std::net::SocketAddr;

    use super::*;
    use toxcore::dht_new::packet_kind::*;

    use quickcheck::{quickcheck, TestResult};

    // Property test: encoding a `DhtPacket` and decoding its bytes must
    // round-trip to the same packet and source address, and corrupting
    // the packet-kind byte must make decode report `None`.
    #[test]
    fn dht_codec_decode_test() {
        fn with_dp(dp: DhtPacket) -> TestResult {
            // need an invalid PacketKind for DhtPacket
            if dp.packet_kind as u8 <= PacketKind::SendNodes as u8 {
                return TestResult::discard()
            }

            let kind = dp.packet_kind.clone() as u8;
            // TODO: random SocketAddr
            let addr = SocketAddr::V4("0.1.2.3:4".parse().unwrap());
            let mut tc = DhtCodec;

            let mut buf = [0; 1024];
            // NOTE(review): presumably `.0` of `to_bytes`'s result is the
            // (mutable) output buffer, which makes `bytes[0] = kind`
            // below possible — confirm against `to_bytes`' signature.
            let bytes = dp.to_bytes((&mut buf, 0)).unwrap().0;

            let (decoded_a, decoded_dp) = tc.decode(&addr, &bytes)
                .unwrap();
            // it did have correct packet
            let decoded_dp = decoded_dp.unwrap();

            assert_eq!(addr, decoded_a);
            assert_eq!(DhtBase::DhtPacket(dp), decoded_dp);

            // make it error
            // NOTE(review): this expects `decode` to report a malformed
            // datagram as `Ok((addr, None))` rather than `Err` — the
            // `unwrap()` below would panic otherwise.
            bytes[0] = kind;
            let (r_addr, none) = tc.decode(&addr, &bytes).unwrap();
            assert_eq!(addr, r_addr);
            assert!(none.is_none());

            TestResult::passed()
        }
        quickcheck(with_dp as fn(DhtPacket) -> TestResult);
    }

    // Property test: `encode` must append exactly the packet's serialized
    // bytes to the output vector and echo back the destination address.
    #[test]
    fn dht_codec_encode_test() {
        fn with_dp(dp: DhtPacket) {
            // TODO: random SocketAddr
            let addr = SocketAddr::V4("5.6.7.8:9".parse().unwrap());
            let mut buf = Vec::new();
            let mut tc = DhtCodec;

            let socket = tc.encode((addr, DhtBase::DhtPacket(dp.clone())), &mut buf);
            assert_eq!(addr, socket);
            // Re-serialize independently and compare byte-for-byte.
            let mut enc_buf = [0; MAX_DHT_PACKET_SIZE];
            let (_, size) = dp.to_bytes((&mut enc_buf, 0)).unwrap();
            assert_eq!(buf, enc_buf[..size].to_vec());
        }
        quickcheck(with_dp as fn(DhtPacket));
    }
}
76 changes: 35 additions & 41 deletions src/toxcore/dht_new/kbucket.rs
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ impl Bucket {
#[cfg(test)]
fn find(&self, pk: &PublicKey) -> Option<usize> {
for (n, node) in self.nodes.iter().enumerate() {
if node.pk() == pk {
if &node.pk == pk {
return Some(n)
}
}
Expand Down Expand Up @@ -181,7 +181,7 @@ impl Bucket {
trace!(target: "Bucket", "With bucket: {:?}; PK: {:?} and new node: {:?}",
self, base_pk, new_node);

match self.nodes.binary_search_by(|n| base_pk.distance(n.pk(), new_node.pk())) {
match self.nodes.binary_search_by(|n| base_pk.distance(&n.pk, &new_node.pk)) {
Ok(index) => {
debug!(target: "Bucket",
"Updated: the node was already in the bucket.");
Expand Down Expand Up @@ -229,7 +229,7 @@ impl Bucket {
*/
pub fn remove(&mut self, base_pk: &PublicKey, node_pk: &PublicKey) {
trace!(target: "Bucket", "Removing PackedNode with PK: {:?}", node_pk);
match self.nodes.binary_search_by(|n| base_pk.distance(n.pk(), node_pk) ) {
match self.nodes.binary_search_by(|n| base_pk.distance(&n.pk, node_pk) ) {
Ok(index) => {
self.nodes.remove(index);
},
Expand All @@ -241,7 +241,7 @@ impl Bucket {

/// Check if node with given PK is in the `Bucket`.
pub fn contains(&self, pk: &PublicKey) -> bool {
self.nodes.iter().any(|n| n.pk() == pk)
self.nodes.iter().any(|n| &n.pk == pk)
}

/// Get the capacity of the Bucket.
Expand Down Expand Up @@ -335,13 +335,7 @@ impl Kbucket {
self.buckets.len() as u8
}

/// Get the PK of the Kbucket. Used in tests only
#[cfg(test)]
pub fn pk(&self) -> PublicKey {
self.pk
}

#[cfg(test)]
fn find(&self, pk: &PublicKey) -> Option<(usize, usize)> {
for (bucket_index, bucket) in self.buckets.iter().enumerate() {
match bucket.find(pk) {
Expand Down Expand Up @@ -382,7 +376,7 @@ impl Kbucket {
debug!(target: "Kbucket", "Trying to add PackedNode.");
trace!(target: "Kbucket", "With PN: {:?}; and self: {:?}", node, self);

match self.bucket_index(node.pk()) {
match self.bucket_index(&node.pk) {
Some(index) => self.buckets[index].try_add(&self.pk, node),
None => {
trace!("Failed to add node: {:?}", node);
Expand Down Expand Up @@ -623,7 +617,7 @@ mod test {
fn dht_bucket_1_capacity_try_add_test() {
fn with_nodes(n1: PackedNode, n2: PackedNode) -> TestResult {
let pk = PublicKey([0; PUBLICKEYBYTES]);
if pk.distance(n2.pk(), n1.pk()) != Ordering::Greater {
if pk.distance(&n2.pk, &n1.pk) != Ordering::Greater {
// n2 should be greater to check we can't add it
return TestResult::discard()
}
Expand Down Expand Up @@ -651,7 +645,7 @@ mod test {
let mut bucket = Bucket::new(Some(bucket_size));

let non_existent_node: PackedNode = Arbitrary::arbitrary(&mut rng);
bucket.remove(&base_pk, non_existent_node.pk()); // "removing" non-existent node
bucket.remove(&base_pk, &non_existent_node.pk); // "removing" non-existent node
assert_eq!(true, bucket.is_empty());

let nodes = vec![Arbitrary::arbitrary(&mut rng); num as usize];
Expand All @@ -667,7 +661,7 @@ mod test {
}

for node in &nodes {
bucket.remove(&base_pk, node.pk());
bucket.remove(&base_pk, &node.pk);
}
assert_eq!(true, bucket.is_empty());
}
Expand Down Expand Up @@ -729,7 +723,7 @@ mod test {
let pk = nums_to_pk(a, b, c, d);
let kbucket = Kbucket::new(buckets, &pk);
assert_eq!(buckets, kbucket.size());
assert_eq!(pk, kbucket.pk());
assert_eq!(pk, kbucket.pk);
}
quickcheck(with_pk as fn(u64, u64, u64, u64, u8));
}
Expand Down Expand Up @@ -792,7 +786,7 @@ mod test {

// Check for actual removing
for node in &nodes {
kbucket.remove(node.pk());
kbucket.remove(&node.pk);
}
assert!(kbucket.is_empty());
TestResult::passed()
Expand Down Expand Up @@ -820,7 +814,7 @@ mod test {

// check whether number of correct nodes that are returned is right
let correctness = |should, kbc: &Kbucket| {
assert_eq!(kbc.get_closest(&pk), kbc.get_closest(&kbc.pk()));
assert_eq!(kbc.get_closest(&pk), kbc.get_closest(&kbc.pk));

let got_nodes = kbc.get_closest(&pk);
let mut got_correct = 0;
Expand Down Expand Up @@ -901,19 +895,19 @@ mod test {
kbucket.try_add(n1);
kbucket.try_add(n2);
kbucket.try_add(n3);
assert_eq!(Some((46, 0)), kbucket.find(n1.pk()));
assert_eq!(Some((46, 1)), kbucket.find(n2.pk()));
assert_eq!(Some((46, 2)), kbucket.find(n3.pk()));
assert_eq!(Some((46, 0)), kbucket.find(&n1.pk));
assert_eq!(Some((46, 1)), kbucket.find(&n2.pk));
assert_eq!(Some((46, 2)), kbucket.find(&n3.pk));
});
with_data(|kbucket, n1, n2, n3| {
// insert order: n3 n2 n1 maps to position
// n1 => 0, n2 => 1, n3 => 2
kbucket.try_add(n3);
kbucket.try_add(n2);
kbucket.try_add(n1);
assert_eq!(Some((46, 0)), kbucket.find(n1.pk()));
assert_eq!(Some((46, 1)), kbucket.find(n2.pk()));
assert_eq!(Some((46, 2)), kbucket.find(n3.pk()));
assert_eq!(Some((46, 0)), kbucket.find(&n1.pk));
assert_eq!(Some((46, 1)), kbucket.find(&n2.pk));
assert_eq!(Some((46, 2)), kbucket.find(&n3.pk));
});
// Check that removing order does not affect
// the order of nodes inside
Expand All @@ -923,32 +917,32 @@ mod test {
kbucket.try_add(n2); // => 1
kbucket.try_add(n3); // => 2
// test removing from the beginning (n1 => 0)
kbucket.remove(n1.pk());
assert_eq!(None, kbucket.find(n1.pk()));
assert_eq!(Some((46, 0)), kbucket.find(n2.pk()));
assert_eq!(Some((46, 1)), kbucket.find(n3.pk()));
kbucket.remove(&n1.pk);
assert_eq!(None, kbucket.find(&n1.pk));
assert_eq!(Some((46, 0)), kbucket.find(&n2.pk));
assert_eq!(Some((46, 1)), kbucket.find(&n3.pk));
});
with_data(|kbucket, n1, n2, n3| {
// prepare kbucket
kbucket.try_add(n1); // => 0
kbucket.try_add(n2); // => 1
kbucket.try_add(n3); // => 2
// test removing from the middle (n2 => 1)
kbucket.remove(n2.pk());
assert_eq!(Some((46, 0)), kbucket.find(n1.pk()));
assert_eq!(None, kbucket.find(n2.pk()));
assert_eq!(Some((46, 1)), kbucket.find(n3.pk()));
kbucket.remove(&n2.pk);
assert_eq!(Some((46, 0)), kbucket.find(&n1.pk));
assert_eq!(None, kbucket.find(&n2.pk));
assert_eq!(Some((46, 1)), kbucket.find(&n3.pk));
});
with_data(|kbucket, n1, n2, n3| {
// prepare kbucket
kbucket.try_add(n1); // => 0
kbucket.try_add(n2); // => 1
kbucket.try_add(n3); // => 2
// test removing from the end (n3 => 2)
kbucket.remove(n3.pk());
assert_eq!(Some((46, 0)), kbucket.find(n1.pk()));
assert_eq!(Some((46, 1)), kbucket.find(n2.pk()));
assert_eq!(None, kbucket.find(n3.pk()));
kbucket.remove(&n3.pk);
assert_eq!(Some((46, 0)), kbucket.find(&n1.pk));
assert_eq!(Some((46, 1)), kbucket.find(&n2.pk));
assert_eq!(None, kbucket.find(&n3.pk));
});
}

Expand All @@ -961,13 +955,13 @@ mod test {
let (pk, _) = gen_keypair();
let mut kbucket = Kbucket::new(n, &pk);
assert!(!kbucket.contains(&pk));
assert!(pns.iter().all(|pn| !kbucket.contains(pn.pk())));
assert!(pns.iter().all(|pn| !kbucket.contains(&pn.pk)));

for pn in &pns {
kbucket.try_add(pn);
}

assert!(kbucket.iter().all(|pn| kbucket.contains(pn.pk())));
assert!(kbucket.iter().all(|pn| kbucket.contains(&pn.pk)));

TestResult::passed()
}
Expand All @@ -984,7 +978,7 @@ mod test {
{
let fitting_nodes = pns.iter().any(|p1| pns.iter()
.filter(|p2| p1 != *p2)
.any(|p2| kbucket_index(&pk, p1.pk()) == kbucket_index(&pk, p2.pk())));
.any(|p2| kbucket_index(&pk, &p1.pk) == kbucket_index(&pk, &p2.pk)));
if !fitting_nodes {
return TestResult::discard()
}
Expand All @@ -997,12 +991,12 @@ mod test {

for node in &pns {
if kbucket.try_add(node) {
let index = kbucket_index(&pk, node.pk());
let index = kbucket_index(&pk, &node.pk);
// none of nodes with the same index can be added
// to the kbucket
assert!(pns.iter()
.filter(|pn| kbucket_index(&pk, pn.pk()) == index)
.all(|pn| !kbucket.can_add(pn.pk())));
.filter(|pn| kbucket_index(&pk, &pn.pk) == index)
.all(|pn| !kbucket.can_add(&pn.pk)));
}
}

Expand Down
2 changes: 2 additions & 0 deletions src/toxcore/dht_new/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,3 +26,5 @@
pub mod packet;
pub mod kbucket;
pub mod packed_node;
pub mod codec;
pub mod packet_kind;
7 changes: 0 additions & 7 deletions src/toxcore/dht_new/packed_node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -149,13 +149,6 @@ impl PackedNode {
trace!("With address: {:?}", self);
self.saddr
}

/// Get an PK from the `PackedNode`.
pub fn pk(&self) -> &PublicKey {
trace!(target: "PackedNode", "Getting PK from PackedNode.");
trace!("With PK: {:?}", self);
&self.pk
}
}


Expand Down
Loading