Skip to content

Commit

Permalink
feat: transfer multiple files
Browse files Browse the repository at this point in the history
* first pass hack at transferring multiple files
* rebase & refactor protocol, still not quite over the line yet
* adding client side
* uncover and try to fix write bugs
* fix reader error & get tests passing!
* make clippy happy
* make clippy even MORE happy
* use `bao::Hash` for `Blob.hash` field
* PR review suggestions
* serde `bao::Hash`
* streamline protocol to only send `Collections`
* clean up read & decode story
* clean up `get` side of the protocol to make it more legible
* replace `Event::Requested` with `Event::ReceivedCollection`
* add `empty_files` test, and clean up tests in general
* I live for clippy's approval.
* `Collection.name() returns &str`
  • Loading branch information
ramfox committed Jan 27, 2023
1 parent 55ff6ed commit bc040ea
Show file tree
Hide file tree
Showing 8 changed files with 609 additions and 239 deletions.
118 changes: 118 additions & 0 deletions src/blobs.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
use std::io::Read;

use anyhow::{Context, Result};
use bytes::Bytes;
use serde::{Deserialize, Serialize};

/// A named collection of blobs, together with the total size of the data they reference.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct Collection {
    /// The name of this collection
    pub(crate) name: String,
    /// Links to the blobs in this collection
    pub(crate) blobs: Vec<Blob>,
    /// The total size of the raw_data referred to by all links
    pub(crate) total_blobs_size: u64,
}

impl Collection {
    /// Decode a [`Collection`] from verified bytes.
    ///
    /// First verifies that the content of `data` matches `hash` using the bao
    /// `outboard` encoding, then deserializes the postcard-encoded collection.
    ///
    /// # Errors
    ///
    /// Returns an error if the content of `data` does not match `hash`, or if
    /// the verified bytes are not a valid postcard-encoded `Collection`.
    pub async fn decode_from(data: Bytes, outboard: &[u8], hash: bao::Hash) -> Result<Self> {
        // TODO: avoid copy
        let outboard = outboard.to_vec();
        // verify that the content of data matches the expected hash
        let mut decoder =
            bao::decode::Decoder::new_outboard(std::io::Cursor::new(&data[..]), &*outboard, &hash);

        let mut buf = [0u8; 1024];
        loop {
            // TODO: write & use an `async decoder`
            let read = decoder
                .read(&mut buf)
                .context("hash of Collection data does not match")?;
            if read == 0 {
                break;
            }
        }
        // only deserialize once the full content has been verified above
        let c: Collection =
            postcard::from_bytes(&data).context("failed to deserialize Collection data")?;
        Ok(c)
    }

    /// Total size, in bytes, of the raw data referenced by all blobs in this collection.
    pub fn total_blobs_size(&self) -> u64 {
        self.total_blobs_size
    }

    /// The name of this collection.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// The number of blobs in this collection.
    pub fn total_entries(&self) -> u64 {
        self.blobs.len() as u64
    }
}

/// A single named blob entry inside a collection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub(crate) struct Blob {
    /// The name of this blob of data
    pub(crate) name: String,
    /// The hash of the blob of data
    // serialized as a raw 32-byte sequence via the custom `hash_serde` module
    #[serde(with = "hash_serde")]
    pub(crate) hash: bao::Hash,
}

mod hash_serde {
use serde::{de, Deserializer, Serializer};

pub fn serialize<S>(h: &bao::Hash, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
s.serialize_bytes(h.as_bytes())
}

pub fn deserialize<'de, D>(d: D) -> Result<bao::Hash, D::Error>
where
D: Deserializer<'de>,
{
struct HashVisitor;

impl<'de> de::Visitor<'de> for HashVisitor {
type Value = bao::Hash;

fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("an array of 32 bytes containing hash data")
}

fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: de::Error,
{
let b: [u8; 32] = v.try_into().map_err(E::custom)?;
Ok(bao::Hash::from(b))
}
}

d.deserialize_bytes(HashVisitor)
}
}

#[cfg(test)]
mod tests {
    use super::*;

    /// A `Blob` survives a postcard serialize/deserialize round trip.
    #[test]
    fn roundtrip_blob() {
        let b = Blob {
            name: "test".to_string(),
            hash: bao::Hash::from_hex(
                "3aa61c409fd7717c9d9c639202af2fae470c0ef669be7ba2caea5779cb534e9d",
            )
            .unwrap(),
        };

        let mut buf = bytes::BytesMut::zeroed(1024);
        // `to_slice` returns the prefix of `buf` that was actually written.
        // Deserialize from that slice instead of the whole zero-padded buffer,
        // so the test does not rely on postcard tolerating trailing bytes.
        let used = postcard::to_slice(&b, &mut buf).unwrap();
        let deserialize_b: Blob = postcard::from_bytes(used).unwrap();
        assert_eq!(b, deserialize_b);
    }
}
Loading

0 comments on commit bc040ea

Please sign in to comment.