diff --git a/Cargo.lock b/Cargo.lock
index a9cff9e3ba..b0ef1bf0ab 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5928,18 +5928,18 @@ dependencies = [
 
 [[package]]
 name = "zerocopy"
-version = "0.7.25"
+version = "0.7.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557"
+checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d"
 dependencies = [
  "zerocopy-derive",
 ]
 
 [[package]]
 name = "zerocopy-derive"
-version = "0.7.25"
+version = "0.7.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b"
+checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/iroh-bytes/src/format.rs b/iroh-bytes/src/format.rs
new file mode 100644
index 0000000000..2ccb8f3baa
--- /dev/null
+++ b/iroh-bytes/src/format.rs
@@ -0,0 +1,16 @@
+//! Defines data formats for HashSeq.
+//!
+//! The exact details of how to use a HashSeq for specific purposes are up to
+//! the user. However, the following approach is used by iroh formats:
+//!
+//! The first child blob is a metadata blob. It starts with a header, followed
+//! by serialized metadata. We mostly use [postcard] for serialization. The
+//! metadata either implicitly or explicitly refers to the other blobs in the
+//! HashSeq by index.
+//!
+//! In a very simple case, the metadata is just an array of items, where each
+//! item is the metadata for the corresponding blob. The metadata array will
+//! have n-1 items, where n is the number of blobs in the HashSeq.
+//!
+//! [postcard]: https://docs.rs/postcard/latest/postcard/
+pub mod collection;
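To make the convention above concrete: for the collection format introduced in this diff, the metadata blob is a 13-byte header followed by a postcard-encoded list of names. A minimal, self-contained sketch; the standalone Meta struct is illustrative only, standing in for the crate's private CollectionMeta type further down in this diff:

    use serde::{Deserialize, Serialize};

    // Illustrative stand-in for the private CollectionMeta type in this diff.
    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Meta {
        header: [u8; 13], // must be *b"CollectionV0."
        names: Vec<String>,
    }

    fn main() -> Result<(), postcard::Error> {
        let meta = Meta {
            header: *b"CollectionV0.",
            names: vec!["blob1".into(), "blob2".into()],
        };
        // postcard encodes fixed-size arrays without a length prefix, so the
        // serialized metadata blob starts with the 13 header bytes verbatim.
        let bytes = postcard::to_stdvec(&meta)?;
        assert_eq!(&bytes[..13], b"CollectionV0.");
        let roundtrip: Meta = postcard::from_bytes(&bytes)?;
        assert_eq!(meta, roundtrip);
        Ok(())
    }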
diff --git a/iroh/src/collection.rs b/iroh-bytes/src/format/collection.rs
similarity index 63%
rename from iroh/src/collection.rs
rename to iroh-bytes/src/format/collection.rs
index 787c79c6cb..7bf14ebf94 100644
--- a/iroh/src/collection.rs
+++ b/iroh-bytes/src/format/collection.rs
@@ -4,34 +4,81 @@ use std::collections::BTreeMap;
 use anyhow::Context;
 use bao_tree::blake3;
 use bytes::Bytes;
-use iroh_bytes::get::fsm::EndBlobNext;
-use iroh_bytes::get::Stats;
-use iroh_bytes::hashseq::HashSeq;
-use iroh_bytes::store::MapEntry;
-use iroh_bytes::util::TempTag;
-use iroh_bytes::{BlobFormat, Hash};
 use iroh_io::AsyncSliceReaderExt;
 use serde::{Deserialize, Serialize};
 
+use crate::{
+    get::{fsm, Stats},
+    hashseq::HashSeq,
+    store::MapEntry,
+    util::TempTag,
+    BlobFormat, Hash,
+};
+
 /// A collection of blobs
 ///
 /// Note that the format is subject to change.
-#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
+#[derive(Clone, Debug, PartialEq, Deserialize, Serialize, Default)]
 pub struct Collection {
     /// Links to the blobs in this collection
-    pub(crate) blobs: Vec<Blob>,
-    /// The total size of the raw_data referred to by all links
-    pub(crate) total_blobs_size: u64,
+    blobs: Vec<(String, Hash)>,
+}
+
+impl std::ops::Index<usize> for Collection {
+    type Output = (String, Hash);
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.blobs[index]
+    }
+}
+
+impl<K, V> Extend<(K, V)> for Collection
+where
+    K: Into<String>,
+    V: Into<Hash>,
+{
+    fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+        self.blobs
+            .extend(iter.into_iter().map(|(k, v)| (k.into(), v.into())));
+    }
+}
+
+impl<K, V> FromIterator<(K, V)> for Collection
+where
+    K: Into<String>,
+    V: Into<Hash>,
+{
+    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
+        let mut res = Self::default();
+        res.extend(iter);
+        res
+    }
+}
+
+impl IntoIterator for Collection {
+    type Item = (String, Hash);
+    type IntoIter = std::vec::IntoIter<(String, Hash)>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.blobs.into_iter()
+    }
 }
 
 /// Metadata for a collection
+///
+/// This is the wire format for the metadata blob.
 #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
 struct CollectionMeta {
+    header: [u8; 13], // Must contain "CollectionV0."
     names: Vec<String>,
-    total_blobs_size: u64,
 }
 
 impl Collection {
+    /// The header for the collection format.
+    ///
+    /// This is the start of the metadata blob.
+    pub const HEADER: &'static [u8; 13] = b"CollectionV0.";
+
     /// Convert the collection to an iterator of blobs, with the last being the
     /// root blob.
     ///
@@ -39,8 +86,8 @@ impl Collection {
     /// hash of the last blob as the collection hash.
     pub fn to_blobs(&self) -> impl Iterator<Item = Bytes> {
         let meta = CollectionMeta {
+            header: *Self::HEADER,
             names: self.names(),
-            total_blobs_size: self.total_blobs_size(),
         };
         let meta_bytes = postcard::to_stdvec(&meta).unwrap();
         let meta_bytes_hash = blake3::hash(&meta_bytes).into();
@@ -56,15 +103,15 @@ impl Collection {
     /// Returns the fsm at the start of the first child blob (if any),
     /// the links array, and the collection.
     pub async fn read_fsm(
-        fsm_at_start_root: iroh_bytes::get::fsm::AtStartRoot,
-    ) -> anyhow::Result<(iroh_bytes::get::fsm::EndBlobNext, HashSeq, Collection)> {
+        fsm_at_start_root: fsm::AtStartRoot,
+    ) -> anyhow::Result<(fsm::EndBlobNext, HashSeq, Collection)> {
         let (next, links) = {
            let curr = fsm_at_start_root.next();
            let (curr, data) = curr.concatenate_into_vec().await?;
            let links = HashSeq::new(data.into()).context("links could not be parsed")?;
            (curr.next(), links)
        };
-        let EndBlobNext::MoreChildren(at_meta) = next else {
+        let fsm::EndBlobNext::MoreChildren(at_meta) = next else {
             anyhow::bail!("expected meta");
         };
         let (next, collection) = {
@@ -73,7 +120,13 @@ impl Collection {
             let curr = at_meta.next(meta_link);
             let (curr, names) = curr.concatenate_into_vec().await?;
             let names = postcard::from_bytes::<CollectionMeta>(&names)?;
-            let collection = Collection::from_parts(children, names)?;
+            anyhow::ensure!(
+                names.header == *Self::HEADER,
+                "expected header {:?}, got {:?}",
+                Self::HEADER,
+                names.header
+            );
+            let collection = Collection::from_parts(children, names);
             (curr.next(), collection)
         };
         Ok((next, links, collection))
     }
@@ -83,14 +136,14 @@ impl Collection {
     ///
     /// Returns the collection, a map from blob offsets to bytes, and the stats.
     pub async fn read_fsm_all(
-        fsm_at_start_root: iroh_bytes::get::fsm::AtStartRoot,
+        fsm_at_start_root: crate::get::fsm::AtStartRoot,
     ) -> anyhow::Result<(Collection, BTreeMap<u64, Bytes>, Stats)> {
         let (next, links, collection) = Self::read_fsm(fsm_at_start_root).await?;
         let mut res = BTreeMap::new();
         let mut curr = next;
         let end = loop {
             match curr {
-                EndBlobNext::MoreChildren(more) => {
+                fsm::EndBlobNext::MoreChildren(more) => {
                     let child_offset = more.child_offset();
                     let Some(hash) = links.get(usize::try_from(child_offset)?) else {
                         break more.finish();
                     };
@@ -100,7 +153,7 @@ impl Collection {
                     res.insert(child_offset - 1, blob.into());
                     curr = next.next();
                 }
-                EndBlobNext::Closing(closing) => break closing,
+                fsm::EndBlobNext::Closing(closing) => break closing,
             }
         };
         let stats = end.next().await?;
@@ -113,7 +166,7 @@ impl Collection {
     ///
     /// It does not require that all child blobs are stored in the store.
    pub async fn load<D>(db: &D, root: &Hash) -> anyhow::Result<Self>
    where
-        D: iroh_bytes::store::Map,
+        D: crate::store::Map,
     {
         let links_entry = db.get(root).context("links not found")?;
         anyhow::ensure!(links_entry.is_complete(), "links not complete");
@@ -128,14 +181,14 @@ impl Collection {
             meta.names.len() == links.len(),
             "names and links length mismatch"
         );
-        Self::from_parts(links, meta)
+        Ok(Self::from_parts(links, meta))
     }
 
     /// Store a collection in a store. returns the root hash of the collection
     /// as a TempTag.
    pub async fn store<D>(self, db: &D) -> anyhow::Result<TempTag>
    where
-        D: iroh_bytes::store::Store,
+        D: crate::store::Store,
     {
         let (links, meta) = self.into_parts();
         let meta_bytes = postcard::to_stdvec(&meta)?;
@@ -151,85 +204,50 @@ impl Collection {
 
     /// Split a collection into a sequence of links and metadata
     fn into_parts(self) -> (Vec<Hash>, CollectionMeta) {
-        let mut names = Vec::with_capacity(self.blobs().len());
-        let mut links = Vec::with_capacity(self.blobs().len());
-        for blob in self.blobs {
-            names.push(blob.name);
-            links.push(blob.hash);
+        let mut names = Vec::with_capacity(self.blobs.len());
+        let mut links = Vec::with_capacity(self.blobs.len());
+        for (name, hash) in self.blobs {
+            names.push(name);
+            links.push(hash);
         }
         let meta = CollectionMeta {
+            header: *Self::HEADER,
             names,
-            total_blobs_size: self.total_blobs_size,
         };
         (links, meta)
     }
 
     /// Create a new collection from a list of hashes and metadata
-    fn from_parts(
-        links: impl IntoIterator<Item = Hash>,
-        meta: CollectionMeta,
-    ) -> anyhow::Result<Self> {
-        let blobs = links
-            .into_iter()
-            .zip(meta.names)
-            .map(|(hash, name)| Blob { name, hash })
-            .collect();
-        Self::new(blobs, meta.total_blobs_size)
-    }
-
-    /// Create a new collection from a list of blobs and total size of the raw data
-    pub fn new(blobs: Vec<Blob>, total_blobs_size: u64) -> anyhow::Result<Self> {
-        let mut blobs = blobs;
-        let n = blobs.len();
-        blobs.sort_by(|a, b| a.name.cmp(&b.name));
-        blobs.dedup_by(|a, b| a.name == b.name);
-        anyhow::ensure!(n == blobs.len(), "duplicate blob names");
-        Ok(Self {
-            blobs,
-            total_blobs_size,
-        })
+    fn from_parts(links: impl IntoIterator<Item = Hash>, meta: CollectionMeta) -> Self {
+        meta.names.into_iter().zip(links).collect()
     }
 
     /// Get the links to the blobs in this collection
     fn links(&self) -> impl Iterator<Item = Hash> + '_ {
-        self.blobs.iter().map(|x| x.hash)
+        self.blobs.iter().map(|(_name, hash)| *hash)
     }
 
     /// Get the names of the blobs in this collection
     fn names(&self) -> Vec<String> {
-        self.blobs.iter().map(|x| x.name.clone()).collect()
+        self.blobs.iter().map(|(name, _)| name.clone()).collect()
     }
 
-    /// Blobs in this collection
-    pub fn blobs(&self) -> &[Blob] {
-        &self.blobs
+    /// Iterate over the blobs in this collection
+    pub fn iter(&self) -> impl Iterator<Item = &(String, Hash)> {
+        self.blobs.iter()
     }
 
-    /// Take ownership of the blobs in this collection
-    pub fn into_inner(self) -> Vec<Blob> {
-        self.blobs
-    }
-
-    /// Total size of the raw data referred to by all blobs in this collection
-    pub fn total_blobs_size(&self) -> u64 {
-        self.total_blobs_size
+    /// Get the number of blobs in this collection
+    pub fn len(&self) -> usize {
+        self.blobs.len()
    }
 
-    /// The number of blobs in this collection
-    pub fn total_entries(&self) -> u64 {
-        self.blobs.len() as u64
+    /// Check if this collection is empty
+    pub fn is_empty(&self) -> bool {
+        self.blobs.is_empty()
     }
 }
 
-/// A blob entry of a collection
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-pub struct Blob {
-    /// The name of this blob of data
-    pub name: String,
-    /// The hash of the blob of data
-    pub hash: Hash,
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -237,18 +255,30 @@ mod tests {
 
     #[test]
     fn roundtrip_blob() {
-        let b = Blob {
-            name: "test".to_string(),
-            hash: blake3::Hash::from_hex(
+        let b = (
+            "test".to_string(),
+            blake3::Hash::from_hex(
                 "3aa61c409fd7717c9d9c639202af2fae470c0ef669be7ba2caea5779cb534e9d",
             )
             .unwrap()
             .into(),
-        };
+        );
 
         let mut buf = bytes::BytesMut::zeroed(1024);
         postcard::to_slice(&b, &mut buf).unwrap();
-        let deserialize_b: Blob = postcard::from_bytes(&buf).unwrap();
+        let deserialize_b: (String, Hash) = postcard::from_bytes(&buf).unwrap();
         assert_eq!(b, deserialize_b);
     }
+
+    #[test]
+    fn roundtrip_collection_meta() {
+        let expected = CollectionMeta {
+            header: *Collection::HEADER,
+            names: vec!["test".to_string(), "a".to_string(), "b".to_string()],
+        };
+        let mut buf = bytes::BytesMut::zeroed(1024);
+        postcard::to_slice(&expected, &mut buf).unwrap();
+        let actual: CollectionMeta = postcard::from_bytes(&buf).unwrap();
+        assert_eq!(expected, actual);
+    }
 }
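With Blob and Collection::new gone, the trait impls above are the whole construction API: a Collection now behaves like a plain sequence of (name, hash) pairs. A usage sketch, assuming the two hashes already exist:

    use iroh_bytes::{format::collection::Collection, Hash};

    fn demo(hash_a: Hash, hash_b: Hash) {
        // FromIterator: anything convertible to (String, Hash) collects directly
        let mut collection: Collection = [("a", hash_a), ("b", hash_b)].into_iter().collect();
        // Extend appends further pairs
        collection.extend([("c", hash_b)]);
        assert_eq!(collection.len(), 3);
        // Index gives positional access to the (name, hash) tuples
        assert_eq!(collection[0].0, "a");
        // iter borrows; into_iter consumes
        for (name, hash) in collection.iter() {
            println!("{name}: {hash}");
        }
        // to_blobs yields the metadata blob followed by the HashSeq root blob
        assert_eq!(collection.to_blobs().count(), 2);
    }

One behavioral change worth noting: the removed Collection::new sorted, deduplicated, and rejected duplicate names, while the new constructors accept any sequence as-is, so name uniqueness is now the caller's responsibility.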
diff --git a/iroh-bytes/src/lib.rs b/iroh-bytes/src/lib.rs
index 65410498ec..1b34a98c6e 100644
--- a/iroh-bytes/src/lib.rs
+++ b/iroh-bytes/src/lib.rs
@@ -3,6 +3,7 @@
 #![deny(missing_docs, rustdoc::broken_intra_doc_links)]
 #![recursion_limit = "256"]
 
+pub mod format;
 pub mod get;
 pub mod hashseq;
 pub mod protocol;
diff --git a/iroh/examples/collection.rs b/iroh/examples/collection.rs
index ee3fc29627..01826abe59 100644
--- a/iroh/examples/collection.rs
+++ b/iroh/examples/collection.rs
@@ -6,8 +6,7 @@
 //! This is using an in memory database and a random node id.
 //! run this example from the project root:
 //!     $ cargo run -p collection
-use iroh::collection::{Blob, Collection};
-use iroh_bytes::BlobFormat;
+use iroh_bytes::{format::collection::Collection, BlobFormat, Hash};
 use tokio_util::task::LocalPoolHandle;
 use tracing_subscriber::{prelude::*, EnvFilter};
 
@@ -29,15 +28,11 @@ async fn main() -> anyhow::Result<()> {
         ("blob2", b"the second blob of bytes".to_vec()),
     ]);
     // create blobs from the data
-    let blobs = names
+    let collection: Collection = names
         .into_iter()
-        .map(|(name, hash)| Blob {
-            name,
-            hash: hash.into(),
-        })
+        .map(|(name, hash)| (name, Hash::from(hash)))
         .collect();
     // create a collection and add it to the db as well
-    let collection = Collection::new(blobs, 0)?;
     let hash = db.insert_many(collection.to_blobs()).unwrap();
     // create a new local pool handle with 1 worker thread
     let lp = LocalPoolHandle::new(1);
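For downstream callers, the migration is mechanical, as the example change above shows. In sketch form, with hash as a placeholder:

    use iroh_bytes::{format::collection::Collection, Hash};

    fn build(hash: Hash) -> Collection {
        // Before: Collection::new(vec![Blob { name: "blob1".into(), hash }], total_blobs_size)?
        // After: collect (name, hash) pairs; the total size field no longer exists in the format.
        [("blob1", hash)].into_iter().collect()
    }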
diff --git a/iroh/src/lib.rs b/iroh/src/lib.rs
index 8164091d93..ad51f24ad3 100644
--- a/iroh/src/lib.rs
+++ b/iroh/src/lib.rs
@@ -13,7 +13,6 @@ pub use iroh_sync as sync;
 pub use iroh_base::base32;
 
 pub mod client;
-pub mod collection;
 pub mod dial;
 pub mod downloader;
 pub mod get;
diff --git a/iroh/src/node.rs b/iroh/src/node.rs
index 471a034dcf..f4c26d76c4 100644
--- a/iroh/src/node.rs
+++ b/iroh/src/node.rs
@@ -19,6 +19,7 @@ use anyhow::{anyhow, bail, Context, Result};
 use futures::future::{BoxFuture, Shared};
 use futures::{FutureExt, Stream, StreamExt, TryFutureExt};
 use iroh_base::rpc::RpcResult;
+use iroh_bytes::format::collection::Collection;
 use iroh_bytes::hashseq::parse_hash_seq;
 use iroh_bytes::provider::DownloadProgress;
 use iroh_bytes::store::{
@@ -26,9 +27,7 @@ use iroh_bytes::store::{
     Store as BaoStore, ValidateProgress,
 };
 use iroh_bytes::util::progress::{FlumeProgressSender, IdGenerator, ProgressSender};
-use iroh_bytes::{
-    protocol::Closed, provider::AddProgress, BlobFormat, Hash, HashAndFormat, TempTag,
-};
+use iroh_bytes::{protocol::Closed, provider::AddProgress, BlobFormat, Hash, HashAndFormat};
 use iroh_gossip::net::{Gossip, GOSSIP_ALPN};
 use iroh_io::AsyncSliceReader;
 use iroh_net::magic_endpoint::get_alpn;
@@ -1068,11 +1067,10 @@ impl RpcHandler {
             ExportMode::Copy
         };
         if recursive {
-            use crate::collection::{Blob, Collection};
             use crate::util::io::pathbuf_from_name;
             tokio::fs::create_dir_all(&path).await?;
             let collection = Collection::load(db, &hash).await?;
-            for Blob { hash, name } in collection.blobs() {
+            for (name, hash) in collection.into_iter() {
                 #[allow(clippy::needless_borrow)]
                 let path = path.join(pathbuf_from_name(&name));
                 if let Some(parent) = path.parent() {
@@ -1081,7 +1079,7 @@ impl RpcHandler {
                 trace!("exporting blob {} to {}", hash, path.display());
                 let id = progress.new_id();
                 let progress1 = progress.clone();
-                db.export(*hash, path, mode, move |offset| {
+                db.export(hash, path, mode, move |offset| {
                     Ok(progress1.try_send(DownloadProgress::ExportProgress { id, offset })?)
                })
                .await?;
@@ -1199,10 +1197,7 @@ impl RpcHandler {
         msg: BlobAddPathRequest,
         progress: flume::Sender<AddProgress>,
     ) -> anyhow::Result<()> {
-        use crate::{
-            collection::{Blob, Collection},
-            rpc_protocol::WrapOption,
-        };
+        use crate::rpc_protocol::WrapOption;
         use futures::TryStreamExt;
         use iroh_bytes::store::ImportMode;
         use std::collections::BTreeMap;
@@ -1253,7 +1248,7 @@ impl RpcHandler {
             // import all files below root recursively
             let data_sources = crate::util::fs::scan_path(root, wrap)?;
             const IO_PARALLELISM: usize = 4;
-            let result: Vec<(Blob, u64, TempTag)> = futures::stream::iter(data_sources)
+            let result: Vec<_> = futures::stream::iter(data_sources)
                 .map(|source| {
                     let import_progress = import_progress.clone();
                     let db = self.inner.db.clone();
@@ -1268,19 +1263,18 @@ impl RpcHandler {
                         )
                         .await?;
                         let hash = *tag.hash();
-                        let blob = Blob { hash, name };
-                        io::Result::Ok((blob, size, tag))
+                        io::Result::Ok((name, hash, size, tag))
                     }
                 })
                 .buffered(IO_PARALLELISM)
                 .try_collect::<Vec<_>>()
                 .await?;
-            let total_blobs_size = result.iter().map(|(_, size, _)| *size).sum();
 
             // create a collection
-            let (blobs, _child_tags): (Vec<_>, Vec<_>) =
-                result.into_iter().map(|(blob, _, tag)| (blob, tag)).unzip();
-            let collection = Collection::new(blobs, total_blobs_size)?;
+            let (collection, _child_tags): (Collection, Vec<_>) = result
+                .into_iter()
+                .map(|(name, hash, _, tag)| ((name, hash), tag))
+                .unzip();
 
             collection.store(&self.inner.db).await?
         } else {
diff --git a/iroh/tests/cli.rs b/iroh/tests/cli.rs
index 82a550b466..da8d58ea4e 100644
--- a/iroh/tests/cli.rs
+++ b/iroh/tests/cli.rs
@@ -271,7 +271,7 @@ fn cli_provide_tree_resume() -> Result<()> {
         let get_output = get.unchecked().run()?;
         assert!(get_output.status.success());
         let matches = explicit_matches(match_get_stderr(get_output.stderr)?);
-        assert_eq!(matches, vec!["112.88 KiB"]);
+        assert_eq!(matches, vec!["112.89 KiB"]);
         compare_files(&src, &tgt)?;
         std::fs::remove_dir_all(&tgt)?;
     }
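The 0.01 KiB shift in the expected transfer size above is consistent with the metadata blob gaining the 13-byte header while dropping the total_blobs_size field. The provide tests below drive fetching through a run_collection_get_request helper; the underlying flow is roughly the following sketch, assuming an established QUIC connection to the provider; the state-machine names follow iroh_bytes::get::fsm, and the exact fsm::start signature is an assumption:

    use anyhow::Result;
    use iroh_bytes::{
        format::collection::Collection,
        get::fsm::{self, ConnectedNext},
        protocol::GetRequest,
        Hash,
    };

    async fn fetch_collection(conn: quinn::Connection, root: Hash) -> Result<Collection> {
        // Ask for the HashSeq root and all of its children
        let request = GetRequest::all(root);
        let connected = fsm::start(conn, request).next().await?;
        // For a collection, the root blob is the HashSeq, so we expect StartRoot
        let ConnectedNext::StartRoot(start) = connected.next().await? else {
            anyhow::bail!("expected start root");
        };
        let (collection, _children, _stats) = Collection::read_fsm_all(start).await?;
        Ok(collection)
    }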
diff --git a/iroh/tests/provide.rs b/iroh/tests/provide.rs
index 80ed818862..c93f71a390 100644
--- a/iroh/tests/provide.rs
+++ b/iroh/tests/provide.rs
@@ -2,17 +2,13 @@ use std::{
     collections::BTreeMap,
     net::SocketAddr,
     ops::Range,
-    path::PathBuf,
     time::{Duration, Instant},
 };
 
 use anyhow::{anyhow, Context, Result};
 use bytes::Bytes;
 use futures::FutureExt;
-use iroh::{
-    collection::{Blob, Collection},
-    node::{Builder, Event, Node},
-};
+use iroh::node::{Builder, Event, Node};
 use iroh_net::{key::SecretKey, NodeId};
 use quic_rpc::transport::misc::DummyServerEndpoint;
 use rand::RngCore;
@@ -20,6 +16,7 @@ use tokio::sync::mpsc;
 
 use bao_tree::{blake3, ChunkNum, ChunkRanges};
 use iroh_bytes::{
+    format::collection::Collection,
     get::{
         fsm::ConnectedNext,
         fsm::{self, DecodeError},
@@ -151,21 +148,15 @@ async fn multiple_clients() -> Result<()> {
     let mut db = iroh_bytes::store::readonly_mem::Store::default();
     let expect_hash = db.insert(content.as_slice());
-    let expect_name = "hello_world".to_string();
-    let collection = Collection::new(
-        vec![Blob {
-            name: expect_name.clone(),
-            hash: expect_hash,
-        }],
-        0,
-    )?;
+    let expect_name = "hello_world";
+    let collection = Collection::from_iter([(expect_name, expect_hash)]);
     let hash = db.insert_many(collection.to_blobs()).unwrap();
     let lp = test_local_pool();
     let node = test_node(db).local_pool(&lp).spawn().await?;
     let mut tasks = Vec::new();
     for _i in 0..3 {
         let file_hash: Hash = expect_hash;
-        let name = expect_name.clone();
+        let name = expect_name;
         let addrs = node.local_address().unwrap();
         let peer_id = node.node_id();
         let content = content.to_vec();
@@ -174,12 +165,12 @@ async fn multiple_clients() -> Result<()> {
             async move {
                 let opts = get_options(peer_id, addrs);
                 let expected_data = &content;
-                let expected_name = &name;
+                let expected_name = name;
                 let request = GetRequest::all(hash);
                 let (collection, children, _stats) = run_collection_get_request(opts, request).await?;
-                assert_eq!(expected_name, &collection.blobs()[0].name);
-                assert_eq!(&file_hash, &collection.blobs()[0].hash);
+                assert_eq!(expected_name, &collection[0].0);
+                assert_eq!(&file_hash, &collection[0].1);
                 assert_eq!(expected_data, &children[&0]);
 
                 anyhow::Ok(())
@@ -217,34 +208,25 @@ where
     let mut expects = Vec::new();
     let num_blobs = file_opts.len();
 
-    let (mut mdb, lookup) = iroh_bytes::store::readonly_mem::Store::new(file_opts.clone());
+    let (mut mdb, _lookup) = iroh_bytes::store::readonly_mem::Store::new(file_opts.clone());
     let mut blobs = Vec::new();
-    let mut total_blobs_size = 0u64;
 
     for opt in file_opts.into_iter() {
         let (name, data) = opt;
-        let name = name.into();
+        let name: String = name.into();
         println!("Sending {}: {}b", name, data.len());
 
-        let path = PathBuf::from(&name);
         // get expected hash of file
         let hash = blake3::hash(&data);
         let hash = Hash::from(hash);
-        let blob = Blob {
-            name: name.clone(),
-            hash,
-        };
+        let blob = (name.clone(), hash);
         blobs.push(blob);
-        total_blobs_size += data.len() as u64;
 
         // keep track of expected values
-        expects.push((name, path, hash));
+        expects.push((name, hash));
     }
-    let collection = Collection::new(blobs, total_blobs_size)?;
-    let collection_hash = mdb.insert_many(collection.to_blobs()).unwrap();
-
-    // sort expects by name to match the canonical order of blobs
-    expects.sort_by(|a, b| a.0.cmp(&b.0));
+    let collection_orig = Collection::from_iter(blobs);
+    let collection_hash = mdb.insert_many(collection_orig.to_blobs()).unwrap();
 
     let node = test_node(mdb.clone()).local_pool(rt).spawn().await?;
 
@@ -263,15 +245,14 @@ where
         let opts = get_options(node.node_id(), addrs);
         let request = GetRequest::all(collection_hash);
         let (collection, children, _stats) = run_collection_get_request(opts, request).await?;
-        assert_eq!(num_blobs, collection.blobs().len());
-        for (i, (name, hash)) in lookup.into_iter().enumerate() {
-            let hash = Hash::from(hash);
-            let blob = &collection.blobs()[i];
-            let expect = mdb.get(&hash).unwrap();
+        assert_eq!(num_blobs, collection.len());
+        for (i, (expected_name, expected_hash)) in expects.iter().enumerate() {
+            let (name, hash) = &collection[i];
             let got = &children[&(i as u64)];
-            assert_eq!(name, blob.name);
-            assert_eq!(hash, blob.hash);
-            assert_eq!(&expect, got);
+            let expected = mdb.get(expected_hash).unwrap();
+            assert_eq!(expected_name, name);
+            assert_eq!(expected_hash, hash);
+            assert_eq!(expected, got);
         }
 
         // We have to wait for the completed event before shutting down the node.
@@ -342,14 +323,7 @@ async fn test_server_close() {
     let _guard = iroh_test::logging::setup();
     let mut db = iroh_bytes::store::readonly_mem::Store::default();
     let child_hash = db.insert(b"hello there");
-    let collection = Collection::new(
-        vec![Blob {
-            name: "hello".to_string(),
-            hash: child_hash,
-        }],
-        0,
-    )
-    .unwrap();
+    let collection = Collection::from_iter([("hello", child_hash)]);
     let hash = db.insert_many(collection.to_blobs()).unwrap();
     let mut node = test_node(db).local_pool(&lp).spawn().await.unwrap();
     let node_addr = node.local_endpoint_addresses().await.unwrap();
@@ -402,17 +376,7 @@ fn create_test_db(
     entries: impl IntoIterator<Item = (impl Into<String>, impl AsRef<[u8]>)>,
 ) -> (iroh_bytes::store::readonly_mem::Store, Hash) {
     let (mut db, hashes) = iroh_bytes::store::readonly_mem::Store::new(entries);
-    let collection = Collection::new(
-        hashes
-            .into_iter()
-            .map(|(name, hash)| Blob {
-                name,
-                hash: hash.into(),
-            })
-            .collect(),
-        0,
-    )
-    .unwrap();
+    let collection = Collection::from_iter(hashes);
     let hash = db.insert_many(collection.to_blobs()).unwrap();
     (db, hash)
 }
@@ -553,12 +517,12 @@ async fn test_run_ticket() {
 
 /// Utility to validate that the children of a collection are correct
 fn validate_children(collection: Collection, children: BTreeMap<u64, Bytes>) -> anyhow::Result<()> {
-    let blobs = collection.into_inner();
+    let blobs = collection.into_iter().collect::<Vec<_>>();
     anyhow::ensure!(blobs.len() == children.len());
-    for (child, blob) in blobs.into_iter().enumerate() {
+    for (child, (_name, hash)) in blobs.into_iter().enumerate() {
         let child = child as u64;
         let data = children.get(&child).unwrap();
-        anyhow::ensure!(blob.hash == blake3::hash(data).into());
+        anyhow::ensure!(hash == blake3::hash(data).into());
     }
     Ok(())
 }
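Finally, a sketch of the storage round trip these tests exercise, using the readonly_mem store as above and assuming it implements the Map trait that Collection::load requires:

    use iroh_bytes::format::collection::Collection;

    #[tokio::test]
    async fn collection_store_load_roundtrip() -> anyhow::Result<()> {
        let mut db = iroh_bytes::store::readonly_mem::Store::default();
        let child = db.insert(b"hello there");
        let original = Collection::from_iter([("hello", child)]);
        // to_blobs yields the metadata blob and the HashSeq root; insert_many
        // returns the hash of the last blob, which is the collection hash
        let root = db.insert_many(original.to_blobs()).unwrap();
        // load only needs the links and metadata blobs, not the children
        let loaded = Collection::load(&db, &root).await?;
        assert_eq!(original, loaded);
        Ok(())
    }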