diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index ab18749e1..6064bb05d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -203,11 +203,13 @@ jobs: - name: Run cargo-tarpaulin uses: actions-rs/tarpaulin@v0.1 with: + version: '0.20.1' # Force cleaning via `--force-clean` flag to prevent buggy code coverage args: >- --manifest-path ${{ env.cargo_manifest }} --locked --force-clean + -- --test-threads=1 env: # Ensure debug output is also tested RUST_LOG: debug diff --git a/CHANGELOG.md b/CHANGELOG.md index 609505790..678911322 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - GraphQL replication service gets and verifies new entries and inserts them into the db [#137](https://github.com/p2panda/aquadoggo/pull/137) +- `validation` and `domain` modules used for publish and next args API [#204](https://github.com/p2panda/aquadoggo/pull/204) - Add schema task and schema provider that update when new schema views are materialised [#166](https://github.com/p2panda/aquadoggo/pull/166) - Service ready signal [#218](https://github.com/p2panda/aquadoggo/pull/218) diff --git a/Cargo.lock b/Cargo.lock index ecde301b9..aa1647d82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2039,7 +2039,7 @@ dependencies = [ [[package]] name = "p2panda-rs" version = "0.4.0" -source = "git+https://github.com/p2panda/p2panda?rev=e06fd08c45253d60fcd42778f59e946a9ed73f71#e06fd08c45253d60fcd42778f59e946a9ed73f71" +source = "git+https://github.com/p2panda/p2panda?rev=5d6508d5a9b4b766621c3bd14879cc568fbac02d#5d6508d5a9b4b766621c3bd14879cc568fbac02d" dependencies = [ "arrayvec 0.5.2", "async-trait", diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index e6a7e75be..6ed2f6b93 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -31,7 +31,7 @@ gql_client = "^1.0.6" lipmaa-link = "^0.2.2" log = "^0.4.17" openssl-probe = "^0.1.5" -p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "e06fd08c45253d60fcd42778f59e946a9ed73f71" } +p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "5d6508d5a9b4b766621c3bd14879cc568fbac02d" } serde = { version = "^1.0.137", features = ["derive"] } sqlx = { version = "^0.6.0", features = [ "any", @@ -61,7 +61,7 @@ hex = "0.4.3" http = "^0.2.8" hyper = "^0.14.19" once_cell = "^1.12.0" -p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "e06fd08c45253d60fcd42778f59e946a9ed73f71", features = [ +p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "5d6508d5a9b4b766621c3bd14879cc568fbac02d", features = [ "testing", ] } rand = "^0.8.5" diff --git a/aquadoggo/src/db/mod.rs b/aquadoggo/src/db/mod.rs index 46196bc12..aeb6faaaa 100644 --- a/aquadoggo/src/db/mod.rs +++ b/aquadoggo/src/db/mod.rs @@ -8,7 +8,6 @@ use sqlx::migrate::MigrateDatabase; pub mod errors; pub mod models; pub mod provider; -pub mod request; pub mod stores; pub mod traits; pub mod utils; diff --git a/aquadoggo/src/db/provider.rs b/aquadoggo/src/db/provider.rs index f435edd5a..971092f14 100644 --- a/aquadoggo/src/db/provider.rs +++ b/aquadoggo/src/db/provider.rs @@ -7,13 +7,14 @@ use p2panda_rs::operation::VerifiedOperation; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::errors::OperationStorageError; use p2panda_rs::storage_provider::traits::StorageProvider; +use p2panda_rs::test_utils::db::{ + EntryArgsRequest, EntryArgsResponse, PublishEntryRequest, PublishEntryResponse, +}; use sqlx::query_scalar; 
-use crate::db::request::{EntryArgsRequest, PublishEntryRequest}; use crate::db::stores::{StorageEntry, StorageLog}; use crate::db::Pool; -use crate::errors::StorageProviderResult; -use crate::graphql::client::NextEntryArguments; +use crate::errors::Result; /// Sql based storage that implements `StorageProvider`. #[derive(Clone, Debug)] @@ -31,21 +32,21 @@ impl SqlStorage { /// A `StorageProvider` implementation based on `sqlx` that supports SQLite and PostgreSQL /// databases. #[async_trait] -impl StorageProvider for SqlStorage { - type EntryArgsResponse = NextEntryArguments; +impl StorageProvider for SqlStorage { type EntryArgsRequest = EntryArgsRequest; - type PublishEntryResponse = NextEntryArguments; + type EntryArgsResponse = EntryArgsResponse; type PublishEntryRequest = PublishEntryRequest; + type PublishEntryResponse = PublishEntryResponse; + type StorageLog = StorageLog; + type StorageEntry = StorageEntry; + type StorageOperation = VerifiedOperation; /// Returns the related document for any entry. /// /// Every entry is part of a document and, through that, associated with a specific log id used /// by this document and author. This method returns that document id by looking up the log /// that the entry was stored in. - async fn get_document_by_entry( - &self, - entry_hash: &Hash, - ) -> StorageProviderResult> { + async fn get_document_by_entry(&self, entry_hash: &Hash) -> Result> { let result: Option = query_scalar( " SELECT @@ -81,7 +82,7 @@ impl SqlStorage { pub async fn get_schema_by_document_view( &self, view_id: &DocumentViewId, - ) -> StorageProviderResult> { + ) -> Result> { let result: Option = query_scalar( " SELECT @@ -92,7 +93,7 @@ impl SqlStorage { document_view_id = $1 ", ) - .bind(view_id.as_str()) + .bind(view_id.to_string()) .fetch_optional(&self.pool) .await .map_err(|e| OperationStorageError::FatalStorageError(e.to_string()))?; @@ -120,8 +121,10 @@ mod tests { use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; + use super::SqlStorage; + /// Inserts a `DocumentView` into the db and returns its view id. 
- async fn insert_document_view(db: &TestDatabase) -> DocumentViewId { + async fn insert_document_view(db: &TestDatabase) -> DocumentViewId { let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); let entry = db .store @@ -153,7 +156,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_view_id = insert_document_view(&db).await; let result = db .store @@ -172,7 +175,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let result = db .store .get_schema_by_document_view(&random_document_view_id) diff --git a/aquadoggo/src/db/request.rs b/aquadoggo/src/db/request.rs deleted file mode 100644 index 21a4db9bb..000000000 --- a/aquadoggo/src/db/request.rs +++ /dev/null @@ -1,80 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-or-later - -use p2panda_rs::document::DocumentId; -use p2panda_rs::storage_provider::traits::{AsEntryArgsRequest, AsPublishEntryRequest}; -use p2panda_rs::storage_provider::ValidationError; -use p2panda_rs::Validate; -use serde::Deserialize; - -use p2panda_rs::entry::{decode_entry, EntrySigned}; -use p2panda_rs::identity::Author; -use p2panda_rs::operation::OperationEncoded; - -/// Struct used to validate params and query database to retreive next entry arguments. -#[derive(Deserialize, Debug)] -pub struct EntryArgsRequest { - /// The entry author. - pub public_key: Author, - - /// The entry document id. - pub document_id: Option, -} - -impl AsEntryArgsRequest for EntryArgsRequest { - fn author(&self) -> &Author { - &self.public_key - } - - fn document_id(&self) -> &Option { - &self.document_id - } -} - -impl Validate for EntryArgsRequest { - type Error = ValidationError; - - fn validate(&self) -> Result<(), Self::Error> { - // Validate `author` request parameter - self.author().validate()?; - - // Validate `document` request parameter when it is set - match self.document_id() { - None => (), - Some(doc) => { - doc.validate()?; - } - }; - Ok(()) - } -} - -/// Struct used to validate params and publish new entry in database. 
-#[derive(Deserialize, Debug)] -pub struct PublishEntryRequest { - /// The encoded entry - pub entry: EntrySigned, - - /// The encoded operation - pub operation: OperationEncoded, -} - -impl AsPublishEntryRequest for PublishEntryRequest { - fn entry_signed(&self) -> &EntrySigned { - &self.entry - } - - fn operation_encoded(&self) -> &OperationEncoded { - &self.operation - } -} - -impl Validate for PublishEntryRequest { - type Error = ValidationError; - - fn validate(&self) -> Result<(), Self::Error> { - self.entry_signed().validate()?; - self.operation_encoded().validate()?; - decode_entry(self.entry_signed(), Some(self.operation_encoded()))?; - Ok(()) - } -} diff --git a/aquadoggo/src/db/stores/document.rs b/aquadoggo/src/db/stores/document.rs index b619e27d5..261b70dab 100644 --- a/aquadoggo/src/db/stores/document.rs +++ b/aquadoggo/src/db/stores/document.rs @@ -51,7 +51,7 @@ impl DocumentStore for SqlStorage { ($1, $2, $3) ", ) - .bind(document_view.id().as_str()) + .bind(document_view.id().to_string()) .bind(value.id().as_str().to_owned()) .bind(name) .execute(&self.pool) @@ -71,8 +71,8 @@ impl DocumentStore for SqlStorage { ($1, $2) ", ) - .bind(document_view.id().as_str()) - .bind(schema_id.as_str()) + .bind(document_view.id().to_string()) + .bind(schema_id.to_string()) .execute(&self.pool) .await .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; @@ -129,7 +129,7 @@ impl DocumentStore for SqlStorage { operation_fields_v1.list_index ASC ", ) - .bind(id.as_str()) + .bind(id.to_string()) .fetch_all(&self.pool) .await .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; @@ -173,9 +173,9 @@ impl DocumentStore for SqlStorage { ", ) .bind(document.id().as_str()) - .bind(document.view_id().as_str()) + .bind(document.view_id().to_string()) .bind(document.is_deleted()) - .bind(document.schema().as_str()) + .bind(document.schema().to_string()) .execute(&self.pool) .await .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; @@ -283,7 +283,7 @@ impl DocumentStore for SqlStorage { operation_fields_v1.list_index ASC ", ) - .bind(schema_id.as_str()) + .bind(schema_id.to_string()) .fetch_all(&self.pool) .await .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; @@ -333,6 +333,7 @@ mod tests { }; use rstest::rstest; + use crate::db::provider::SqlStorage; use crate::db::stores::document::{DocumentStore, DocumentView}; use crate::db::stores::entry::StorageEntry; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; @@ -373,7 +374,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); @@ -435,7 +436,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let view_does_not_exist = db .store .get_document_view_by_id(&random_document_view_id) @@ -452,7 +453,7 @@ mod tests { #[with(10, 1, 1, false, SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author = 
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); let schema_id = SchemaId::from_str(SCHEMA_ID).unwrap(); @@ -510,7 +511,7 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, operation: Operation, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_view = DocumentView::new( &document_view_id, &DocumentViewFields::new_from_operation_fields( @@ -534,7 +535,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_id = db.test_data.documents[0].clone(); let document_operations = db @@ -581,7 +582,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_id = db.test_data.documents[0].clone(); let document_operations = db @@ -628,7 +629,7 @@ mod tests { #[with(10, 1, 1, true)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_id = db.test_data.documents[0].clone(); let document_operations = db @@ -655,7 +656,7 @@ mod tests { #[with(10, 1, 1, true)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_id = db.test_data.documents[0].clone(); let document_operations = db @@ -686,7 +687,7 @@ mod tests { #[with(10, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_id = db.test_data.documents[0].clone(); let document_operations = db @@ -721,7 +722,7 @@ mod tests { #[with(10, 2, 1, false, SCHEMA_ID.parse().unwrap())] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let schema_id = SchemaId::from_str(SCHEMA_ID).unwrap(); for document_id in &db.test_data.documents { diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index 4f0e87a46..c083dfc7f 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -309,7 +309,7 @@ impl EntryStore for SqlStorage { logs.schema = $1 ", ) - .bind(schema.as_str()) + .bind(schema.to_string()) .fetch_all(&self.pool) .await .map_err(|e| EntryStorageError::Custom(e.to_string()))?; @@ -430,12 +430,13 @@ mod tests { use p2panda_rs::test_utils::fixtures::{entry, key_pair}; use rstest::rstest; + use crate::db::provider::SqlStorage; use crate::db::stores::entry::StorageEntry; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; #[rstest] fn insert_entry(key_pair: KeyPair, entry: Entry, #[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); let operation_encoded = OperationEncoded::try_from(entry.operation().unwrap()).unwrap(); let doggo_entry = StorageEntry::new(&entry_encoded, &operation_encoded).unwrap(); @@ -451,7 +452,7 @@ mod tests { #[with(10, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author = 
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); let log_id = LogId::default(); @@ -480,7 +481,7 @@ mod tests { #[with(20, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); let log_id = LogId::default(); @@ -509,7 +510,7 @@ mod tests { #[with(20, 1, 2, false, SCHEMA_ID.parse().unwrap())] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let schema_not_in_the_db = SchemaId::new_application( "venue", &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), @@ -539,7 +540,7 @@ mod tests { #[with(10, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); @@ -589,7 +590,7 @@ mod tests { #[with(20, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); @@ -628,7 +629,7 @@ mod tests { #[with(30, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); @@ -665,7 +666,7 @@ mod tests { #[with(20, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); diff --git a/aquadoggo/src/db/stores/log.rs b/aquadoggo/src/db/stores/log.rs index 76851ddf7..93f8227e2 100644 --- a/aquadoggo/src/db/stores/log.rs +++ b/aquadoggo/src/db/stores/log.rs @@ -83,7 +83,7 @@ impl LogStore for SqlStorage { .bind(log.author().as_str()) .bind(log.id().as_u64().to_string()) .bind(log.document_id().as_str()) - .bind(log.schema_id().as_str()) + .bind(log.schema_id().to_string()) .execute(&self.pool) .await .map_err(|e| LogStorageError::Custom(e.to_string()))? @@ -125,6 +125,9 @@ impl LogStore for SqlStorage { } /// Determines the next unused log_id of an author. + /// + /// @TODO: This will be deprecated as its functionality is replaced by the + /// `latest_log_id` and validated next log id methods. async fn next_log_id(&self, author: &Author) -> Result { // Get all log ids from this author let mut result: Vec = query_scalar( @@ -169,11 +172,46 @@ impl LogStore for SqlStorage { } // Otherwise, try next possible log id - next_log_id = next_log_id.next().unwrap(); + next_log_id = match next_log_id.next() { + Some(log_id) => Ok(log_id), + None => Err(LogStorageError::Custom("Max log id reached".to_string())), + }?; } Ok(next_log_id) } + + /// Determines the latest `LogId` of an author. + /// + /// Returns either the highest known `LogId` for an author or `None` if no logs are known from + /// the passed author.
+ async fn latest_log_id(&self, author: &Author) -> Result, LogStorageError> { + // Get all log ids from this author + let result: Option = query_scalar( + " + SELECT + log_id + FROM + logs + WHERE + author = $1 + ORDER BY + CAST(log_id AS NUMERIC) DESC LIMIT 1 + ", + ) + .bind(author.as_str()) + .fetch_optional(&self.pool) + .await + .map_err(|e| LogStorageError::Custom(e.to_string()))?; + + // Convert string representing u64 integers to `LogId` instance + let log_id: Option = result.map(|str| { + str.parse() + .unwrap_or_else(|_| panic!("Corrupt u64 integer found in database: '{0}'", &str)) + }); + + Ok(log_id) + } } #[cfg(test)] @@ -193,6 +231,7 @@ mod tests { }; use rstest::rstest; + use crate::db::provider::SqlStorage; use crate::db::stores::entry::StorageEntry; use crate::db::stores::log::StorageLog; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; @@ -202,7 +241,7 @@ mod tests { #[from(public_key)] author: Author, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let log_id = db.store.find_document_log_id(&author, None).await.unwrap(); assert_eq!(log_id, LogId::default()); }); @@ -215,7 +254,7 @@ mod tests { #[from(random_document_id)] document: DocumentId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let log = StorageLog::new(&author, &schema, &document.clone(), &LogId::default()); assert!(db.store.insert_log(log).await.is_ok()); @@ -232,7 +271,7 @@ mod tests { #[from(random_document_id)] document: DocumentId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let schema = SchemaId::new_application( "venue", &DocumentViewId::new(&[operation_id_1, operation_id_2]).unwrap(), @@ -250,7 +289,7 @@ mod tests { #[from(schema)] schema: SchemaId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let log_id = db.store.find_document_log_id(&author, None).await.unwrap(); // We expect to be given the next log id when asking for a possible log id for a new @@ -271,6 +310,28 @@ mod tests { }); } + #[rstest] + fn latest_log_id( + #[from(public_key)] author: Author, + #[from(schema)] schema: SchemaId, + #[from(test_db)] runner: TestDatabaseRunner, + #[from(random_document_id)] document_id: DocumentId, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let log_id = db.store.latest_log_id(&author).await.unwrap(); + + assert_eq!(log_id, None); + + for n in 0..12 { + let log = StorageLog::new(&author, &schema, &document_id, &LogId::new(n)); + db.store.insert_log(log).await.unwrap(); + + let log_id = db.store.latest_log_id(&author).await.unwrap(); + assert_eq!(Some(LogId::new(n)), log_id); + } + }); + } + #[rstest] fn document_log_id( #[from(schema)] schema: SchemaId, @@ -278,7 +339,7 @@ mod tests { #[from(operation_encoded)] operation_encoded: OperationEncoded, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Expect database to return nothing yet assert_eq!( db.store @@ -335,7 +396,7 @@ mod tests { #[from(random_document_id)] document_third: DocumentId, #[from(random_document_id)] 
document_forth: DocumentId, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Register two log ids at the beginning let log_1 = StorageLog::new(&author, &schema, &document_first, &LogId::default()); let log_2 = StorageLog::new(&author, &schema, &document_second, &LogId::new(1)); @@ -347,7 +408,7 @@ mod tests { let log_id = db.store.next_log_id(&author).await.unwrap(); assert_eq!(log_id, LogId::new(2)); - let log_3 = StorageLog::new(&author, &schema, &document_third.into(), &log_id); + let log_3 = StorageLog::new(&author, &schema, &document_third, &log_id); db.store.insert_log(log_3).await.unwrap(); @@ -355,7 +416,7 @@ mod tests { let log_id = db.store.next_log_id(&author).await.unwrap(); assert_eq!(log_id, LogId::new(3)); - let log_4 = StorageLog::new(&author, &schema, &document_forth.into(), &log_id); + let log_4 = StorageLog::new(&author, &schema, &document_forth, &log_id); db.store.insert_log(log_4).await.unwrap(); diff --git a/aquadoggo/src/db/stores/operation.rs b/aquadoggo/src/db/stores/operation.rs index b3f43e24e..ed8ab6ae9 100644 --- a/aquadoggo/src/db/stores/operation.rs +++ b/aquadoggo/src/db/stores/operation.rs @@ -98,11 +98,11 @@ impl OperationStore for SqlStorage { .bind(document_id.as_str()) .bind(operation.operation_id().as_str()) .bind(operation.action().as_str()) - .bind(operation.schema().as_str()) + .bind(operation.schema().to_string()) .bind( operation .previous_operations() - .map(|document_view_id| document_view_id.as_str()), + .map(|document_view_id| document_view_id.to_string()), ) .execute(&self.pool) .await @@ -294,6 +294,7 @@ mod tests { }; use rstest::rstest; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; #[rstest] @@ -309,7 +310,7 @@ mod tests { document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Construct the storage operation. 
let operation = VerifiedOperation::new(&author, &operation_id, &operation).unwrap(); @@ -337,7 +338,7 @@ mod tests { document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { db.store .insert_operation(&verified_operation, &document_id) .await @@ -362,7 +363,7 @@ mod tests { document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { assert!(db .store .get_document_by_operation_id(create_operation.operation_id()) @@ -407,7 +408,7 @@ mod tests { #[with(5, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); let latest_entry = db diff --git a/aquadoggo/src/db/stores/schema.rs b/aquadoggo/src/db/stores/schema.rs index eb4c787da..14b51ba81 100644 --- a/aquadoggo/src/db/stores/schema.rs +++ b/aquadoggo/src/db/stores/schema.rs @@ -184,7 +184,7 @@ mod tests { ) { let cddl_str = cddl_str.to_string(); - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_view_id = insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; @@ -251,7 +251,7 @@ mod tests { ) { let err_str = err_str.to_string(); - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_view_id = insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; @@ -298,7 +298,7 @@ mod tests { key_pair: KeyPair, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let document_view_id = insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; @@ -324,7 +324,7 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, key_pair: KeyPair, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_view_id = insert_schema_definition( &db.store, &key_pair, diff --git a/aquadoggo/src/db/stores/task.rs b/aquadoggo/src/db/stores/task.rs index c82e08f9d..3707b1b77 100644 --- a/aquadoggo/src/db/stores/task.rs +++ b/aquadoggo/src/db/stores/task.rs @@ -19,7 +19,7 @@ impl SqlStorage { let document_view_id = task_input .document_view_id .as_ref() - .map(|view_id| view_id.as_str()); + .map(|view_id| view_id.to_string()); // Insert task into database query( @@ -53,7 +53,7 @@ impl SqlStorage { let document_view_id = task_input .document_view_id .as_ref() - .map(|view_id| view_id.as_str()); + .map(|view_id| view_id.to_string()); // Remove task from database let result = query( @@ -128,6 +128,7 @@ mod tests { use p2panda_rs::test_utils::fixtures::{document_id, document_view_id}; use rstest::rstest; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::materializer::{Task, TaskInput}; @@ -136,7 +137,7 @@ mod tests { document_view_id: DocumentViewId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Prepare test data let task = 
Task::new("reduce", TaskInput::new(None, Some(document_view_id))); @@ -160,7 +161,7 @@ mod tests { #[rstest] fn avoid_duplicates(document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Prepare test data let task = Task::new("reduce", TaskInput::new(Some(document_id), None)); diff --git a/aquadoggo/src/db/stores/test_utils.rs b/aquadoggo/src/db/stores/test_utils.rs deleted file mode 100644 index a196ee5b3..000000000 --- a/aquadoggo/src/db/stores/test_utils.rs +++ /dev/null @@ -1,571 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-or-later - -use std::convert::TryFrom; -use std::sync::Arc; - -use futures::Future; -use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewId}; -use p2panda_rs::entry::{sign_and_encode, Entry, EntrySigned}; -use p2panda_rs::hash::Hash; -use p2panda_rs::identity::{Author, KeyPair}; -use p2panda_rs::operation::{ - AsOperation, AsVerifiedOperation, Operation, OperationEncoded, OperationId, OperationValue, - PinnedRelation, PinnedRelationList, Relation, RelationList, VerifiedOperation, -}; -use p2panda_rs::schema::SchemaId; -use p2panda_rs::storage_provider::traits::{OperationStore, StorageProvider}; -use p2panda_rs::test_utils::constants::{PRIVATE_KEY, SCHEMA_ID}; -use p2panda_rs::test_utils::fixtures::{operation, operation_fields}; -use rstest::fixture; -use sqlx::migrate::MigrateDatabase; -use sqlx::Any; -use tokio::runtime::Builder; -use tokio::sync::Mutex; - -use crate::db::provider::SqlStorage; -use crate::db::request::{EntryArgsRequest, PublishEntryRequest}; -use crate::db::traits::DocumentStore; -use crate::db::{connection_pool, create_database, run_pending_migrations, Pool}; -use crate::graphql::client::NextEntryArguments; -use crate::test_helpers::TEST_CONFIG; - -/// The fields used as defaults in the tests. 
-pub fn doggo_test_fields() -> Vec<(&'static str, OperationValue)> { - vec![ - ("username", OperationValue::Text("bubu".to_owned())), - ("height", OperationValue::Float(3.5)), - ("age", OperationValue::Integer(28)), - ("is_admin", OperationValue::Boolean(false)), - ( - "profile_picture", - OperationValue::Relation(Relation::new( - Hash::new("0020eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee") - .unwrap() - .into(), - )), - ), - ( - "special_profile_picture", - OperationValue::PinnedRelation(PinnedRelation::new( - Hash::new("0020ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - .unwrap() - .into(), - )), - ), - ( - "many_profile_pictures", - OperationValue::RelationList(RelationList::new(vec![ - Hash::new("0020aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - .unwrap() - .into(), - Hash::new("0020bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - .unwrap() - .into(), - ])), - ), - ( - "many_special_profile_pictures", - OperationValue::PinnedRelationList(PinnedRelationList::new(vec![ - Hash::new("0020cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") - .unwrap() - .into(), - Hash::new("0020dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd") - .unwrap() - .into(), - ])), - ), - ( - "another_relation_field", - OperationValue::PinnedRelationList(PinnedRelationList::new(vec![ - Hash::new("0020abababababababababababababababababababababababababababababababab") - .unwrap() - .into(), - Hash::new("0020cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd") - .unwrap() - .into(), - ])), - ), - ] -} - -/// Helper for creating many key_pairs. -pub fn test_key_pairs(no_of_authors: usize) -> Vec { - let mut key_pairs = vec![KeyPair::from_private_key_str(PRIVATE_KEY).unwrap()]; - - for _index in 1..no_of_authors { - key_pairs.push(KeyPair::new()) - } - - key_pairs -} - -/// Helper for constructing a publish entry request. -pub async fn construct_publish_entry_request( - provider: &SqlStorage, - operation: &Operation, - key_pair: &KeyPair, - document_id: Option<&DocumentId>, -) -> PublishEntryRequest { - let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); - let entry_args_request = EntryArgsRequest { - public_key: author.clone(), - document_id: document_id.cloned(), - }; - let next_entry_args = provider.get_entry_args(&entry_args_request).await.unwrap(); - - let entry = Entry::new( - &next_entry_args.log_id.into(), - Some(operation), - next_entry_args.skiplink.map(Hash::from).as_ref(), - next_entry_args.backlink.map(Hash::from).as_ref(), - &next_entry_args.seq_num.into(), - ) - .unwrap(); - - let entry = sign_and_encode(&entry, key_pair).unwrap(); - let operation = OperationEncoded::try_from(operation).unwrap(); - PublishEntryRequest { entry, operation } -} - -/// Helper for inserting an entry, operation and document_view into the database. 
-pub async fn insert_entry_operation_and_view( - provider: &SqlStorage, - key_pair: &KeyPair, - document_id: Option<&DocumentId>, - operation: &Operation, -) -> (DocumentId, DocumentViewId) { - if !operation.is_create() && document_id.is_none() { - panic!("UPDATE and DELETE operations require a DocumentId to be passed") - } - - let request = construct_publish_entry_request(provider, operation, key_pair, document_id).await; - - let operation_id: OperationId = request.entry.hash().into(); - let document_id = document_id - .cloned() - .unwrap_or_else(|| request.entry.hash().into()); - - let document_view_id: DocumentViewId = request.entry.hash().into(); - - let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); - - provider.publish_entry(&request).await.unwrap(); - provider - .insert_operation( - &VerifiedOperation::new(&author, &operation_id, operation).unwrap(), - &document_id, - ) - .await - .unwrap(); - - let document_operations = provider - .get_operations_by_document_id(&document_id) - .await - .unwrap(); - - let document = DocumentBuilder::new(document_operations).build().unwrap(); - - provider.insert_document(&document).await.unwrap(); - - (document_id, document_view_id) -} - -#[async_trait::async_trait] -pub trait AsyncTestFn { - async fn call(self, db: TestDatabase); -} - -#[async_trait::async_trait] -impl AsyncTestFn for FN -where - FN: FnOnce(TestDatabase) -> F + Sync + Send, - F: Future + Send, -{ - async fn call(self, db: TestDatabase) { - self(db).await - } -} - -#[async_trait::async_trait] -pub trait AsyncTestFnWithManager { - async fn call(self, db: TestDatabaseManager); -} - -#[async_trait::async_trait] -impl AsyncTestFnWithManager for FN -where - FN: FnOnce(TestDatabaseManager) -> F + Sync + Send, - F: Future + Send, -{ - async fn call(self, db: TestDatabaseManager) { - self(db).await - } -} - -pub struct PopulateDatabaseConfig { - /// Number of entries per log/document. - pub no_of_entries: usize, - - /// Number of logs for each author. - pub no_of_logs: usize, - - /// Number of authors, each with logs populated as defined above. - pub no_of_authors: usize, - - /// A boolean flag for wether all logs should contain a delete operation. - pub with_delete: bool, - - /// The schema used for all operations in the db. - pub schema: SchemaId, - - /// The fields used for every CREATE operation. - pub create_operation_fields: Vec<(&'static str, OperationValue)>, - - /// The fields used for every UPDATE operation. - pub update_operation_fields: Vec<(&'static str, OperationValue)>, -} - -impl Default for PopulateDatabaseConfig { - fn default() -> Self { - Self { - no_of_entries: 0, - no_of_logs: 0, - no_of_authors: 0, - with_delete: false, - schema: SCHEMA_ID.parse().unwrap(), - create_operation_fields: doggo_test_fields(), - update_operation_fields: doggo_test_fields(), - } - } -} - -// @TODO: I'm keeping this here for now as otherwise we would need to refactor _all_ the tests using it. -// -// We may still want to keep this "single database" runner injected through `rstest` but in any case -// probably best to consider that in a different PR. -pub struct TestDatabaseRunner { - config: PopulateDatabaseConfig, -} - -impl TestDatabaseRunner { - /// Provides a safe way to write tests using a database which closes the pool connection - /// automatically when the test succeeds or fails. - /// - /// Takes an (async) test function as an argument and passes over the `TestDatabase` instance - /// so it can be used inside of it. 
- pub fn with_db_teardown(&self, test: F) { - let runtime = Builder::new_current_thread() - .worker_threads(1) - .enable_all() - .thread_name("with_db_teardown") - .build() - .expect("Could not build tokio Runtime for test"); - - runtime.block_on(async { - // Initialise test database - let pool = initialize_db().await; - let mut db = TestDatabase { - store: SqlStorage::new(pool), - test_data: TestData::default(), - }; - - // Populate the test db - populate_test_db(&mut db, &self.config).await; - - // Get a handle of the underlying database connection pool - let pool = db.store.pool.clone(); - - // Spawn the test in a separate task to make sure we have control over the possible - // panics which might happen inside of it - let handle = tokio::task::spawn(async move { - // Execute the actual test - test.call(db).await; - }); - - // Get a handle of the task so we can use it later - let result = handle.await; - - // Unwind the test by closing down the connection to the database pool. This will - // be reached even when the test panicked - pool.close().await; - - // Panic here when test failed. The test fails within its own async task and stays - // there, we need to propagate it further to inform the test runtime about the result - result.unwrap(); - }); - } -} - -/// Method which provides a safe way to write tests with the ability to build many databases and -/// have their pool connections closed automatically when the test succeeds or fails. -/// -/// Takes an (async) test function as an argument and passes over the `TestDatabaseManager` -/// instance which can be used to build databases from inside the tests. -pub fn with_db_manager_teardown(test: F) { - let runtime = Builder::new_current_thread() - .worker_threads(1) - .enable_all() - .thread_name("with_db_teardown") - .build() - .expect("Could not build tokio Runtime for test"); - - // Instantiate the database manager - let db_manager = TestDatabaseManager::new(); - - // Get a handle onto it's collection of pools - let pools = db_manager.pools.clone(); - - runtime.block_on(async { - // Spawn the test in a separate task to make sure we have control over the possible - // panics which might happen inside of it - let handle = tokio::task::spawn(async move { - // Execute the actual test - test.call(db_manager).await; - }); - - // Get a handle of the task so we can use it later - let result = handle.await; - - // Unwind the test by closing down the connections to all the database pools. This - // will be reached even when the test panicked - for pool in pools.lock().await.iter() { - pool.close().await; - } - - // Panic here when test failed. The test fails within its own async task and stays - // there, we need to propagate it further to inform the test runtime about the result - result.unwrap(); - }); -} - -/// Fixture for constructing a storage provider instance backed by a pre-populated database. -/// -/// Returns a `TestDatabaseRunner` that bootstraps a safe async test environment connecting to a -/// database. It makes sure the runner disconnects properly from the connection pool after the test -/// succeeded or even failed. -/// -/// Passed parameters define what the database should contain. The first entry in each log contains -/// a valid CREATE operation following entries contain duplicate UPDATE operations. If the -/// with_delete flag is set to true the last entry in all logs contain be a DELETE operation. 
-#[fixture] -pub fn test_db( - // Number of entries per log/document - #[default(0)] no_of_entries: usize, - // Number of logs for each author - #[default(0)] no_of_logs: usize, - // Number of authors, each with logs populated as defined above - #[default(0)] no_of_authors: usize, - // A boolean flag for wether all logs should contain a delete operation - #[default(false)] with_delete: bool, - // The schema used for all operations in the db - #[default(SCHEMA_ID.parse().unwrap())] schema: SchemaId, - // The fields used for every CREATE operation - #[default(doggo_test_fields())] create_operation_fields: Vec<(&'static str, OperationValue)>, - // The fields used for every UPDATE operation - #[default(doggo_test_fields())] update_operation_fields: Vec<(&'static str, OperationValue)>, -) -> TestDatabaseRunner { - let config = PopulateDatabaseConfig { - no_of_entries, - no_of_logs, - no_of_authors, - with_delete, - schema, - create_operation_fields, - update_operation_fields, - }; - - TestDatabaseRunner { config } -} - -/// Container for `SqlStore` with access to the document ids and key_pairs used in the -/// pre-populated database for testing. -pub struct TestDatabase { - pub store: SqlStorage, - pub test_data: TestData, -} - -/// Data collected when populating a `TestData` base in order to easily check values which -/// would be otherwise hard or impossible to get through the store methods. -#[derive(Default)] -pub struct TestData { - pub key_pairs: Vec, - pub documents: Vec, -} - -/// Helper method for populating a `TestDatabase` with configurable data. -/// -/// Passed parameters define what the db should contain. The first entry in each log contains a -/// valid CREATE operation following entries contain duplicate UPDATE operations. If the -/// with_delete flag is set to true the last entry in all logs contain be a DELETE operation. -pub async fn populate_test_db(db: &mut TestDatabase, config: &PopulateDatabaseConfig) { - let key_pairs = test_key_pairs(config.no_of_authors); - - for key_pair in &key_pairs { - db.test_data - .key_pairs - .push(KeyPair::from_private_key(key_pair.private_key()).unwrap()); - - for _log_id in 0..config.no_of_logs { - let mut document_id: Option = None; - let mut previous_operation: Option = None; - - for index in 0..config.no_of_entries { - // Create an operation based on the current index and whether this document should - // contain a DELETE operation - let next_operation_fields = match index { - // First operation is CREATE - 0 => Some(operation_fields(config.create_operation_fields.clone())), - // Last operation is DELETE if the with_delete flag is set - seq if seq == (config.no_of_entries - 1) && config.with_delete => None, - // All other operations are UPDATE - _ => Some(operation_fields(config.update_operation_fields.clone())), - }; - - // Publish the operation encoded on an entry to storage. - let (entry_encoded, publish_entry_response) = send_to_store( - &db.store, - &operation( - next_operation_fields, - previous_operation, - Some(config.schema.to_owned()), - ), - document_id.as_ref(), - key_pair, - ) - .await; - - // Set the previous_operations based on the backlink - previous_operation = publish_entry_response.backlink.map(DocumentViewId::from); - - // If this was the first entry in the document, store the doucment id for later. 
- if index == 0 { - document_id = Some(entry_encoded.hash().into()); - db.test_data.documents.push(document_id.clone().unwrap()); - } - } - } - } -} - -/// Helper method for publishing an operation encoded on an entry to a store. -pub async fn send_to_store( - store: &SqlStorage, - operation: &Operation, - document_id: Option<&DocumentId>, - key_pair: &KeyPair, -) -> (EntrySigned, NextEntryArguments) { - // Get an Author from the key_pair. - let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); - - // Get the next entry arguments for this author and the passed document id. - let next_entry_args = store - .get_entry_args(&EntryArgsRequest { - public_key: author.clone(), - document_id: document_id.cloned(), - }) - .await - .unwrap(); - - // Construct the next entry. - let next_entry = Entry::new( - &next_entry_args.log_id.into(), - Some(operation), - next_entry_args.skiplink.map(Hash::from).as_ref(), - next_entry_args.backlink.map(Hash::from).as_ref(), - &next_entry_args.seq_num.into(), - ) - .unwrap(); - - // Encode both the entry and operation. - let entry_encoded = sign_and_encode(&next_entry, key_pair).unwrap(); - let operation_encoded = OperationEncoded::try_from(operation).unwrap(); - - // Publish the entry and get the next entry args. - let publish_entry_request = PublishEntryRequest { - entry: entry_encoded.clone(), - operation: operation_encoded, - }; - let publish_entry_response = store.publish_entry(&publish_entry_request).await.unwrap(); - - // Set or unwrap the passed document_id. - let document_id = if operation.is_create() { - entry_encoded.hash().into() - } else { - document_id.unwrap().to_owned() - }; - - // Also insert the operation into the store. - let verified_operation = - VerifiedOperation::new(&author, &entry_encoded.hash().into(), operation).unwrap(); - store - .insert_operation(&verified_operation, &document_id) - .await - .unwrap(); - - (entry_encoded, publish_entry_response) -} - -/// Create test database. -async fn initialize_db() -> Pool { - // Reset database first - drop_database().await; - create_database(&TEST_CONFIG.database_url).await.unwrap(); - - // Create connection pool and run all migrations - let pool = connection_pool(&TEST_CONFIG.database_url, 25) - .await - .unwrap(); - if run_pending_migrations(&pool).await.is_err() { - pool.close().await; - } - - pool -} - -/// Create test database. -async fn initialize_db_with_url(url: &str) -> Pool { - // Reset database first - drop_database().await; - create_database(url).await.unwrap(); - - // Create connection pool and run all migrations - let pool = connection_pool(url, 25).await.unwrap(); - if run_pending_migrations(&pool).await.is_err() { - pool.close().await; - } - - pool -} - -// Delete test database -async fn drop_database() { - if Any::database_exists(&TEST_CONFIG.database_url) - .await - .unwrap() - { - Any::drop_database(&TEST_CONFIG.database_url).await.unwrap(); - } -} - -/// A manager which can create many databases and retain a handle on their connection pools. 
-#[derive(Default)] -pub struct TestDatabaseManager { - pools: Arc>>, -} - -impl TestDatabaseManager { - pub fn new() -> Self { - Self::default() - } - - pub async fn create(&self, url: &str) -> TestDatabase { - // Initialise test database - let pool = initialize_db_with_url(url).await; - let test_db = TestDatabase { - store: SqlStorage::new(pool.clone()), - test_data: TestData::default(), - }; - self.pools.lock().await.push(pool); - test_db - } -} diff --git a/aquadoggo/src/db/stores/test_utils/helpers.rs b/aquadoggo/src/db/stores/test_utils/helpers.rs new file mode 100644 index 000000000..2dea11f1a --- /dev/null +++ b/aquadoggo/src/db/stores/test_utils/helpers.rs @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use std::convert::TryFrom; + +use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewId}; +use p2panda_rs::entry::{sign_and_encode, Entry, EntrySigned}; +use p2panda_rs::hash::Hash; +use p2panda_rs::identity::{Author, KeyPair}; +use p2panda_rs::operation::{ + AsOperation, Operation, OperationEncoded, OperationValue, PinnedRelation, PinnedRelationList, + Relation, RelationList, +}; +use p2panda_rs::storage_provider::traits::{OperationStore, StorageProvider}; +use p2panda_rs::test_utils::constants::PRIVATE_KEY; + +use crate::db::provider::SqlStorage; +use crate::db::traits::DocumentStore; +use crate::domain::{next_args, publish}; + +/// A complex set of fields which can be used in aquadoggo tests. +pub fn doggo_test_fields() -> Vec<(&'static str, OperationValue)> { + vec![ + ("username", OperationValue::Text("bubu".to_owned())), + ("height", OperationValue::Float(3.5)), + ("age", OperationValue::Integer(28)), + ("is_admin", OperationValue::Boolean(false)), + ( + "profile_picture", + OperationValue::Relation(Relation::new( + Hash::new("0020eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee") + .unwrap() + .into(), + )), + ), + ( + "special_profile_picture", + OperationValue::PinnedRelation(PinnedRelation::new( + Hash::new("0020ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + .unwrap() + .into(), + )), + ), + ( + "many_profile_pictures", + OperationValue::RelationList(RelationList::new(vec![ + Hash::new("0020aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + .unwrap() + .into(), + Hash::new("0020bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + .unwrap() + .into(), + ])), + ), + ( + "many_special_profile_pictures", + OperationValue::PinnedRelationList(PinnedRelationList::new(vec![ + Hash::new("0020cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") + .unwrap() + .into(), + Hash::new("0020dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd") + .unwrap() + .into(), + ])), + ), + ( + "another_relation_field", + OperationValue::PinnedRelationList(PinnedRelationList::new(vec![ + Hash::new("0020abababababababababababababababababababababababababababababababab") + .unwrap() + .into(), + Hash::new("0020cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd") + .unwrap() + .into(), + ])), + ), + ] +} + +/// Helper for creating many key_pairs. +/// +/// The first keypair created will allways be `PRIVATE_KEY`. +pub fn test_key_pairs(no_of_authors: usize) -> Vec { + let mut key_pairs = vec![KeyPair::from_private_key_str(PRIVATE_KEY).unwrap()]; + + for _index in 1..no_of_authors { + key_pairs.push(KeyPair::new()) + } + + key_pairs +} + +/// Helper for constructing a valid encoded entry and operation using valid next_args retrieved +/// from the passed store. 
+pub async fn encode_entry_and_operation( + store: &S, + operation: &Operation, + key_pair: &KeyPair, + document_id: Option<&DocumentId>, +) -> (EntrySigned, OperationEncoded) { + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + let document_view_id: Option = + document_id.map(|id| id.as_str().parse().unwrap()); + + // Get next args + let next_args = next_args::(store, &author, document_view_id.as_ref()) + .await + .unwrap(); + + // Construct the entry with the passed operation. + let entry = Entry::new( + &next_args.log_id.into(), + Some(operation), + next_args.skiplink.map(Hash::from).as_ref(), + next_args.backlink.map(Hash::from).as_ref(), + &next_args.seq_num.into(), + ) + .unwrap(); + + // Sign and encode the entry. + let entry = sign_and_encode(&entry, key_pair).unwrap(); + // Encode the operation. + let operation = OperationEncoded::try_from(operation).unwrap(); + + // Return encoded entry and operation. + (entry, operation) +} + +/// Helper for inserting an entry, operation and document_view into the store. +pub async fn insert_entry_operation_and_view( + store: &SqlStorage, + key_pair: &KeyPair, + document_id: Option<&DocumentId>, + operation: &Operation, +) -> (DocumentId, DocumentViewId) { + if !operation.is_create() && document_id.is_none() { + panic!("UPDATE and DELETE operations require a DocumentId to be passed") + } + + // Encode entry and operation. + let (entry_signed, operation_encoded) = + encode_entry_and_operation(store, operation, key_pair, document_id).await; + + // Unwrap document_id or construct it from the entry hash. + let document_id = document_id + .cloned() + .unwrap_or_else(|| entry_signed.hash().into()); + let document_view_id: DocumentViewId = entry_signed.hash().into(); + + // Publish the entry. + publish(store, &entry_signed, &operation_encoded) + .await + .unwrap(); + + // Materialise the affected document. + let document_operations = store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); + store.insert_document(&document).await.unwrap(); + + // Return the document_id and document_view_id.
+ (document_id, document_view_id) +} diff --git a/aquadoggo/src/db/stores/test_utils/mod.rs b/aquadoggo/src/db/stores/test_utils/mod.rs new file mode 100644 index 000000000..dfe14e405 --- /dev/null +++ b/aquadoggo/src/db/stores/test_utils/mod.rs @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +mod helpers; +mod runner; +mod store; + +pub use helpers::{ + doggo_test_fields, encode_entry_and_operation, insert_entry_operation_and_view, test_key_pairs, +}; +pub use runner::{ + test_db, test_db_config, with_db_manager_teardown, TestDatabaseManager, TestDatabaseRunner, +}; +pub use store::{populate_test_db, send_to_store, PopulateDatabaseConfig, TestData, TestDatabase}; diff --git a/aquadoggo/src/db/stores/test_utils/runner.rs b/aquadoggo/src/db/stores/test_utils/runner.rs new file mode 100644 index 000000000..b5a30ff5a --- /dev/null +++ b/aquadoggo/src/db/stores/test_utils/runner.rs @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use std::panic; +use std::sync::Arc; + +use futures::Future; +use p2panda_rs::operation::OperationValue; +use p2panda_rs::schema::SchemaId; +use p2panda_rs::test_utils::constants::SCHEMA_ID; +use rstest::fixture; +use tokio::runtime::Builder; +use tokio::sync::Mutex; + +use crate::db::provider::SqlStorage; +use crate::db::stores::test_utils::{ + populate_test_db, PopulateDatabaseConfig, TestData, TestDatabase, +}; +use crate::db::Pool; +use crate::test_helpers::{initialize_db, initialize_db_with_url}; + +use super::doggo_test_fields; + +#[async_trait::async_trait] +pub trait AsyncTestFn { + async fn call(self, db: TestDatabase); +} + +#[async_trait::async_trait] +impl AsyncTestFn for FN +where + FN: FnOnce(TestDatabase) -> F + Sync + Send, + F: Future + Send, +{ + async fn call(self, db: TestDatabase) { + self(db).await + } +} + +#[async_trait::async_trait] +pub trait AsyncTestFnWithManager { + async fn call(self, db: TestDatabaseManager); +} + +#[async_trait::async_trait] +impl AsyncTestFnWithManager for FN +where + FN: FnOnce(TestDatabaseManager) -> F + Sync + Send, + F: Future + Send, +{ + async fn call(self, db: TestDatabaseManager) { + self(db).await + } +} + +// @TODO: I'm keeping this here for now as otherwise we would need to refactor _all_ the tests using it. +// +// We may still want to keep this "single database" runner injected through `rstest` but in any case +// probably best to consider that in a different PR. +pub struct TestDatabaseRunner { + config: PopulateDatabaseConfig, +} + +impl TestDatabaseRunner { + /// Provides a safe way to write tests using a database which closes the pool connection + /// automatically when the test succeeds or fails. + /// + /// Takes an (async) test function as an argument and passes over the `TestDatabase` instance + /// so it can be used inside of it. 
+ pub fn with_db_teardown(&self, test: F) { + let runtime = Builder::new_current_thread() + .worker_threads(1) + .enable_all() + .thread_name("with_db_teardown") + .build() + .expect("Could not build tokio Runtime for test"); + + runtime.block_on(async { + // Initialise test database + let pool = initialize_db().await; + let mut db = TestDatabase { + store: SqlStorage::new(pool), + test_data: TestData::default(), + }; + + // Populate the test db + populate_test_db(&mut db, &self.config).await; + + // Get a handle of the underlying database connection pool + let pool = db.store.pool.clone(); + + // Spawn the test in a separate task to make sure we have control over the possible + // panics which might happen inside of it + let handle = tokio::task::spawn(async move { + // Execute the actual test + test.call(db).await; + }); + + // Get a handle of the task so we can use it later + let result = handle.await; + + // Unwind the test by closing down the connection to the database pool. This will + // be reached even when the test panicked + pool.close().await; + + // Panic here when the test failed. The test fails within its own async task and stays + // there, we need to propagate it further to inform the test runtime about the result + match result { + Ok(_) => (), + Err(err) => panic::resume_unwind(err.into_panic()), + }; + }); + } +} + +/// A manager which can create many databases and retain a handle on their connection pools. +#[derive(Default)] +pub struct TestDatabaseManager { + pools: Arc>>, +} + +impl TestDatabaseManager { + pub fn new() -> Self { + Self::default() + } + + pub async fn create(&self, url: &str) -> TestDatabase { + // Initialise test database + let pool = initialize_db_with_url(url).await; + let test_db = TestDatabase { + store: SqlStorage::new(pool.clone()), + test_data: TestData::default(), + }; + self.pools.lock().await.push(pool); + test_db + } +} + +/// Fixture for constructing a storage provider instance backed by a pre-populated database. +/// +/// Returns a `TestDatabaseRunner` that bootstraps a safe async test environment connecting to a +/// database. It makes sure the runner disconnects properly from the connection pool after the test +/// succeeded or even failed. +/// +/// Passed parameters define what the database should contain. The first entry in each log contains +/// a valid CREATE operation, following entries contain duplicate UPDATE operations. If the +/// with_delete flag is set to true, the last entry in all logs will be a DELETE operation.
+#[fixture] +pub fn test_db( + // Number of entries per log/document + #[default(0)] no_of_entries: usize, + // Number of logs for each author + #[default(0)] no_of_logs: usize, + // Number of authors, each with logs populated as defined above + #[default(0)] no_of_authors: usize, + // A boolean flag for whether all logs should contain a delete operation + #[default(false)] with_delete: bool, + // The schema used for all operations in the db + #[default(SCHEMA_ID.parse().unwrap())] schema: SchemaId, + // The fields used for every CREATE operation + #[default(doggo_test_fields())] create_operation_fields: Vec<(&'static str, OperationValue)>, + // The fields used for every UPDATE operation + #[default(doggo_test_fields())] update_operation_fields: Vec<(&'static str, OperationValue)>, +) -> TestDatabaseRunner { + let config = PopulateDatabaseConfig { + no_of_entries, + no_of_logs, + no_of_authors, + with_delete, + schema, + create_operation_fields, + update_operation_fields, + }; + + TestDatabaseRunner { config } +} + +/// Fixture for passing in `PopulateDatabaseConfig` into tests. +#[fixture] +pub fn test_db_config( + // Number of entries per log/document + #[default(0)] no_of_entries: usize, + // Number of logs for each author + #[default(0)] no_of_logs: usize, + // Number of authors, each with logs populated as defined above + #[default(0)] no_of_authors: usize, + // A boolean flag for whether all logs should contain a delete operation + #[default(false)] with_delete: bool, + // The schema used for all operations in the db + #[default(SCHEMA_ID.parse().unwrap())] schema: SchemaId, + // The fields used for every CREATE operation + #[default(doggo_test_fields())] create_operation_fields: Vec<(&'static str, OperationValue)>, + // The fields used for every UPDATE operation + #[default(doggo_test_fields())] update_operation_fields: Vec<(&'static str, OperationValue)>, +) -> PopulateDatabaseConfig { + PopulateDatabaseConfig { + no_of_entries, + no_of_logs, + no_of_authors, + with_delete, + schema, + create_operation_fields, + update_operation_fields, + } +} + +/// Method which provides a safe way to write tests with the ability to build many databases and +/// have their pool connections closed automatically when the test succeeds or fails. +/// +/// Takes an (async) test function as an argument and passes over the `TestDatabaseManager` +/// instance which can be used to build databases from inside the tests. +pub fn with_db_manager_teardown(test: F) { + let runtime = Builder::new_current_thread() + .worker_threads(1) + .enable_all() + .thread_name("with_db_teardown") + .build() + .expect("Could not build tokio Runtime for test"); + + // Instantiate the database manager + let db_manager = TestDatabaseManager::new(); + + // Get a handle onto its collection of pools + let pools = db_manager.pools.clone(); + + runtime.block_on(async { + // Spawn the test in a separate task to make sure we have control over the possible + // panics which might happen inside of it + let handle = tokio::task::spawn(async move { + // Execute the actual test + test.call(db_manager).await; + }); + + // Get a handle of the task so we can use it later + let result = handle.await; + + // Unwind the test by closing down the connections to all the database pools. This + // will be reached even when the test panicked + for pool in pools.lock().await.iter() { + pool.close().await; + } + + // Panic here when the test failed.
+        // The test fails within its own async task and stays there, so we need to propagate it
+        // further to inform the test runtime about the result
+        match result {
+            Ok(_) => (),
+            Err(err) => panic::resume_unwind(err.into_panic()),
+        };
+    });
+}
diff --git a/aquadoggo/src/db/stores/test_utils/store.rs b/aquadoggo/src/db/stores/test_utils/store.rs
new file mode 100644
index 000000000..35a22005e
--- /dev/null
+++ b/aquadoggo/src/db/stores/test_utils/store.rs
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+use std::convert::TryFrom;
+
+use p2panda_rs::document::{DocumentId, DocumentViewId};
+use p2panda_rs::entry::{sign_and_encode, Entry, EntrySigned};
+use p2panda_rs::hash::Hash;
+use p2panda_rs::identity::{Author, KeyPair};
+use p2panda_rs::operation::{Operation, OperationEncoded, OperationValue};
+use p2panda_rs::schema::SchemaId;
+use p2panda_rs::storage_provider::traits::StorageProvider;
+use p2panda_rs::test_utils::constants::SCHEMA_ID;
+use p2panda_rs::test_utils::fixtures::{operation, operation_fields};
+
+use crate::db::stores::test_utils::{doggo_test_fields, test_key_pairs};
+use crate::domain::{next_args, publish};
+use crate::graphql::client::NextEntryArguments;
+
+/// Container for `SqlStore` with access to the document ids and key_pairs used in the
+/// pre-populated database for testing.
+pub struct TestDatabase {
+    pub store: S,
+    pub test_data: TestData,
+}
+
+/// Data collected when populating a `TestDatabase` in order to easily check values which
+/// would be otherwise hard or impossible to get through the store methods.
+#[derive(Default)]
+pub struct TestData {
+    pub key_pairs: Vec,
+    pub documents: Vec,
+}
+
+/// Configuration used when populating a `TestDatabase`.
+pub struct PopulateDatabaseConfig {
+    /// Number of entries per log/document.
+    pub no_of_entries: usize,
+
+    /// Number of logs for each author.
+    pub no_of_logs: usize,
+
+    /// Number of authors, each with logs populated as defined above.
+    pub no_of_authors: usize,
+
+    /// A boolean flag for whether all logs should contain a delete operation.
+    pub with_delete: bool,
+
+    /// The schema used for all operations in the db.
+    pub schema: SchemaId,
+
+    /// The fields used for every CREATE operation.
+    pub create_operation_fields: Vec<(&'static str, OperationValue)>,
+
+    /// The fields used for every UPDATE operation.
+    pub update_operation_fields: Vec<(&'static str, OperationValue)>,
+}
+
+impl Default for PopulateDatabaseConfig {
+    fn default() -> Self {
+        Self {
+            no_of_entries: 0,
+            no_of_logs: 0,
+            no_of_authors: 0,
+            with_delete: false,
+            schema: SCHEMA_ID.parse().unwrap(),
+            create_operation_fields: doggo_test_fields(),
+            update_operation_fields: doggo_test_fields(),
+        }
+    }
+}
+
+/// Helper method for populating a `TestDatabase` with configurable data.
+///
+/// Passed parameters define what the db should contain. The first entry in each log contains a
+/// valid CREATE operation, following entries contain duplicate UPDATE operations. If the
+/// with_delete flag is set to true, the last entry in all logs will be a DELETE operation.
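+///
+/// A minimal sketch of how this helper is typically driven (assuming the in-memory store used by
+/// the tests further below):
+///
+/// ```ignore
+/// let mut db = TestDatabase {
+///     store: MemoryStore::default(),
+///     test_data: TestData::default(),
+/// };
+/// let config = PopulateDatabaseConfig {
+///     no_of_entries: 3,
+///     no_of_logs: 2,
+///     no_of_authors: 2,
+///     with_delete: true,
+///     ..PopulateDatabaseConfig::default()
+/// };
+/// populate_test_db(&mut db, &config).await;
+///
+/// // One document per log and author: 2 logs x 2 authors = 4 documents, each ending in a DELETE.
+/// assert_eq!(db.test_data.documents.len(), 4);
+/// ```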
+pub async fn populate_test_db( + db: &mut TestDatabase, + config: &PopulateDatabaseConfig, +) { + let key_pairs = test_key_pairs(config.no_of_authors); + + for key_pair in &key_pairs { + db.test_data + .key_pairs + .push(KeyPair::from_private_key(key_pair.private_key()).unwrap()); + + for _log_id in 0..config.no_of_logs { + let mut document_id: Option = None; + let mut previous_operation: Option = None; + + for index in 0..config.no_of_entries { + // Create an operation based on the current index and whether this document should + // contain a DELETE operation + let next_operation_fields = match index { + // First operation is CREATE + 0 => Some(operation_fields(config.create_operation_fields.clone())), + // Last operation is DELETE if the with_delete flag is set + seq if seq == (config.no_of_entries - 1) && config.with_delete => None, + // All other operations are UPDATE + _ => Some(operation_fields(config.update_operation_fields.clone())), + }; + + // Publish the operation encoded on an entry to storage. + let (entry_encoded, publish_entry_response) = send_to_store::( + &db.store, + &operation( + next_operation_fields, + previous_operation, + Some(config.schema.to_owned()), + ), + document_id.as_ref(), + key_pair, + ) + .await; + + // Set the previous_operations based on the backlink + previous_operation = publish_entry_response.backlink.map(DocumentViewId::from); + + // If this was the first entry in the document, store the doucment id for later. + if index == 0 { + document_id = Some(entry_encoded.hash().into()); + db.test_data.documents.push(document_id.clone().unwrap()); + } + } + } + } +} + +/// Helper method for publishing an operation encoded on an entry to a store. +pub async fn send_to_store( + store: &S, + operation: &Operation, + document_id: Option<&DocumentId>, + key_pair: &KeyPair, +) -> (EntrySigned, NextEntryArguments) { + // Get an Author from the key_pair. + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + let document_view_id: Option = + document_id.map(|id| id.as_str().parse().unwrap()); + + let next_entry_args = next_args(store, &author, document_view_id.as_ref()) + .await + .unwrap(); + + // Construct the next entry. + let next_entry = Entry::new( + &next_entry_args.log_id.into(), + Some(operation), + next_entry_args.skiplink.map(Hash::from).as_ref(), + next_entry_args.backlink.map(Hash::from).as_ref(), + &next_entry_args.seq_num.into(), + ) + .unwrap(); + + // Encode both the entry and operation. + let entry_encoded = sign_and_encode(&next_entry, key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(operation).unwrap(); + + // Publish the entry and get the next entry args. 
+ let publish_entry_response = publish(store, &entry_encoded, &operation_encoded) + .await + .unwrap(); + + (entry_encoded, publish_entry_response) +} diff --git a/aquadoggo/src/db/utils.rs b/aquadoggo/src/db/utils.rs index eda8c63dd..d8e2c42d6 100644 --- a/aquadoggo/src/db/utils.rs +++ b/aquadoggo/src/db/utils.rs @@ -198,11 +198,11 @@ pub fn parse_value_to_string_vec(value: &OperationValue) -> Vec { db_values } OperationValue::PinnedRelation(pinned_relation) => { - vec![pinned_relation.view_id().as_str()] + vec![pinned_relation.view_id().to_string()] } OperationValue::PinnedRelationList(pinned_relation_list) => pinned_relation_list .iter() - .map(|document_view_id| document_view_id.as_str()) + .map(|document_view_id| document_view_id.to_string()) .collect(), } } diff --git a/aquadoggo/src/domain.rs b/aquadoggo/src/domain.rs new file mode 100644 index 000000000..ee14e86df --- /dev/null +++ b/aquadoggo/src/domain.rs @@ -0,0 +1,1251 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use std::collections::HashSet; + +use anyhow::{anyhow, ensure, Result as AnyhowResult}; +use async_graphql::Result; +use bamboo_rs_core_ed25519_yasmf::entry::is_lipmaa_required; +use p2panda_rs::document::{DocumentId, DocumentViewId}; +use p2panda_rs::entry::{decode_entry, EntrySigned, LogId, SeqNum}; +use p2panda_rs::identity::Author; +use p2panda_rs::operation::{ + AsOperation, AsVerifiedOperation, Operation, OperationAction, OperationEncoded, +}; +use p2panda_rs::storage_provider::traits::{AsStorageEntry, AsStorageLog, StorageProvider}; +use p2panda_rs::Human; + +use crate::graphql::client::NextEntryArguments; +use crate::validation::{ + ensure_document_not_deleted, get_expected_skiplink, increment_seq_num, is_next_seq_num, + next_log_id, verify_log_id, +}; + +/// Retrieve arguments required for constructing the next entry in a bamboo log for a specific +/// author and document. +/// +/// We accept a `DocumentViewId` rather than a `DocumentId` as an argument and then identify +/// the document id based on operations already existing in the store. Doing this means a document +/// can be updated without knowing the document id itself. +/// +/// This method is intended to be used behind a public API and so we assume all passed values +/// are in themselves valid. +/// +/// The steps and validation checks this method performs are: +/// +/// Check if a document view id was passed +/// +/// - if it wasn't we are creating a new document, safely increment the latest log id for the +/// passed author and return args immediately +/// - if it was continue knowing we are updating an existing document +/// +/// Determine the document id we are concerned with +/// +/// - verify that all operations in the passed document view id exist in the database +/// - verify that all operations in the passed document view id are from the same document +/// - ensure the document is not deleted +/// +/// Determine next arguments +/// +/// - get the log id for this author and document id, or if none is found safely increment this +/// authors latest log id +/// - get the backlink entry (latest entry for this author and log) +/// - get the skiplink for this author, log and next seq num +/// - get the latest seq num for this author and log and safely increment +/// +/// Finally, return next arguments +pub async fn next_args( + store: &S, + public_key: &Author, + document_view_id: Option<&DocumentViewId>, +) -> Result { + // Init the next args with base default values. 
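+    // (These defaults describe the first entry of a brand new log: no backlink or skiplink yet.)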
+ let mut next_args = NextEntryArguments { + backlink: None, + skiplink: None, + seq_num: SeqNum::default().into(), + log_id: LogId::default().into(), + }; + + //////////////////////// + // HANDLE CREATE CASE // + //////////////////////// + + // If no document_view_id is passed then this is a request for publishing a CREATE operation + // and we return the args for the next free log by this author. + if document_view_id.is_none() { + let log_id = next_log_id(store, public_key).await?; + next_args.log_id = log_id.into(); + return Ok(next_args); + } + + /////////////////////////// + // DETERMINE DOCUMENT ID // + /////////////////////////// + + // We can unwrap here as we know document_view_id is some. + let document_view_id = document_view_id.unwrap(); + + // Get the document_id for this document_view_id. This performs several validation steps (check + // method doc string). + let document_id = get_checked_document_id_for_view_id(store, document_view_id).await?; + + // Check the document is not deleted. + ensure_document_not_deleted(store, &document_id).await?; + + ///////////////////////// + // DETERMINE NEXT ARGS // + ///////////////////////// + + // Retrieve the log_id for the found document_id and author. + // + // @TODO: (lolz, this method is just called `get()`) + let log_id = store.get(public_key, &document_id).await?; + + // Check if an existing log id was found for this author and document. + match log_id { + // If it wasn't found, we just calculate the next log id safely and return the next args. + None => { + let next_log_id = next_log_id(store, public_key).await?; + next_args.log_id = next_log_id.into() + } + // If one was found, we need to get the backlink and skiplink, and safely increment the seq num. + Some(log_id) => { + // Get the latest entry in this log. + let latest_entry = store.get_latest_entry(public_key, &log_id).await?; + + // Determine the next sequence number by incrementing one from the latest entry seq num. + // + // If the latest entry is None, then we must be at seq num 1. + let seq_num = match latest_entry { + Some(ref latest_entry) => { + let mut latest_seq_num = latest_entry.seq_num(); + increment_seq_num(&mut latest_seq_num) + } + None => Ok(SeqNum::default()), + } + .map_err(|_| { + anyhow!( + "Max sequence number reached for {} log {}", + public_key.display(), + log_id.as_u64() + ) + })?; + + // Check if skiplink is required and if it is get the entry and return its hash. + let skiplink = if is_lipmaa_required(seq_num.as_u64()) { + // Determine skiplink ("lipmaa"-link) entry in this log. + Some(get_expected_skiplink(store, public_key, &log_id, &seq_num).await?) + } else { + None + } + .map(|entry| entry.hash()); + + next_args.backlink = latest_entry.map(|entry| entry.hash().into()); + next_args.skiplink = skiplink.map(|hash| hash.into()); + next_args.seq_num = seq_num.into(); + next_args.log_id = log_id.into(); + } + }; + + Ok(next_args) +} + +/// Persist an entry and operation to storage after performing validation of claimed values against +/// expected values retrieved from storage. +/// +/// Returns the arguments required for constructing the next entry in a bamboo log for the +/// specified author and document. +/// +/// This method is intended to be used behind a public API and so we assume all passed values +/// are in themselves valid. +/// +/// # Steps and Validation Performed +/// +/// Following is a list of the steps and validation checks that this method performs. 
+/// +/// ## Validate Entry +/// +/// Validate the values encoded on entry against what we expect based on our existing stored +/// entries: +/// +/// - Verify the claimed sequence number against the expected next sequence number for the author +/// and log. +/// - Get the expected backlink from storage. +/// - Get the expected skiplink from storage. +/// - Verify the bamboo entry (requires the expected backlink and skiplink to do this). +/// +/// ## Ensure single node per author +/// +/// - @TODO +/// +/// ## Validate operation against it's claimed schema: +/// +/// - @TODO +/// +/// ## Determine document id +/// +/// - If this is a create operation: +/// - derive the document id from the entry hash. +/// - In all other cases: +/// - verify that all operations in previous_operations exist in the database, +/// - verify that all operations in previous_operations are from the same document, +/// - ensure that the document is not deleted. +/// - Verify that the claimed log id matches the expected log id for this author and log. +/// +/// ## Persist data +/// +/// - If this is a new document: +/// - Store the new log. +/// - Store the entry. +/// - Store the operation. +/// +/// ## Compute and return next entry arguments +pub async fn publish( + store: &S, + entry_encoded: &EntrySigned, + operation_encoded: &OperationEncoded, +) -> Result { + //////////////////////////////// + // DECODE ENTRY AND OPERATION // + //////////////////////////////// + + let entry = decode_entry(entry_encoded, Some(operation_encoded))?; + let operation = Operation::from(operation_encoded); + let author = entry_encoded.author(); + let log_id = entry.log_id(); + let seq_num = entry.seq_num(); + + /////////////////////////// + // VALIDATE ENTRY VALUES // + /////////////////////////// + + // Verify that the claimed seq num matches the expected seq num for this author and log. + let latest_entry = store.get_latest_entry(&author, log_id).await?; + let latest_seq_num = latest_entry.as_ref().map(|entry| entry.seq_num()); + is_next_seq_num(latest_seq_num.as_ref(), seq_num)?; + + // The backlink for this entry is the latest entry from this public key's log. + let backlink = latest_entry; + + // If a skiplink is claimed, get the expected skiplink from the database, errors + // if it can't be found. + let skiplink = match entry.skiplink_hash() { + Some(_) => Some(get_expected_skiplink(store, &author, log_id, seq_num).await?), + None => None, + }; + + // Verify the bamboo entry providing the encoded operation and retrieved backlink and skiplink. + bamboo_rs_core_ed25519_yasmf::verify( + &entry_encoded.to_bytes(), + Some(&operation_encoded.to_bytes()), + skiplink.map(|entry| entry.entry_bytes()).as_deref(), + backlink.map(|entry| entry.entry_bytes()).as_deref(), + )?; + + /////////////////////////////////// + // ENSURE SINGLE NODE PER AUTHOR // + /////////////////////////////////// + + // @TODO: Missing a step here where we check if the author has published to this node before, and also + // if we know of any other nodes they have published to. Not sure how to do this yet. 
+ + /////////////////////////////// + // VALIDATE OPERATION VALUES // + /////////////////////////////// + + // @TODO: We skip this for now and will implement it in a follow-up PR + // validate_operation_against_schema(store, operation.operation()).await?; + + ////////////////////////// + // DETERMINE DOCUMENT ID // + ////////////////////////// + + let document_id = match operation.action() { + OperationAction::Create => { + // Derive the document id for this new document. + entry_encoded.hash().into() + } + _ => { + // We can unwrap previous operations here as we know all UPDATE and DELETE operations contain them. + let previous_operations = operation.previous_operations().unwrap(); + + // Get the document_id for the document_view_id contained in previous operations. + // This performs several validation steps (check method doc string). + let document_id = + get_checked_document_id_for_view_id(store, &previous_operations).await?; + + // Ensure the document isn't deleted. + ensure_document_not_deleted(store, &document_id) + .await + .map_err(|_| { + "You are trying to update or delete a document which has been deleted" + })?; + + document_id + } + }; + + // Verify the claimed log id against the expected one for this document id and author. + verify_log_id(store, &author, log_id, &document_id).await?; + + /////////////// + // STORE LOG // + /////////////// + + // If this is a CREATE operation it goes into a new log which we insert here. + if operation.is_create() { + let log = S::StorageLog::new(&author, &operation.schema(), &document_id, log_id); + + store.insert_log(log).await?; + } + + ///////////////////////////////////// + // DETERMINE NEXT ENTRY ARG VALUES // + ///////////////////////////////////// + + // If we have reached MAX_SEQ_NUM here for the next args then we will error and _not_ store the + // entry which is being processed in this request. + let next_seq_num = increment_seq_num(&mut seq_num.clone()).map_err(|_| { + anyhow!( + "Max sequence number reached for {} log {}", + author.display(), + log_id.as_u64() + ) + })?; + let backlink = Some(entry_encoded.hash()); + + // Check if skiplink is required and return hash if so + let skiplink = if is_lipmaa_required(next_seq_num.as_u64()) { + Some(get_expected_skiplink(store, &author, log_id, &next_seq_num).await?) + } else { + None + } + .map(|entry| entry.hash()); + + let next_args = NextEntryArguments { + log_id: (*log_id).into(), + seq_num: next_seq_num.into(), + backlink: backlink.map(|hash| hash.into()), + skiplink: skiplink.map(|hash| hash.into()), + }; + + /////////////////////////////// + // STORE ENTRY AND OPERATION // + /////////////////////////////// + + // Insert the entry into the store. + store + .insert_entry(S::StorageEntry::new(entry_encoded, operation_encoded)?) + .await?; + // Insert the operation into the store. + store + .insert_operation( + &S::StorageOperation::new(&author, &entry_encoded.hash().into(), &operation).unwrap(), + &document_id, + ) + .await?; + + Ok(next_args) +} + +/// Attempt to identify the document id for view id contained in a `next_args` request. 
+/// +/// This will fail if: +/// +/// - any of the operations contained in the view id _don't_ exist in the store +/// - any of the operations contained in the view id return a different document id than any of the others +pub async fn get_checked_document_id_for_view_id( + store: &S, + view_id: &DocumentViewId, +) -> AnyhowResult { + let mut found_document_ids: HashSet = HashSet::new(); + for operation in view_id.clone().into_iter() { + // If any operation can't be found return an error at this point already. + let document_id = store.get_document_by_operation_id(&operation).await?; + + ensure!( + document_id.is_some(), + anyhow!( + "{} not found, could not determine document id", + operation.display() + ) + ); + + found_document_ids.insert(document_id.unwrap()); + } + + // We can unwrap here as there must be at least one document view else the error above would + // have been triggered. + let mut found_document_ids_iter = found_document_ids.iter(); + let document_id = found_document_ids_iter.next().unwrap(); + + ensure!( + found_document_ids_iter.next().is_none(), + anyhow!("Invalid document view id: operations in passed document view id originate from different documents") + ); + Ok(document_id.to_owned()) +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use p2panda_rs::document::{DocumentId, DocumentViewId}; + use p2panda_rs::entry::{sign_and_encode, Entry, LogId, SeqNum}; + use p2panda_rs::hash::Hash; + use p2panda_rs::identity::{Author, KeyPair}; + use p2panda_rs::operation::{ + Operation, OperationEncoded, OperationFields, OperationId, OperationValue, + }; + use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore}; + use p2panda_rs::test_utils::constants::{PRIVATE_KEY, SCHEMA_ID}; + use p2panda_rs::test_utils::db::{MemoryStore, StorageEntry}; + use p2panda_rs::test_utils::fixtures::{ + create_operation, delete_operation, key_pair, operation, operation_fields, public_key, + random_document_view_id, random_hash, update_operation, + }; + use rstest::rstest; + + use crate::db::provider::SqlStorage; + use crate::db::stores::test_utils::{ + doggo_test_fields, encode_entry_and_operation, populate_test_db, send_to_store, test_db, + test_db_config, PopulateDatabaseConfig, TestData, TestDatabase, TestDatabaseRunner, + }; + use crate::domain::publish; + use crate::graphql::client::NextEntryArguments; + + use super::{get_checked_document_id_for_view_id, next_args}; + + type LogIdAndSeqNum = (u64, u64); + + /// Helper method for removing entries from a MemoryStore by Author & LogIdAndSeqNum. + fn remove_entries(store: &MemoryStore, author: &Author, entries_to_remove: &[LogIdAndSeqNum]) { + store.entries.lock().unwrap().retain(|_, entry| { + !entries_to_remove.contains(&(entry.log_id().as_u64(), entry.seq_num().as_u64())) + && &entry.author() == author + }); + } + + /// Helper method for removing operations from a MemoryStore by Author & LogIdAndSeqNum. 
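+    ///
+    /// Note: only the operations are removed here; the matching entries stay in the store unless
+    /// they are also dropped via `remove_entries`.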
+ fn remove_operations( + store: &MemoryStore, + author: &Author, + operations_to_remove: &[LogIdAndSeqNum], + ) { + for (hash, entry) in store.entries.lock().unwrap().iter() { + if operations_to_remove.contains(&(entry.log_id().as_u64(), entry.seq_num().as_u64())) + && &entry.author() == author + { + store + .operations + .lock() + .unwrap() + .remove(&hash.clone().into()); + } + } + } + + #[rstest] + fn errors_when_passed_non_existent_view_id( + #[from(test_db)] runner: TestDatabaseRunner, + #[from(random_document_view_id)] document_view_id: DocumentViewId, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let result = get_checked_document_id_for_view_id(&db.store, &document_view_id).await; + assert!(result.is_err()); + }); + } + + #[rstest] + fn gets_document_id_for_view( + #[from(test_db)] runner: TestDatabaseRunner, + operation: Operation, + operation_fields: OperationFields, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + // Store one entry and operation in the store. + let (entry, _) = send_to_store(&db.store, &operation, None, &KeyPair::new()).await; + let operation_one_id: OperationId = entry.hash().into(); + + // Store another entry and operation, from a different author, which perform an update on the earlier operation. + let update_operation = Operation::new_update( + SCHEMA_ID.parse().unwrap(), + operation_one_id.clone().into(), + operation_fields, + ) + .unwrap(); + + let (entry, _) = send_to_store( + &db.store, + &update_operation, + Some(&entry.hash().into()), + &KeyPair::new(), + ) + .await; + let operation_two_id: OperationId = entry.hash().into(); + + // Get the document id for the passed view id. + let result = get_checked_document_id_for_view_id( + &db.store, + &DocumentViewId::new(&[operation_one_id.clone(), operation_two_id]).unwrap(), + ) + .await; + + // Result should be ok. + assert!(result.is_ok()); + + // The returned document id should match the expected one. 
+ let document_id = result.unwrap(); + assert_eq!(document_id, DocumentId::new(operation_one_id)) + }); + } + + #[rstest] + #[case::ok(&[(0, 8)], (0, 8))] + #[should_panic( + expected = "Expected skiplink target not found in store: , log id 0, seq num 4" + )] + #[case::skiplink_missing(&[(0, 4), (0, 8)], (0, 8))] + #[should_panic( + expected = "Entry's claimed seq num of 8 does not match expected seq num of 7 for given author and log" + )] + #[case::backlink_missing(&[(0, 7), (0, 8)], (0, 8))] + #[should_panic( + expected = "Entry's claimed seq num of 8 does not match expected seq num of 7 for given author and log" + )] + #[case::backlink_and_skiplink_missing(&[(0, 4), (0, 7), (0, 8)], (0, 8))] + #[should_panic( + expected = "Entry's claimed seq num of 8 does not match expected seq num of 9 for given author and log" + )] + #[case::seq_num_occupied_again(&[], (0, 8))] + #[should_panic( + expected = "Entry's claimed seq num of 7 does not match expected seq num of 9 for given author and log" + )] + #[case::seq_num_occupied_(&[], (0, 7))] + #[should_panic( + expected = "Expected skiplink target not found in store: , log id 0, seq num 4" + )] + #[case::next_args_skiplink_missing(&[(0, 4), (0, 7), (0, 8)], (0, 7))] + #[should_panic( + expected = "Entry's claimed seq num of 8 does not match expected seq num of 1 for given author and log" + )] + #[case::no_entries_yet(&[(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8)], (0, 8))] + #[tokio::test] + async fn publish_with_missing_entries( + #[case] entries_to_remove: &[LogIdAndSeqNum], + #[case] entry_to_publish: LogIdAndSeqNum, + #[from(test_db_config)] + #[with(8, 1, 1)] + config: PopulateDatabaseConfig, + ) { + // Populate the db with 8 entries. + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + + // The author who has published to the db. + let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); + + // Get the latest entry from the db. + let next_entry = db + .store + .get_entry_at_seq_num( + &author, + &LogId::new(entry_to_publish.0), + &SeqNum::new(entry_to_publish.1).unwrap(), + ) + .await + .unwrap() + .unwrap(); + + // Remove some entries and operations from the database. + remove_operations(&db.store, &author, entries_to_remove); + remove_entries(&db.store, &author, entries_to_remove); + + // Publish the latest entry again and see what happens. + let result = publish( + &db.store, + &next_entry.entry_signed(), + &next_entry.operation_encoded().unwrap(), + ) + .await; + + // Unwrap here causing a panic, we check the errors match what we expect. + result.unwrap(); + } + + #[rstest] + #[case::ok_single_writer(&[], &[(0, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + // Weird case where all previous operations are on the same branch, but still valid. 
+ #[case::ok_many_previous_operations(&[], &[(0, 8), (0, 7), (0, 6)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[case::ok_multi_writer(&[], &[(0, 8)], KeyPair::new())] + #[should_panic(expected = " not found, could not determine document id")] + #[case::previous_operation_missing(&[(0, 8)], &[(0, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic(expected = " not found, could not determine document id")] + #[case::one_of_some_previous_operations_missing(&[(0, 7)], &[(0, 7), (0, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic(expected = " not found, could not determine document id")] + #[case::one_of_some_previous_operations_missing(&[(0, 8)], &[(0, 7), (0, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic(expected = " not found, could not determine document id")] + #[case::missing_previous_operation_multi_writer(&[(0, 8)], &[(0, 8)], KeyPair::new())] + #[should_panic( + expected = "Invalid document view id: operations in passed document view id originate from different documents" + )] + #[case::previous_operations_invalid_multiple_document_id(&[], &[(0, 8), (1, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[tokio::test] + async fn publish_with_missing_operations( + // The operations to be removed from the db + #[case] operations_to_remove: &[LogIdAndSeqNum], + // The previous operations described by their log id and seq number (log_id, seq_num) + #[case] previous_operations: &[LogIdAndSeqNum], + #[case] key_pair: KeyPair, + #[from(test_db_config)] + #[with(8, 2, 1)] + config: PopulateDatabaseConfig, + ) { + // Populate the db with 8 entries. + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); + + // Get the document id. + let document_id = db.test_data.documents.first().unwrap(); + + // Map the passed &[LogIdAndSeqNum] into a DocumentViewId containing the claimed operations. + let previous_operations: Vec = previous_operations + .iter() + .filter_map(|(log_id, seq_num)| { + db.store + .entries + .lock() + .unwrap() + .values() + .find(|entry| { + entry.seq_num().as_u64() == *seq_num + && entry.log_id.as_u64() == *log_id + && entry.author() == author + }) + .map(|entry| entry.hash().into()) + }) + .collect(); + // Construct document view id for previous operations. + let document_view_id = DocumentViewId::new(&previous_operations).unwrap(); + + // Compose the next operation. + let next_operation = Operation::new_update( + SCHEMA_ID.parse().unwrap(), + document_view_id, + operation_fields(doggo_test_fields()), + ) + .unwrap(); + + // Encode an entry and the operation. + let (entry, operation) = + encode_entry_and_operation(&db.store, &next_operation, &key_pair, Some(document_id)) + .await; + + // Remove some entries from the db. + remove_operations(&db.store, &author, operations_to_remove); + + // Publish the entry and operation. + let result = publish(&db.store, &entry, &operation).await; + + // Unwrap here causing a panic, we check the errors match what we expect. 
+ result.unwrap(); + } + + #[rstest] + #[case::ok_single_writer(&[], &[(0, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[case::ok_many_previous_operations(&[], &[(0, 8), (0, 7), (0, 6)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[case::ok_not_the_most_recent_document_view_id(&[], &[(0, 1)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[case::ok_multi_writer(&[], &[(0, 8)], KeyPair::new())] + #[should_panic(expected = " not found, could not determine document id")] + #[case::previous_operation_missing(&[(0, 8)], &[(0, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic(expected = " not found, could not determine document id")] + #[case::one_of_some_previous_operations_missing(&[(0, 7)], &[(0, 7), (0, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic(expected = " not found, could not determine document id")] + #[case::one_of_some_previous_operations_missing(&[(0, 8)], &[(0, 7), (0, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic(expected = " not found, could not determine document id")] + #[case::missing_previous_operation_multi_writer(&[(0, 8)], &[(0, 8)], KeyPair::new())] + #[should_panic( + expected = "Invalid document view id: operations in passed document view id originate from different documents" + )] + #[case::previous_operations_invalid_multiple_document_id(&[], &[(0, 8), (1, 8)], KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[tokio::test] + async fn next_args_with_missing_operations( + #[case] operations_to_remove: &[LogIdAndSeqNum], + #[case] document_view_id: &[LogIdAndSeqNum], + #[case] key_pair: KeyPair, + #[from(test_db_config)] + #[with(8, 2, 1)] + config: PopulateDatabaseConfig, + ) { + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + let author_with_removed_operations = + Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); + let author_making_request = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + // Map the passed &[LogIdAndSeqNum] into a DocumentViewId containing the claimed operations. + let document_view_id: Vec = document_view_id + .iter() + .filter_map(|(log_id, seq_num)| { + db.store + .entries + .lock() + .unwrap() + .values() + .find(|entry| { + entry.seq_num().as_u64() == *seq_num + && entry.log_id.as_u64() == *log_id + && entry.author() == author_with_removed_operations + }) + .map(|entry| entry.hash().into()) + }) + .collect(); + + // Construct document view id for previous operations. + let document_view_id = DocumentViewId::new(&document_view_id).unwrap(); + + // Remove some operations. + remove_operations( + &db.store, + &author_with_removed_operations, + operations_to_remove, + ); + + // Get the next args. + let result = next_args(&db.store, &author_making_request, Some(&document_view_id)).await; + + // Unwrap here causing a panic, we check the errors match what we expect. 
+ result.unwrap(); + } + + type SeqNumU64 = u64; + type Backlink = Option; + type Skiplink = Option; + + #[rstest] + #[case(0, None, (1, None, None))] + #[case(1, Some(1), (2, Some(1), None))] + #[case(2, Some(2), (3, Some(2), None))] + #[case(3, Some(3), (4, Some(3), Some(1)))] + #[case(4, Some(4), (5, Some(4), None))] + #[case(5, Some(5), (6, Some(5), None))] + #[case(6, Some(6), (7, Some(6), None))] + #[case(7, Some(7), (8, Some(7), Some(4)))] + #[case(2, Some(1), (3, Some(2), None))] + #[case(3, Some(1), (4, Some(3), Some(1)))] + #[case(4, Some(1), (5, Some(4), None))] + #[case(5, Some(1), (6, Some(5), None))] + #[case(6, Some(1), (7, Some(6), None))] + #[case(7, Some(1), (8, Some(7), Some(4)))] + #[tokio::test] + async fn next_args_with_expected_results( + #[case] no_of_entries: usize, + #[case] document_view_id: Option, + #[case] expected_next_args: (SeqNumU64, Backlink, Skiplink), + ) { + // Populate the db with the number of entries defined in the test params. + let config = PopulateDatabaseConfig { + no_of_entries, + no_of_logs: 1, + no_of_authors: 1, + ..PopulateDatabaseConfig::default() + }; + + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + + // The author who published the entries. + let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); + + // Construct the passed document view id (specified by a single sequence number) + let document_view_id: Option = document_view_id.map(|seq_num| { + db.store + .entries + .lock() + .unwrap() + .values() + .find(|entry| entry.seq_num().as_u64() == seq_num) + .map(|entry| DocumentViewId::new(&[entry.hash().into()]).unwrap()) + .unwrap() + }); + + // Construct the expected next args + let expected_seq_num = SeqNum::new(expected_next_args.0).unwrap(); + let expected_log_id = LogId::default(); + let expected_backlink = match expected_next_args.1 { + Some(backlink) => db + .store + .get_entry_at_seq_num(&author, &expected_log_id, &SeqNum::new(backlink).unwrap()) + .await + .unwrap() + .map(|entry| entry.hash()), + None => None, + }; + let expected_skiplink = match expected_next_args.2 { + Some(skiplink) => db + .store + .get_entry_at_seq_num(&author, &expected_log_id, &SeqNum::new(skiplink).unwrap()) + .await + .unwrap() + .map(|entry| entry.hash()), + None => None, + }; + let expected_next_args = NextEntryArguments { + log_id: expected_log_id.into(), + seq_num: expected_seq_num.into(), + backlink: expected_backlink.map(|hash| hash.into()), + skiplink: expected_skiplink.map(|hash| hash.into()), + }; + + // Request next args for the author and docuent view. + let result = next_args(&db.store, &author, document_view_id.as_ref()).await; + assert_eq!(result.unwrap(), expected_next_args); + } + + #[rstest] + #[tokio::test] + async fn gets_next_args_other_cases( + public_key: Author, + #[from(test_db_config)] + #[with(7, 1, 1)] + config: PopulateDatabaseConfig, + ) { + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + + // Get with no DocumentViewId given. + let result = next_args(&db.store, &public_key, None).await; + assert!(result.is_ok()); + assert_eq!( + NextEntryArguments { + backlink: None, + skiplink: None, + log_id: LogId::new(1).into(), + seq_num: SeqNum::default().into(), + }, + result.unwrap() + ); + + // Get with non-existent DocumentViewId given. 
+ let result = next_args(&db.store, &public_key, Some(&random_document_view_id())).await; + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .message + .as_str() + .contains("could not determine document id") // This is a partial string match, preceded by " not found," + ); + + // Here we are missing the skiplink. + remove_entries(&db.store, &public_key, &[(0, 4)]); + let document_id = db.test_data.documents.get(0).unwrap(); + let document_view_id = + DocumentViewId::new(&[document_id.as_str().parse().unwrap()]).unwrap(); + + let result = next_args(&db.store, &public_key, Some(&document_view_id)).await; + assert_eq!( + result.unwrap_err().message.as_str(), + "Expected skiplink target not found in store: , log id 0, seq num 4" + ); + } + + #[rstest] + #[case::owner_publishes_update_to_correct_log(LogId::new(0), KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[case::new_author_updates_to_new_log(LogId::new(0), KeyPair::new())] + #[should_panic( + expected = "Entry's claimed log id of 1 does not match existing log id of 0 for given author and document" + )] + #[case::owner_updates_to_wrong_and_taken_log(LogId::new(1), KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic( + expected = "Entry's claimed log id of 2 does not match existing log id of 0 for given author and document" + )] + #[case::owner_updates_to_wrong_but_free_log(LogId::new(2), KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic( + expected = "Entry's claimed log id of 1 does not match expected next log id of 0 for given author" + )] + #[case::new_author_updates_to_wrong_new_log(LogId::new(1), KeyPair::new())] + #[tokio::test] + async fn publish_update_log_tests( + #[case] log_id: LogId, + #[case] key_pair: KeyPair, + #[from(test_db_config)] + #[with(2, 1, 1)] + config: PopulateDatabaseConfig, + ) { + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + + let document_id = db.test_data.documents.first().unwrap(); + let document_view_id: DocumentViewId = document_id.as_str().parse().unwrap(); + let author_performing_update = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + let update_operation = Operation::new_update( + SCHEMA_ID.parse().unwrap(), + document_view_id.clone(), + operation_fields(doggo_test_fields()), + ) + .unwrap(); + + let latest_entry = db + .store + .get_latest_entry(&author_performing_update, &log_id) + .await + .unwrap(); + + let entry = Entry::new( + &log_id, + Some(&update_operation), + None, + latest_entry.as_ref().map(|entry| entry.hash()).as_ref(), + &latest_entry + .map(|entry| entry.seq_num().next().unwrap()) + .unwrap_or_default(), + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(&update_operation).unwrap(); + + let result = publish(&db.store, &entry_encoded, &operation_encoded).await; + + result.unwrap(); + } + + #[rstest] + #[case::owner_publishes_to_correct_log(LogId::new(2), KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[case::new_author_publishes_to_new_log(LogId::new(0), KeyPair::new())] + #[should_panic( + expected = "Entry's claimed seq num of 1 does not match expected seq num of 2 for given author and log" + )] + #[case::owner_publishes_to_wrong_and_taken_log(LogId::new(1), KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic( + expected = "Entry's claimed log id of 3 does not match expected next log id of 2 
for given author" + )] + #[case::owner_publishes_to_wrong_but_free_log(LogId::new(3), KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic( + expected = "Entry's claimed log id of 1 does not match expected next log id of 0 for given author" + )] + #[case::new_author_publishes_to_wrong_new_log(LogId::new(1), KeyPair::new())] + #[tokio::test] + async fn publish_create_log_tests( + #[case] log_id: LogId, + #[case] key_pair: KeyPair, + operation: Operation, + #[from(test_db_config)] + #[with(1, 2, 1)] + config: PopulateDatabaseConfig, + ) { + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + + let entry = Entry::new(&log_id, Some(&operation), None, None, &SeqNum::default()).unwrap(); + + let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(&operation).unwrap(); + + let result = publish(&db.store, &entry_encoded, &operation_encoded).await; + + result.unwrap(); + } + + #[rstest] + #[should_panic( + expected = "You are trying to update or delete a document which has been deleted" + )] + #[case(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic( + expected = "You are trying to update or delete a document which has been deleted" + )] + #[case(KeyPair::new())] + #[tokio::test] + async fn publish_to_deleted_documents( + #[case] key_pair: KeyPair, + #[from(test_db_config)] + #[with(2, 1, 1, true)] + config: PopulateDatabaseConfig, + ) { + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + let document_id = db.test_data.documents.first().unwrap(); + let document_view_id: DocumentViewId = document_id.as_str().parse().unwrap(); + let author_performing_update = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + let delete_operation = + Operation::new_delete(SCHEMA_ID.parse().unwrap(), document_view_id.clone()).unwrap(); + + let latest_entry = db + .store + .get_latest_entry(&author_performing_update, &LogId::default()) + .await + .unwrap(); + + let entry = Entry::new( + &LogId::default(), + Some(&delete_operation), + None, + latest_entry.as_ref().map(|entry| entry.hash()).as_ref(), + &latest_entry + .map(|entry| entry.seq_num().next().unwrap()) + .unwrap_or_default(), + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(&delete_operation).unwrap(); + + let result = publish(&db.store, &entry_encoded, &operation_encoded).await; + + result.unwrap(); + } + + #[rstest] + #[should_panic(expected = "Document is deleted")] + #[case(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap())] + #[should_panic(expected = "Document is deleted")] + #[case(KeyPair::new())] + #[tokio::test] + async fn next_args_deleted_documents( + #[case] key_pair: KeyPair, + #[from(test_db_config)] + #[with(3, 1, 1, true)] + config: PopulateDatabaseConfig, + ) { + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + populate_test_db(&mut db, &config).await; + let document_id = db.test_data.documents.first().unwrap(); + let document_view_id: DocumentViewId = document_id.as_str().parse().unwrap(); + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + let result = next_args(&db.store, &author, Some(&document_view_id)).await; + + result.unwrap(); + } + + #[rstest] + fn 
publish_many_entries(key_pair: KeyPair, #[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + let num_of_entries = 13; + let mut document_id: Option = None; + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + for index in 0..num_of_entries { + let document_view_id: Option = + document_id.clone().map(|id| id.as_str().parse().unwrap()); + + let next_entry_args = next_args(&db.store, &author, document_view_id.as_ref()) + .await + .unwrap(); + + let operation = if index == 0 { + create_operation(&[("name", OperationValue::Text("Panda".to_string()))]) + } else if index == (num_of_entries - 1) { + delete_operation(&next_entry_args.backlink.clone().unwrap().into()) + } else { + update_operation( + &[("name", OperationValue::Text("🐼".to_string()))], + &next_entry_args.backlink.clone().unwrap().into(), + ) + }; + + let entry = Entry::new( + &next_entry_args.log_id.into(), + Some(&operation), + next_entry_args.skiplink.map(Hash::from).as_ref(), + next_entry_args.backlink.map(Hash::from).as_ref(), + &next_entry_args.seq_num.into(), + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(&operation).unwrap(); + + if index == 0 { + document_id = Some(entry_encoded.hash().into()); + } + + let result = publish(&db.store, &entry_encoded, &operation_encoded).await; + + assert!(result.is_ok()); + } + }); + } + + #[rstest] + #[should_panic(expected = "Max sequence number reached for log 0")] + #[tokio::test] + async fn next_args_max_seq_num_reached( + key_pair: KeyPair, + #[from(test_db_config)] + #[with(2, 1, 1, false)] + config: PopulateDatabaseConfig, + ) { + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + + populate_test_db(&mut db, &config).await; + + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + let entry_two = db + .store + .get_entry_at_seq_num(&author, &LogId::default(), &SeqNum::new(2).unwrap()) + .await + .unwrap() + .unwrap(); + + let entry = Entry::new( + &LogId::default(), + Some(&entry_two.operation()), + Some(&random_hash()), + Some(&random_hash()), + &SeqNum::new(u64::MAX).unwrap(), + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); + + let entry = + StorageEntry::new(&entry_encoded, &entry_two.operation_encoded().unwrap()).unwrap(); + + db.store + .entries + .lock() + .unwrap() + .insert(entry.hash(), entry.clone()); + + let result = next_args(&db.store, &author, Some(&entry_two.hash().into())).await; + + result.unwrap(); + } + + #[rstest] + #[should_panic(expected = "Max sequence number reached for log 0")] + #[tokio::test] + async fn publish_max_seq_num_reached( + key_pair: KeyPair, + #[from(test_db_config)] + #[with(2, 1, 1, false)] + config: PopulateDatabaseConfig, + ) { + let mut db = TestDatabase { + store: MemoryStore::default(), + test_data: TestData::default(), + }; + + populate_test_db(&mut db, &config).await; + + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + // Get the latest entry, we will use it's operation in all other entries (doesn't matter if it's a duplicate, just need the previous + // operations to exist). 
+ let entry_two = db + .store + .get_entry_at_seq_num(&author, &LogId::default(), &SeqNum::new(2).unwrap()) + .await + .unwrap() + .unwrap(); + + // Create and insert the skiplink for MAX_SEQ_NUM entry + let skiplink = Entry::new( + &LogId::default(), + Some(&entry_two.operation()), + Some(&random_hash()), + Some(&random_hash()), + &SeqNum::new(18446744073709551611).unwrap(), + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&skiplink, &key_pair).unwrap(); + + let skiplink = + StorageEntry::new(&entry_encoded, &entry_two.operation_encoded().unwrap()).unwrap(); + + db.store + .entries + .lock() + .unwrap() + .insert(skiplink.hash(), skiplink.clone()); + + // Create and insert the backlink for MAX_SEQ_NUM entry + let backlink = Entry::new( + &LogId::default(), + Some(&entry_two.operation()), + Some(&random_hash()), + Some(&random_hash()), + &SeqNum::new(u64::MAX - 1).unwrap(), + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&backlink, &key_pair).unwrap(); + + let backlink = + StorageEntry::new(&entry_encoded, &entry_two.operation_encoded().unwrap()).unwrap(); + + db.store + .entries + .lock() + .unwrap() + .insert(backlink.hash(), backlink.clone()); + + // Create the MAX_SEQ_NUM entry using the above skiplink and backlink + let entry_with_max_seq_num = Entry::new( + &LogId::default(), + Some(&entry_two.operation()), + Some(&skiplink.hash()), + Some(&backlink.hash()), + &SeqNum::new(u64::MAX).unwrap(), + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&entry_with_max_seq_num, &key_pair).unwrap(); + + // Publish the MAX_SEQ_NUM entry + let result = publish( + &db.store, + &entry_encoded, + &entry_two.operation_encoded().unwrap(), + ) + .await; + + // try and get the MAX_SEQ_NUM entry again (it shouldn't be there) + let entry_at_max_seq_num = db + .store + .get_entry_by_hash(&entry_encoded.hash()) + .await + .unwrap(); + + // We expect the entry we published not to have been stored in the db + assert!(entry_at_max_seq_num.is_none()); + result.unwrap(); + } +} diff --git a/aquadoggo/src/errors.rs b/aquadoggo/src/errors.rs index d2fbc339f..41832b218 100644 --- a/aquadoggo/src/errors.rs +++ b/aquadoggo/src/errors.rs @@ -1,4 +1,4 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -/// A specialized result type for the storage provider. -pub type StorageProviderResult = anyhow::Result>; +/// A result type used in aquadoggo modules. 
+pub type Result = anyhow::Result>; diff --git a/aquadoggo/src/graphql/client/mutation.rs b/aquadoggo/src/graphql/client/mutation.rs index 74028859d..3da254f47 100644 --- a/aquadoggo/src/graphql/client/mutation.rs +++ b/aquadoggo/src/graphql/client/mutation.rs @@ -1,13 +1,13 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use async_graphql::{Context, Error, Object, Result}; -use p2panda_rs::operation::{AsVerifiedOperation, VerifiedOperation}; -use p2panda_rs::storage_provider::traits::{OperationStore, StorageProvider}; +use async_graphql::{Context, Object, Result}; +use p2panda_rs::entry::{decode_entry, EntrySigned}; +use p2panda_rs::operation::{Operation, OperationEncoded, OperationId}; use p2panda_rs::Validate; use crate::bus::{ServiceMessage, ServiceSender}; use crate::db::provider::SqlStorage; -use crate::db::request::PublishEntryRequest; +use crate::domain::publish; use crate::graphql::client::NextEntryArguments; use crate::graphql::scalars; @@ -34,48 +34,57 @@ impl ClientMutationRoot { let store = ctx.data::()?; let tx = ctx.data::()?; - // Parse and validate parameters - let args = PublishEntryRequest { - entry: entry.into(), - operation: operation.into(), - }; - args.validate()?; - - // Validate and store entry in database - // @TODO: Check all validation steps here for both entries and operations. Also, there is - // probably overlap in what replication needs in terms of validation? - let response = store.publish_entry(&args).await.map_err(Error::from)?; - - // Load related document from database - // @TODO: We probably have this instance already inside of "publish_entry"? - match store.get_document_by_entry(&args.entry.hash()).await? { - Some(document_id) => { - let verified_operation = - VerifiedOperation::new_from_entry(&args.entry, &args.operation)?; - - // Store operation in database - // @TODO: This is not done by "publish_entry", maybe it needs to move there as - // well? - store - .insert_operation(&verified_operation, &document_id) - .await?; - - // Send new operation on service communication bus, this will arrive eventually at - // the materializer service - if tx - .send(ServiceMessage::NewOperation( - verified_operation.operation_id().to_owned(), - )) - .is_err() - { - // Silently fail here as we don't mind if there are no subscribers. We have - // tests in other places to check if messages arrive. - } + let entry_signed: EntrySigned = entry.into(); + let operation_encoded: OperationEncoded = operation.into(); - Ok(response) - } - None => Err(Error::new("No related document found in database")), + ///////////////////////////////////////////////////// + // VALIDATE ENTRY AND OPERATION INTERNAL INTEGRITY // + ///////////////////////////////////////////////////// + + //@TODO: This pre-publishing validation needs to be reviewed in detail. Some problems come up here + // because we are not verifying the encoded operations against cddl yet. We should consider the + // role and expectations of `VerifiedOperation` and `StorageEntry` as well (they perform validation + // themselves bt are only constructed _after_ all other validation has taken place). Lot's to be + // improved here in general I think. Nice to see it as a very seperate step before `publish` i think. + + // Validate the encoded entry + entry_signed.validate()?; + + // Validate the encoded operation + operation_encoded.validate()?; + + // Decode the entry with it's operation. 
+ // + // @TODO: Without this `publish` fails + decode_entry(&entry_signed, Some(&operation_encoded))?; + + // Also need to validate the decoded operation to catch internally invalid operations + // + // @TODO: Without this `publish` fails + let operation = Operation::from(&operation_encoded); + operation.validate()?; + + ///////////////////////////////////// + // PUBLISH THE ENTRY AND OPERATION // + ///////////////////////////////////// + + let next_args = publish(store, &entry_signed, &operation_encoded).await?; + + //////////////////////////////////////// + // SEND THE OPERATION TO MATERIALIZER // + //////////////////////////////////////// + + // Send new operation on service communication bus, this will arrive eventually at + // the materializer service + + let operation_id: OperationId = entry_signed.hash().into(); + + if tx.send(ServiceMessage::NewOperation(operation_id)).is_err() { + // Silently fail here as we don't mind if there are no subscribers. We have + // tests in other places to check if messages arrive. } + + Ok(next_args) } } @@ -87,12 +96,12 @@ mod tests { use ciborium::cbor; use ciborium::value::Value; use once_cell::sync::Lazy; - use p2panda_rs::document::DocumentId; + use p2panda_rs::document::{DocumentId, DocumentViewId}; use p2panda_rs::entry::{sign_and_encode, Entry, EntrySigned, LogId, SeqNum}; use p2panda_rs::hash::Hash; use p2panda_rs::identity::{Author, KeyPair}; use p2panda_rs::operation::{Operation, OperationEncoded, OperationValue}; - use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore, StorageProvider}; + use p2panda_rs::storage_provider::traits::EntryStore; use p2panda_rs::test_utils::constants::{HASH, PRIVATE_KEY, SCHEMA_ID}; use p2panda_rs::test_utils::fixtures::{ create_operation, delete_operation, entry_signed_encoded, entry_signed_encoded_unvalidated, @@ -103,8 +112,9 @@ mod tests { use tokio::sync::broadcast; use crate::bus::ServiceMessage; - use crate::db::request::EntryArgsRequest; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + use crate::domain::next_args; use crate::http::{build_server, HttpServiceContext}; use crate::test_helpers::TestClient; @@ -215,7 +225,7 @@ mod tests { #[rstest] fn publish_entry(#[from(test_db)] runner: TestDatabaseRunner, publish_entry_request: Request) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _) = broadcast::channel(16); let context = HttpServiceContext::new(db.store, tx); let response = context.schema.execute(publish_entry_request).await; @@ -239,7 +249,7 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, publish_entry_request: Request, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, mut rx) = broadcast::channel(16); let context = HttpServiceContext::new(db.store, tx); @@ -259,7 +269,7 @@ mod tests { #[rstest] fn publish_entry_error_handling(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _rx) = broadcast::channel(16); let context = HttpServiceContext::new(db.store, tx); @@ -283,7 +293,7 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, publish_entry_request: Request, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { 
let (tx, _rx) = broadcast::channel(16); let context = HttpServiceContext::new(db.store, tx); let client = TestClient::new(build_server(context)); @@ -444,34 +454,132 @@ mod tests { &OPERATION_ENCODED, "Could not decode payload hash DecodeError" )] + #[case::create_operation_with_previous_operations( + &entry_signed_encoded_unvalidated( + 1, + 0, + None, + None, + Some(Operation::from(&OperationEncoded::new(&CREATE_OPERATION_WITH_PREVIOUS_OPS).unwrap())), + key_pair(PRIVATE_KEY) + ), + &CREATE_OPERATION_WITH_PREVIOUS_OPS, + "previous_operations field should be empty" + )] + #[case::update_operation_no_previous_operations( + &entry_signed_encoded_unvalidated( + 1, + 0, + None, + None, + Some(Operation::from(&OperationEncoded::new(&UPDATE_OPERATION_NO_PREVIOUS_OPS).unwrap())), + key_pair(PRIVATE_KEY) + ), + &UPDATE_OPERATION_NO_PREVIOUS_OPS, + "previous_operations field can not be empty" + )] + #[case::delete_operation_no_previous_operations( + &entry_signed_encoded_unvalidated( + 1, + 0, + None, + None, + Some(Operation::from(&OperationEncoded::new(&DELETE_OPERATION_NO_PREVIOUS_OPS).unwrap())), + key_pair(PRIVATE_KEY) + ), + &DELETE_OPERATION_NO_PREVIOUS_OPS, + "previous_operations field can not be empty" + )] + fn validates_encoded_entry_and_operation_integrity( + #[case] entry_encoded: &str, + #[case] operation_encoded: &str, + #[case] expected_error_message: &str, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + let entry_encoded = entry_encoded.to_string(); + let operation_encoded = operation_encoded.to_string(); + let expected_error_message = expected_error_message.to_string(); + + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + let client = TestClient::new(build_server(context)); + + let publish_entry_request = publish_entry_request(&entry_encoded, &operation_encoded); + + let response = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables + } + )) + .send() + .await; + + let response = response.json::().await; + for error in response.get("errors").unwrap().as_array().unwrap() { + assert_eq!( + error.get("message").unwrap().as_str().unwrap(), + expected_error_message + ) + } + }); + } + + #[rstest] #[case::backlink_and_skiplink_not_in_db( &entry_signed_encoded_unvalidated( 8, - 0, + 1, Some(HASH.parse().unwrap()), Some(Hash::new_from_bytes(vec![2, 3, 4]).unwrap()), Some(Operation::from(&OperationEncoded::new(&OPERATION_ENCODED).unwrap())), key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not find expected backlink in database for entry with id: " + "Entry's claimed seq num of 8 does not match expected seq num of 1 for given author and log" )] #[case::backlink_not_in_db( &entry_signed_encoded_unvalidated( - 2, + 11, 0, - Some(HASH.parse().unwrap()), + Some(random_hash()), + None, + Some(Operation::from(&OperationEncoded::new(&OPERATION_ENCODED).unwrap())), + key_pair(PRIVATE_KEY) + ), + &OPERATION_ENCODED, + "The backlink hash encoded in the entry does not match the lipmaa entry provided" //Think this error message is wrong + )] + #[case::not_the_next_seq_num( + &entry_signed_encoded_unvalidated( + 14, + 0, + Some(random_hash()), None, Some(Operation::from(&OperationEncoded::new(&OPERATION_ENCODED).unwrap())), key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not find expected backlink in database for entry with id: " + "Entry's claimed seq num of 14 does not match expected seq num of 
11 for given author and log" + )] + #[case::occupied_seq_num( + &entry_signed_encoded_unvalidated( + 6, + 0, + Some(random_hash()), + None, + Some(Operation::from(&OperationEncoded::new(&OPERATION_ENCODED).unwrap())), + key_pair(PRIVATE_KEY) + ), + &OPERATION_ENCODED, + "Entry's claimed seq num of 6 does not match expected seq num of 11 for given author and log" )] #[case::previous_operations_not_in_db( &entry_signed_encoded_unvalidated( 1, - 0, + 1, None, None, Some( @@ -497,55 +605,33 @@ mod tests { None ).as_str().to_owned() }, - "Could not find document for entry in database with id: " + " not found, could not determine document id" )] - #[case::create_operation_with_previous_operations( + #[case::claimed_log_id_does_not_match_expected( &entry_signed_encoded_unvalidated( 1, - 0, - None, - None, - Some(Operation::from(&OperationEncoded::new(&CREATE_OPERATION_WITH_PREVIOUS_OPS).unwrap())), - key_pair(PRIVATE_KEY) - ), - &CREATE_OPERATION_WITH_PREVIOUS_OPS, - "previous_operations field should be empty" - )] - #[case::update_operation_no_previous_operations( - &entry_signed_encoded_unvalidated( - 1, - 0, - None, - None, - Some(Operation::from(&OperationEncoded::new(&UPDATE_OPERATION_NO_PREVIOUS_OPS).unwrap())), - key_pair(PRIVATE_KEY) - ), - &UPDATE_OPERATION_NO_PREVIOUS_OPS, - "previous_operations field can not be empty" - )] - #[case::delete_operation_no_previous_operations( - &entry_signed_encoded_unvalidated( - 1, - 0, + 2, None, None, - Some(Operation::from(&OperationEncoded::new(&DELETE_OPERATION_NO_PREVIOUS_OPS).unwrap())), + Some(Operation::from(&OperationEncoded::new(&OPERATION_ENCODED).unwrap())), key_pair(PRIVATE_KEY) ), - &DELETE_OPERATION_NO_PREVIOUS_OPS, - "previous_operations field can not be empty" + &OPERATION_ENCODED, + "Entry's claimed log id of 2 does not match expected next log id of 1 for given author" )] - fn invalid_requests_fail( + fn validation_of_entry_and_operation_values( #[case] entry_encoded: &str, #[case] operation_encoded: &str, #[case] expected_error_message: &str, - #[from(test_db)] runner: TestDatabaseRunner, + #[from(test_db)] + #[with(10, 1, 1)] + runner: TestDatabaseRunner, ) { let entry_encoded = entry_encoded.to_string(); let operation_encoded = operation_encoded.to_string(); let expected_error_message = expected_error_message.to_string(); - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _rx) = broadcast::channel(16); let context = HttpServiceContext::new(db.store, tx); let client = TestClient::new(build_server(context)); @@ -574,7 +660,7 @@ mod tests { #[rstest] fn publish_many_entries(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let key_pairs = vec![KeyPair::new(), KeyPair::new()]; let num_of_entries = 13; @@ -583,15 +669,13 @@ mod tests { let client = TestClient::new(build_server(context)); for key_pair in &key_pairs { - let mut document: Option = None; + let mut document_id: Option = None; let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); for index in 0..num_of_entries { - let next_entry_args = db - .store - .get_entry_args(&EntryArgsRequest { - public_key: author.clone(), - document_id: document.as_ref().cloned(), - }) + let document_view_id: Option = + document_id.clone().map(|id| id.as_str().parse().unwrap()); + + let next_entry_args = next_args(&db.store, &author, document_view_id.as_ref()) .await .unwrap(); @@ -619,7 
+703,7 @@ mod tests { let operation_encoded = OperationEncoded::try_from(&operation).unwrap(); if index == 0 { - document = Some(entry_encoded.hash().into()); + document_id = Some(entry_encoded.hash().into()); } // Prepare a publish entry request for each entry. @@ -649,27 +733,23 @@ mod tests { #[with(1, 1, 1, false, SCHEMA_ID.parse().unwrap())] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|populated_db: TestDatabase| async move { + runner.with_db_teardown(|populated_db: TestDatabase| async move { let (tx, _rx) = broadcast::channel(16); let context = HttpServiceContext::new(populated_db.store.clone(), tx); let client = TestClient::new(build_server(context)); - // Get the entries from the prepopulated store. - let mut entries = populated_db + // Get the one entry from the store. + let entries = populated_db .store .get_entries_by_schema(&SCHEMA_ID.parse().unwrap()) .await .unwrap(); + let entry = entries.first().unwrap(); - // Sort them by seq_num. - entries.sort_by_key(|entry| entry.seq_num().as_u64()); - - let duplicate_entry = entries.first().unwrap(); - - // Prepare a publish entry request for each entry. + // Prepare a publish entry request for the entry. let publish_entry_request = publish_entry_request( - duplicate_entry.entry_signed().as_str(), - duplicate_entry.operation_encoded().unwrap().as_str(), + entry.entry_signed().as_str(), + entry.operation_encoded().unwrap().as_str(), ); // Publish the entry and parse response. @@ -685,11 +765,8 @@ mod tests { let response = response.json::().await; - // @TODO: This currently throws an internal SQL error to the API user, I think we'd - // like a nicer error message here: - // https://github.com/p2panda/aquadoggo/issues/159 for error in response.get("errors").unwrap().as_array().unwrap() { - assert!(error.get("message").is_some()) + assert_eq!(error.get("message").unwrap(), "Entry's claimed seq num of 1 does not match expected seq num of 2 for given author and log") } }); } diff --git a/aquadoggo/src/graphql/client/query.rs b/aquadoggo/src/graphql/client/query.rs index 9d0dc307f..d8d7e3bc6 100644 --- a/aquadoggo/src/graphql/client/query.rs +++ b/aquadoggo/src/graphql/client/query.rs @@ -1,12 +1,12 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use async_graphql::{Context, Error, Object, Result}; -use p2panda_rs::document::DocumentId; -use p2panda_rs::storage_provider::traits::StorageProvider; +use async_graphql::{Context, Object, Result}; +use p2panda_rs::document::{DocumentId, DocumentViewId}; +use p2panda_rs::identity::Author; use p2panda_rs::Validate; use crate::db::provider::SqlStorage; -use crate::db::request::EntryArgsRequest; +use crate::domain::next_args; use crate::graphql::client::response::NextEntryArguments; use crate::graphql::scalars; @@ -33,25 +33,38 @@ impl ClientRoot { )] document_id: Option, ) -> Result { - let args = EntryArgsRequest { - public_key: public_key.into(), - document_id: document_id.map(DocumentId::from), - }; - args.validate()?; + // @TODO: The api for `next_entry_args` needs to be updated to accept a `DocumentViewId` - // Load and return next entry arguments + // Access the store from context. let store = ctx.data::()?; - store.get_entry_args(&args).await.map_err(Error::from) + + // Convert and validate passed parameters. 
+ let public_key: Author = public_key.into(); + let document_view_id: Option = document_id + .map(DocumentId::from) + .map(|id| id.as_str().parse().unwrap()); + + public_key.validate()?; + if let Some(ref document_view_id) = document_view_id { + document_view_id.validate()?; + } + + // Calculate next entry args. + next_args(store, &public_key, document_view_id.as_ref()).await } } #[cfg(test)] mod tests { + use std::convert::TryFrom; + use async_graphql::{value, Response}; + use p2panda_rs::identity::Author; use rstest::rstest; use serde_json::json; use tokio::sync::broadcast; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::http::build_server; use crate::http::HttpServiceContext; @@ -59,7 +72,7 @@ mod tests { #[rstest] fn next_entry_args_valid_query(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _) = broadcast::channel(16); let context = HttpServiceContext::new(db.store, tx); let client = TestClient::new(build_server(context)); @@ -99,9 +112,66 @@ mod tests { }) } + #[rstest] + fn next_entry_args_valid_query_with_document_id( + #[with(1, 1, 1)] + #[from(test_db)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + let client = TestClient::new(build_server(context)); + + let document_id = db.test_data.documents.get(0).unwrap(); + let author = + Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); + + // Selected fields need to be alphabetically sorted because that's what the `json` + // macro that is used in the assert below produces. + let received_entry_args = client + .post("/graphql") + .json(&json!({ + "query": + format!( + "{{ + nextEntryArgs( + publicKey: \"{}\", + documentId: \"{}\" + ) {{ + logId, + seqNum, + backlink, + skiplink + }} + }}", + author.as_str(), + document_id.as_str() + ) + })) + .send() + .await + .json::() + .await; + + assert!(received_entry_args.is_ok()); + assert_eq!( + received_entry_args.data, + value!({ + "nextEntryArgs": { + "logId": "0", + "seqNum": "2", + "backlink": "0020c8e09edd863b308f9c60b8ba506f29da512d0c9b5a131287f402c57777af5678", + "skiplink": null, + } + }) + ); + }) + } + #[rstest] fn next_entry_args_error_response(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _) = broadcast::channel(16); let context = HttpServiceContext::new(db.store, tx); let client = TestClient::new(build_server(context)); diff --git a/aquadoggo/src/graphql/client/response.rs b/aquadoggo/src/graphql/client/response.rs index 17070ded1..90f278fe5 100644 --- a/aquadoggo/src/graphql/client/response.rs +++ b/aquadoggo/src/graphql/client/response.rs @@ -1,9 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use async_graphql::SimpleObject; -use p2panda_rs::entry::{LogId, SeqNum}; -use p2panda_rs::hash::Hash; -use p2panda_rs::storage_provider::traits::{AsEntryArgsResponse, AsPublishEntryResponse}; use serde::{Deserialize, Serialize}; use crate::graphql::scalars; @@ -25,25 +22,3 @@ pub struct NextEntryArguments { /// Hash of the entry skiplink. 
pub skiplink: Option, } - -impl AsEntryArgsResponse for NextEntryArguments { - fn new(backlink: Option, skiplink: Option, seq_num: SeqNum, log_id: LogId) -> Self { - Self { - log_id: log_id.into(), - seq_num: seq_num.into(), - backlink: backlink.map(scalars::EntryHash::from), - skiplink: skiplink.map(scalars::EntryHash::from), - } - } -} - -impl AsPublishEntryResponse for NextEntryArguments { - fn new(backlink: Option, skiplink: Option, seq_num: SeqNum, log_id: LogId) -> Self { - Self { - log_id: log_id.into(), - seq_num: seq_num.into(), - backlink: backlink.map(scalars::EntryHash::from), - skiplink: skiplink.map(scalars::EntryHash::from), - } - } -} diff --git a/aquadoggo/src/graphql/replication/query.rs b/aquadoggo/src/graphql/replication/query.rs index 3a896482c..ada1aa2f2 100644 --- a/aquadoggo/src/graphql/replication/query.rs +++ b/aquadoggo/src/graphql/replication/query.rs @@ -162,6 +162,7 @@ mod tests { use p2panda_rs::test_utils::fixtures::random_hash; use rstest::rstest; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{ populate_test_db, test_db, with_db_manager_teardown, PopulateDatabaseConfig, TestDatabase, TestDatabaseManager, TestDatabaseRunner, @@ -175,7 +176,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store) @@ -216,7 +217,7 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, #[from(random_hash)] random_hash: Hash, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store) @@ -239,7 +240,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store) @@ -287,7 +288,7 @@ mod tests { #[rstest] fn entry_by_log_id_and_seq_num_not_found(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store) diff --git a/aquadoggo/src/graphql/replication/response.rs b/aquadoggo/src/graphql/replication/response.rs index 216a2c895..54979db13 100644 --- a/aquadoggo/src/graphql/replication/response.rs +++ b/aquadoggo/src/graphql/replication/response.rs @@ -81,6 +81,7 @@ mod tests { use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore}; use rstest::rstest; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::graphql::replication::ReplicationRoot; @@ -90,7 +91,7 @@ mod tests { #[with(13, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store.clone()) diff 
--git a/aquadoggo/src/http/service.rs b/aquadoggo/src/http/service.rs index 7fdc29be4..b69fb16a9 100644 --- a/aquadoggo/src/http/service.rs +++ b/aquadoggo/src/http/service.rs @@ -72,6 +72,7 @@ mod tests { use serde_json::json; use tokio::sync::broadcast; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::http::context::HttpServiceContext; use crate::test_helpers::TestClient; @@ -80,7 +81,7 @@ mod tests { #[rstest] fn graphql_endpoint(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let (tx, _) = broadcast::channel(16); let context = HttpServiceContext::new(db.store, tx); let client = TestClient::new(build_server(context)); diff --git a/aquadoggo/src/lib.rs b/aquadoggo/src/lib.rs index 30462cb01..ef8a3845a 100644 --- a/aquadoggo/src/lib.rs +++ b/aquadoggo/src/lib.rs @@ -17,6 +17,7 @@ mod bus; mod config; mod context; mod db; +mod domain; mod errors; mod graphql; mod http; @@ -25,6 +26,7 @@ mod materializer; mod node; mod replication; mod schema; +mod validation; #[cfg(test)] mod test_helpers; diff --git a/aquadoggo/src/materializer/service.rs b/aquadoggo/src/materializer/service.rs index 373aa3e3e..ec1764fb9 100644 --- a/aquadoggo/src/materializer/service.rs +++ b/aquadoggo/src/materializer/service.rs @@ -154,6 +154,7 @@ mod tests { use tokio::task; use crate::context::Context; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{send_to_store, test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; use crate::materializer::{Task, TaskInput}; @@ -169,7 +170,7 @@ mod tests { runner: TestDatabaseRunner, ) { // Prepare database which inserts data for one document - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Identify document and operation which was inserted for testing let document_id = db.test_data.documents.first().unwrap(); let verified_operation = db @@ -235,7 +236,7 @@ mod tests { .await .unwrap() .expect("We expect that the document is `Some`"); - assert_eq!(document.id().as_str(), document_id.as_str()); + assert_eq!(document.id().to_string(), document_id.to_string()); assert_eq!( document.fields().get("name").unwrap().value().to_owned(), OperationValue::Text("panda".into()) @@ -250,7 +251,7 @@ mod tests { runner: TestDatabaseRunner, ) { // Prepare database which inserts data for one document - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Identify document and operation which was inserted for testing let document_id = db.test_data.documents.first().unwrap(); @@ -305,7 +306,7 @@ mod tests { .await .unwrap() .expect("We expect that the document is `Some`"); - assert_eq!(document.id().as_str(), document_id.as_str()); + assert_eq!(document.id().to_string(), document_id.to_string()); assert_eq!( document.fields().get("name").unwrap().value().to_owned(), OperationValue::Text("panda".into()) @@ -320,7 +321,7 @@ mod tests { runner: TestDatabaseRunner, ) { // Prepare database which inserts data for one document - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Identify key_[air, document and operation which was inserted for testing let key_pair = db.test_data.key_pairs.first().unwrap(); let document_id = db.test_data.documents.first().unwrap(); @@ 
-460,7 +461,7 @@ mod tests { key_pair: KeyPair, ) { // Prepare empty database - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Prepare arguments for service let context = Context::new( db.store.clone(), diff --git a/aquadoggo/src/materializer/tasks/dependency.rs b/aquadoggo/src/materializer/tasks/dependency.rs index c50161f20..25a65cbf8 100644 --- a/aquadoggo/src/materializer/tasks/dependency.rs +++ b/aquadoggo/src/materializer/tasks/dependency.rs @@ -187,6 +187,7 @@ mod tests { use crate::config::Configuration; use crate::context::Context; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{ insert_entry_operation_and_view, send_to_store, test_db, TestDatabase, TestDatabaseRunner, }; @@ -306,7 +307,7 @@ mod tests { #[case] runner: TestDatabaseRunner, #[case] expected_next_tasks: usize, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -346,7 +347,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -410,7 +411,7 @@ mod tests { #[case] document_view_id: Option, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store, Configuration::default(), @@ -456,7 +457,7 @@ mod tests { ) )] fn fails_on_deleted_documents(#[case] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -493,7 +494,7 @@ mod tests { ])] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -578,7 +579,7 @@ mod tests { ])] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), diff --git a/aquadoggo/src/materializer/tasks/reduce.rs b/aquadoggo/src/materializer/tasks/reduce.rs index 9672932ea..435141a26 100644 --- a/aquadoggo/src/materializer/tasks/reduce.rs +++ b/aquadoggo/src/materializer/tasks/reduce.rs @@ -195,6 +195,7 @@ mod tests { use crate::config::Configuration; use crate::context::Context; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{send_to_store, test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; use crate::materializer::tasks::reduce_task; @@ -215,7 +216,7 @@ mod tests { )] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store, Configuration::default(), @@ -244,7 +245,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_id = db.test_data.documents.first().unwrap(); let key_pair = 
db.test_data.key_pairs.first().unwrap(); @@ -270,7 +271,7 @@ mod tests { Some(document_id.as_str().parse().unwrap()), None, ), - Some(document_id), + Some(&document_id), key_pair, ) .await; @@ -280,7 +281,11 @@ mod tests { assert!(reduce_task(context.clone(), input).await.is_ok()); // The new view should exist and the document should refer to it. - let document_view = context.store.get_document_by_id(document_id).await.unwrap(); + let document_view = context + .store + .get_document_by_id(&document_id) + .await + .unwrap(); assert_eq!( document_view.unwrap().get("username").unwrap().value(), &OperationValue::Text("meeeeeee".to_string()) @@ -294,7 +299,7 @@ mod tests { #[with( 2, 1, 1, false, SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_operations = db .store .get_operations_by_document_id(&db.test_data.documents[0]) @@ -355,7 +360,7 @@ mod tests { #[with(3, 1, 20, true)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -402,7 +407,7 @@ mod tests { #[case] runner: TestDatabaseRunner, #[case] is_next_task: bool, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -424,7 +429,7 @@ mod tests { #[case] document_view_id: Option, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store, Configuration::default(), @@ -445,7 +450,7 @@ mod tests { #[from(random_document_view_id)] document_view_id: DocumentViewId, ) { // Prepare empty database. - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), diff --git a/aquadoggo/src/materializer/tasks/schema.rs b/aquadoggo/src/materializer/tasks/schema.rs index fd4b5aecb..14b199f34 100644 --- a/aquadoggo/src/materializer/tasks/schema.rs +++ b/aquadoggo/src/materializer/tasks/schema.rs @@ -126,6 +126,7 @@ mod tests { use rstest::rstest; use crate::context::Context; + use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{send_to_store, test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; use crate::materializer::tasks::reduce_task; @@ -138,7 +139,7 @@ mod tests { /// Insert a test schema definition and schema field definition and run reduce tasks for both. 
async fn create_schema_documents( context: &Context, - db: &TestDatabase, + db: &TestDatabase, ) -> (DocumentViewId, DocumentViewId) { // Create field definition let create_field_definition = Operation::new_create( @@ -210,7 +211,7 @@ mod tests { #[with(1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 574d60a3c..8006524cc 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -5,21 +5,17 @@ use std::time::Duration; use anyhow::{anyhow, Result}; use bamboo_rs_core_ed25519_yasmf::verify::verify_batch; -use futures::TryFutureExt; use log::{debug, error, trace, warn}; use p2panda_rs::entry::LogId; use p2panda_rs::entry::SeqNum; use p2panda_rs::identity::Author; -use p2panda_rs::operation::{AsVerifiedOperation, VerifiedOperation}; -use p2panda_rs::storage_provider::traits::{ - AsStorageEntry, EntryStore, OperationStore, StorageProvider, -}; +use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore}; use tokio::task; use crate::bus::{ServiceMessage, ServiceSender}; use crate::context::Context; -use crate::db::request::PublishEntryRequest; use crate::db::stores::StorageEntry; +use crate::domain::publish; use crate::graphql::replication::client; use crate::manager::{ServiceReadySender, Shutdown}; @@ -160,66 +156,29 @@ async fn insert_new_entries( tx: ServiceSender, ) -> Result<()> { for entry in new_entries { - // Parse and validate parameters - let args = PublishEntryRequest { - entry: entry.entry_signed().clone(), - // We know a storage entry has an operation so we safely unwrap here. - operation: entry.operation_encoded().unwrap().clone(), - }; - // This is the method used to publish entries arriving from clients. They all contain a // payload (operation). // // @TODO: This is not a great fit for replication, as it performs validation we either do - // not need or already done in a previous step. We plan to refactor this into a more + // not need or have already done in a previous step. We plan to refactor this into a more // modular set of methods which can definitely be used here more cleanly. For now, we do it // this way. - context - .0 - .store - .publish_entry(&args) - .await - .map_err(|err| anyhow!(format!("Error inserting new entry into db: {:?}", err)))?; - - // @TODO: We have to publish the operation too, once again, this will be improved with the - // above mentioned refactor. - let document_id = context - .0 - .store - .get_document_by_entry(&entry.hash()) - .await - .map_err(|err| anyhow!(format!("Error retrieving document id from db: {:?}", err)))?; - - match document_id { - Some(document_id) => { - let operation = VerifiedOperation::new_from_entry( - entry.entry_signed(), - entry.operation_encoded().unwrap(), - ) - // Safely unwrap here as the entry and operation were already validated. - .unwrap(); - - context - .0 - .store - .insert_operation(&operation, &document_id) - .map_ok({ - let entry = entry.clone(); - let tx = tx.clone(); + // + // @TODO: Additionally, when we implement payload deletion and partial replication we will + // be expecting entries to arrive here possibly without payloads. 
+ + publish( + &context.0.store, + entry.entry_signed(), + entry + .operation_encoded() + .expect("All stored entries contain an operation"), + ) + .await + .map_err(|err| anyhow!(format!("Error inserting new entry into db: {:?}", err)))?; - move |_| { - send_new_entry_service_message(tx.clone(), &entry); - } - }) - .map_err(|err| { - anyhow!(format!("Error inserting new operation into db: {:?}", err)) - }) - .await - } - None => Err(anyhow!( - "No document found for published operation".to_string() - )), - }?; + // Send new entry & operation to other services. + send_new_entry_service_message(tx.clone(), entry); } Ok(()) @@ -252,7 +211,7 @@ async fn add_certpool_to_entries_for_verification( Ok(()) } -/// Helper method to inform other services (like materialization service) about new operations. +/// Helper method to inform other services (like materialisation service) about new operations. fn send_new_entry_service_message(tx: ServiceSender, entry: &StorageEntry) { let bus_message = ServiceMessage::NewOperation(entry.entry_signed().hash().into()); diff --git a/aquadoggo/src/test_helpers.rs b/aquadoggo/src/test_helpers.rs index 8cc924da7..7ee5a2bec 100644 --- a/aquadoggo/src/test_helpers.rs +++ b/aquadoggo/src/test_helpers.rs @@ -12,10 +12,14 @@ use http::{Request, StatusCode}; use hyper::{Body, Server}; use once_cell::sync::Lazy; use serde::Deserialize; +use sqlx::migrate::MigrateDatabase; +use sqlx::Any; use tokio::task::{self, JoinHandle}; use tower::make::Shared; use tower_service::Service; +use crate::db::{connection_pool, create_database, run_pending_migrations, Pool}; + /// Configuration used in test helper methods. #[derive(Deserialize, Debug)] #[serde(default)] @@ -162,3 +166,33 @@ pub fn shutdown_handle() -> JoinHandle<()> { shutdown } + +/// Create test database. +pub async fn initialize_db() -> Pool { + initialize_db_with_url(&TEST_CONFIG.database_url).await +} + +/// Create test database. +pub async fn initialize_db_with_url(url: &str) -> Pool { + // Reset database first + drop_database().await; + create_database(url).await.unwrap(); + + // Create connection pool and run all migrations + let pool = connection_pool(url, 25).await.unwrap(); + if run_pending_migrations(&pool).await.is_err() { + pool.close().await; + } + + pool +} + +// Delete test database +pub async fn drop_database() { + if Any::database_exists(&TEST_CONFIG.database_url) + .await + .unwrap() + { + Any::drop_database(&TEST_CONFIG.database_url).await.unwrap(); + } +} diff --git a/aquadoggo/src/validation.rs b/aquadoggo/src/validation.rs new file mode 100644 index 000000000..c764d4859 --- /dev/null +++ b/aquadoggo/src/validation.rs @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use anyhow::{anyhow, ensure, Result}; +use p2panda_rs::document::DocumentId; +use p2panda_rs::entry::{LogId, SeqNum}; +use p2panda_rs::identity::Author; +use p2panda_rs::operation::AsOperation; +use p2panda_rs::storage_provider::traits::StorageProvider; +use p2panda_rs::Human; + +/// Verify that a claimed seq num is the next sequence number following the latest. +/// +/// Performs two steps: +/// - determines the expected sequence number +/// - if `latest_seq_num` is `Some` by incrementing that +/// - if `latest_seq_num` is `None` by setting it to 1 +/// - ensures the claimed sequence number is equal to the expected one. 
+pub fn is_next_seq_num(latest_seq_num: Option<&SeqNum>, claimed_seq_num: &SeqNum) -> Result<()> {
+    let expected_seq_num = match latest_seq_num {
+        Some(seq_num) => {
+            let mut seq_num = seq_num.to_owned();
+            increment_seq_num(&mut seq_num)
+        }
+        None => Ok(SeqNum::default()),
+    }?;
+
+    ensure!(
+        expected_seq_num == *claimed_seq_num,
+        anyhow!(
+            "Entry's claimed seq num of {} does not match expected seq num of {} for given author and log",
+            claimed_seq_num.as_u64(),
+            expected_seq_num.as_u64()
+        )
+    );
+    Ok(())
+}
+
+/// Verify that a log id is correctly chosen for a pair of author and document id.
+///
+/// This method handles both the case where the claimed log id already exists for this author
+/// and where it is a new log.
+///
+/// The following steps are taken:
+/// - Retrieve the stored log id for the document id
+/// - If found, ensure it matches the claimed log id
+/// - If not found, retrieve the next available log id for this author and ensure that matches
+pub async fn verify_log_id<S: StorageProvider>(
+    store: &S,
+    author: &Author,
+    claimed_log_id: &LogId,
+    document_id: &DocumentId,
+) -> Result<()> {
+    // Check if there is a log id registered for this document and public key already in the store.
+    match store.get(author, document_id).await? {
+        Some(expected_log_id) => {
+            // If there is, check it matches the log id encoded in the entry.
+            ensure!(
+                *claimed_log_id == expected_log_id,
+                anyhow!(
+                    "Entry's claimed log id of {} does not match existing log id of {} for given author and document",
+                    claimed_log_id.as_u64(),
+                    expected_log_id.as_u64()
+                )
+            );
+        }
+        None => {
+            // If there isn't, check that the next log id for this author matches the one encoded in
+            // the entry.
+            let expected_log_id = next_log_id(store, author).await?;
+
+            ensure!(
+                *claimed_log_id == expected_log_id,
+                anyhow!(
+                    "Entry's claimed log id of {} does not match expected next log id of {} for given author",
+                    claimed_log_id.as_u64(),
+                    expected_log_id.as_u64()
+                )
+            );
+        }
+    };
+    Ok(())
+}
+
+/// Get the entry that _should_ be the skiplink target for the given author, log id and seq num.
+///
+/// This method determines the expected skiplink given an author, log id and sequence number. It
+/// _does not_ verify that this matches the skiplink encoded on any entry.
+///
+/// An error is returned if:
+/// - seq num 1 was passed in, which can not have a skiplink
+/// - the expected skiplink target could not be found in the database.
+pub async fn get_expected_skiplink<S: StorageProvider>(
+    store: &S,
+    author: &Author,
+    log_id: &LogId,
+    seq_num: &SeqNum,
+) -> Result<S::StorageEntry> {
+    ensure!(
+        !seq_num.is_first(),
+        anyhow!("Entry with seq num 1 can not have skiplink")
+    );
+
+    // Unwrap because method always returns `Some` for seq num > 1
+    let skiplink_seq_num = seq_num.skiplink_seq_num().unwrap();
+
+    let skiplink_entry = store
+        .get_entry_at_seq_num(author, log_id, &skiplink_seq_num)
+        .await?;
+
+    match skiplink_entry {
+        Some(entry) => Ok(entry),
+        None => Err(anyhow!(
+            "Expected skiplink target not found in store: {}, log id {}, seq num {}",
+            author.display(),
+            log_id.as_u64(),
+            skiplink_seq_num.as_u64()
+        )),
+    }
+}
+
+/// Ensure that a document is not deleted.
+///
+/// Takes the following steps:
+/// - retrieve all operations for the given document id
+/// - ensure none of them contain a DELETE action
+pub async fn ensure_document_not_deleted<S: StorageProvider>(
+    store: &S,
+    document_id: &DocumentId,
+) -> Result<()> {
+    // Retrieve all operations for this document; if any of them is a DELETE, the document has been deleted.
+ let operations = store.get_operations_by_document_id(document_id).await?; + ensure!( + !operations.iter().any(|operation| operation.is_delete()), + anyhow!("Document is deleted") + ); + Ok(()) +} + +/// Retrieve the next log id for a given author. +/// +/// Takes the following steps: +/// - retrieve the latest log id for the given author +/// - safely increment it by 1 +pub async fn next_log_id(store: &S, author: &Author) -> Result { + let latest_log_id = store.latest_log_id(author).await?; + + match latest_log_id { + Some(mut log_id) => increment_log_id(&mut log_id), + None => Ok(LogId::default()), + } +} + +/// Safely increment a sequence number by one. +pub fn increment_seq_num(seq_num: &mut SeqNum) -> Result { + match seq_num.next() { + Some(next_seq_num) => Ok(next_seq_num), + None => Err(anyhow!("Max sequence number reached")), + } +} + +/// Safely increment a log id by one. +pub fn increment_log_id(log_id: &mut LogId) -> Result { + match log_id.next() { + Some(next_log_id) => Ok(next_log_id), + None => Err(anyhow!("Max log id reached")), + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use p2panda_rs::document::DocumentId; + use p2panda_rs::entry::{LogId, SeqNum}; + use p2panda_rs::identity::{Author, KeyPair}; + use p2panda_rs::storage_provider::traits::AsStorageEntry; + use p2panda_rs::test_utils::constants::PRIVATE_KEY; + use p2panda_rs::test_utils::fixtures::{key_pair, random_document_id}; + use rstest::rstest; + + use crate::db::provider::SqlStorage; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + + use super::{ + ensure_document_not_deleted, get_expected_skiplink, increment_log_id, increment_seq_num, + is_next_seq_num, verify_log_id, + }; + + #[rstest] + #[case(LogId::new(0), LogId::new(1))] + #[should_panic(expected = "Max log id reached")] + #[case(LogId::new(u64::MAX), LogId::new(1))] + fn increments_log_id(#[case] log_id: LogId, #[case] expected_next_log_id: LogId) { + let mut log_id = log_id; + let next_log_id = increment_log_id(&mut log_id).unwrap(); + assert_eq!(next_log_id, expected_next_log_id) + } + + #[rstest] + #[case( SeqNum::new(1).unwrap(), SeqNum::new(2).unwrap())] + #[should_panic(expected = "Max sequence number reached")] + #[case(SeqNum::new(u64::MAX).unwrap(), SeqNum::new(1).unwrap())] + fn increments_seq_num(#[case] seq_num: SeqNum, #[case] expected_next_seq_num: SeqNum) { + let mut seq_num = seq_num; + let next_seq_num = increment_seq_num(&mut seq_num).unwrap(); + assert_eq!(next_seq_num, expected_next_seq_num) + } + + #[rstest] + #[case::valid_seq_num(Some(SeqNum::new(2).unwrap()), SeqNum::new(3).unwrap())] + #[should_panic( + expected = "Entry's claimed seq num of 2 does not match expected seq num of 3 for given author and log" + )] + #[case::seq_num_already_used(Some(SeqNum::new(2).unwrap()),SeqNum::new(2).unwrap())] + #[should_panic( + expected = "Entry's claimed seq num of 4 does not match expected seq num of 3 for given author and log" + )] + #[case::seq_num_too_high(Some(SeqNum::new(2).unwrap()),SeqNum::new(4).unwrap())] + #[should_panic(expected = "Max sequence number reached")] + #[case::seq_num_too_high(Some(SeqNum::new(u64::MAX).unwrap()),SeqNum::new(4).unwrap())] + #[should_panic( + expected = "Entry's claimed seq num of 3 does not match expected seq num of 1 for given author and log" + )] + #[case::no_seq_num(None, SeqNum::new(3).unwrap())] + fn verifies_seq_num(#[case] latest_seq_num: Option, #[case] claimed_seq_num: SeqNum) { + is_next_seq_num(latest_seq_num.as_ref(), 
&claimed_seq_num).unwrap(); + } + + #[rstest] + #[case::existing_document(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::default(), None)] + #[case::new_document(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::new(2), Some(random_document_id()))] + #[case::existing_document_new_author(KeyPair::new(), LogId::new(0), None)] + #[should_panic( + expected = "Entry's claimed log id of 1 does not match existing log id of 0 for given author and document" + )] + #[case::already_occupied_log_id_for_existing_document(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::new(1), None)] + #[should_panic( + expected = "Entry's claimed log id of 2 does not match existing log id of 0 for given author and document" + )] + #[case::new_log_id_for_existing_document(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::new(2), None)] + #[should_panic( + expected = "Entry's claimed log id of 1 does not match expected next log id of 0 for given author" + )] + #[case::new_author_not_next_log_id(KeyPair::new(), LogId::new(1), None)] + #[should_panic( + expected = "Entry's claimed log id of 0 does not match expected next log id of 2 for given author" + )] + #[case::new_document_occupied_log_id(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::new(0), Some(random_document_id()))] + #[should_panic( + expected = "Entry's claimed log id of 3 does not match expected next log id of 2 for given author" + )] + #[case::new_document_not_next_log_id(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::new(3), Some(random_document_id()))] + fn verifies_log_id( + #[case] key_pair: KeyPair, + #[case] claimed_log_id: LogId, + #[case] document_id: Option, + #[from(test_db)] + #[with(2, 2, 1)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + // Unwrap the passed document id or select the first valid one from the database. 
+ let document_id = + document_id.unwrap_or_else(|| db.test_data.documents.first().unwrap().to_owned()); + + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + verify_log_id(&db.store, &author, &claimed_log_id, &document_id) + .await + .unwrap(); + }) + } + + #[rstest] + #[case::expected_skiplink_is_in_store_and_is_same_as_backlink(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::default(), SeqNum::new(4).unwrap())] + #[should_panic( + expected = "Expected skiplink target not found in store: , log id 0, seq num 19" + )] + #[case::skiplink_not_in_store(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::default(), SeqNum::new(20).unwrap())] + #[should_panic(expected = "Expected skiplink target not found in store")] + #[case::author_does_not_exist(KeyPair::new(), LogId::default(), SeqNum::new(5).unwrap())] + #[should_panic(expected = ", log id 4, seq num 6")] + #[case::log_id_is_wrong(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::new(4), SeqNum::new(7).unwrap())] + #[should_panic(expected = "Entry with seq num 1 can not have skiplink")] + #[case::seq_num_is_one(KeyPair::from_private_key_str(PRIVATE_KEY).unwrap(), LogId::new(0), SeqNum::new(1).unwrap())] + fn get_expected_skiplink_errors( + #[case] key_pair: KeyPair, + #[case] log_id: LogId, + #[case] seq_num: SeqNum, + #[from(test_db)] + #[with(7, 1, 1)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + get_expected_skiplink(&db.store, &author, &log_id, &seq_num) + .await + .unwrap(); + }) + } + + #[rstest] + #[should_panic(expected = "Entry with seq num 1 can not have skiplink")] + #[case(SeqNum::new(1).unwrap(), SeqNum::new(1).unwrap())] + #[case(SeqNum::new(2).unwrap(), SeqNum::new(1).unwrap())] + #[case(SeqNum::new(3).unwrap(), SeqNum::new(2).unwrap())] + #[case(SeqNum::new(4).unwrap(), SeqNum::new(1).unwrap())] + #[case(SeqNum::new(5).unwrap(), SeqNum::new(4).unwrap())] + #[case(SeqNum::new(6).unwrap(), SeqNum::new(5).unwrap())] + #[case(SeqNum::new(7).unwrap(), SeqNum::new(6).unwrap())] + #[case(SeqNum::new(8).unwrap(), SeqNum::new(4).unwrap())] + #[case(SeqNum::new(9).unwrap(), SeqNum::new(8).unwrap())] + #[case(SeqNum::new(10).unwrap(), SeqNum::new(9).unwrap())] + fn gets_expected_skiplink( + key_pair: KeyPair, + #[case] seq_num: SeqNum, + #[case] expected_seq_num: SeqNum, + #[from(test_db)] + #[with(10, 1, 1)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + let skiplink_entry = + get_expected_skiplink(&db.store, &author, &LogId::default(), &seq_num) + .await + .unwrap(); + + assert_eq!(skiplink_entry.seq_num(), expected_seq_num) + }) + } + + #[rstest] + #[should_panic(expected = "Document is deleted")] + fn identifies_deleted_document( + #[from(test_db)] + #[with(3, 1, 1, true)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let document_id = db.test_data.documents.first().unwrap(); + ensure_document_not_deleted(&db.store, document_id) + .await + .unwrap(); + }) + } + + #[rstest] + fn identifies_not_deleted_document( + #[from(test_db)] + #[with(3, 1, 1, false)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let document_id = db.test_data.documents.first().unwrap(); + assert!(ensure_document_not_deleted(&db.store, 
document_id) + .await + .is_ok()); + }) + } +}
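
Reviewer note: the call pattern that replaces the old `EntryArgsRequest` / `PublishEntryRequest` flow is the same in the GraphQL client resolver and in the replication service hunks above. The sketch below condenses it into one hypothetical helper (`publish_flow` is not part of this change); it assumes `domain::next_args` and `domain::publish` keep the argument order used above and glosses over their concrete error and return types.

// Hypothetical, for illustration only: how the new `domain` API is driven by
// callers inside the crate (compare the GraphQL resolver and replication
// service changes above).
use anyhow::{anyhow, Result};
use p2panda_rs::document::DocumentViewId;
use p2panda_rs::entry::EntrySigned;
use p2panda_rs::identity::Author;
use p2panda_rs::operation::OperationEncoded;

use crate::db::provider::SqlStorage;
use crate::domain::{next_args, publish};

async fn publish_flow(
    store: &SqlStorage,
    author: &Author,
    document_view_id: Option<&DocumentViewId>,
    entry: &EntrySigned,
    operation: &OperationEncoded,
) -> Result<()> {
    // Ask the node which log id, seq num, backlink and skiplink the next entry
    // should use. `None` starts a new document in the author's next free log.
    let _next_entry_args = next_args(store, author, document_view_id)
        .await
        .map_err(|err| anyhow!("Error retrieving next entry args: {:?}", err))?;

    // Once the client has signed an entry with those arguments, `publish`
    // validates it (see `validation.rs`) and inserts entry and operation.
    publish(store, entry, operation)
        .await
        .map_err(|err| anyhow!("Error inserting new entry into db: {:?}", err))?;

    Ok(())
}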
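
The helpers added in `validation.rs` are individually small; the sketch below shows how they are intended to compose when checking the claimed values of an incoming entry, assuming they keep the signatures shown above. The glue function `check_claimed_entry_values` is illustrative only and not part of this patch; the real composition lives in the `domain` module.

// Illustrative only: composing the validation helpers against the SQL store.
use anyhow::Result;
use p2panda_rs::document::DocumentId;
use p2panda_rs::entry::{LogId, SeqNum};
use p2panda_rs::identity::Author;

use crate::db::provider::SqlStorage;
use crate::validation::{
    ensure_document_not_deleted, get_expected_skiplink, is_next_seq_num, verify_log_id,
};

async fn check_claimed_entry_values(
    store: &SqlStorage,
    author: &Author,
    claimed_log_id: &LogId,
    claimed_seq_num: &SeqNum,
    latest_seq_num: Option<&SeqNum>,
    document_id: &DocumentId,
) -> Result<()> {
    // The claimed log id must match the log already used for this document, or
    // be the author's next free log id when the document is new.
    verify_log_id(store, author, claimed_log_id, document_id).await?;

    // The claimed seq num must be exactly one above the latest entry in the
    // log (or 1 if the log is empty).
    is_next_seq_num(latest_seq_num, claimed_seq_num)?;

    // Every entry above seq num 1 must have its skiplink target in the store.
    if !claimed_seq_num.is_first() {
        let _skiplink =
            get_expected_skiplink(store, author, claimed_log_id, claimed_seq_num).await?;
    }

    // Updates and deletes are only accepted for documents which still exist.
    ensure_document_not_deleted(store, document_id).await?;

    Ok(())
}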