diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bc4131f1..b3dad8d7a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +- Implement API changes to p2panda-rs storage traits, new and breaking db migration [#268](https://github.com/p2panda/aquadoggo/pull/268) + ### Fixed - Fix race-condition of mutably shared static schema store during testing [#269](https://github.com/p2panda/aquadoggo/pull/269) diff --git a/Cargo.lock b/Cargo.lock index e44cd0e27..1f18952ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1558,8 +1558,7 @@ dependencies = [ [[package]] name = "p2panda-rs" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64dab0c58183cd2605797ba14be3c2509fffe8936c716995b0c0680e88f8edde" +source = "git+https://github.com/p2panda/p2panda#ce4f4a33c6a58c23bbef50f05dc2c25d4fd9c130" dependencies = [ "arrayvec 0.5.2", "async-trait", diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index 73408ce70..223453a4f 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -35,7 +35,7 @@ lipmaa-link = "^0.2.2" log = "^0.4.17" once_cell = "^1.12.0" openssl-probe = "^0.1.5" -p2panda-rs = { version = "^0.6.0", features = [ +p2panda-rs = { git = "https://github.com/p2panda/p2panda", ref = "ce4f4a33c6a58c23bbef50f05dc2c25d4fd9c130", features = [ "storage-provider", ] } serde = { version = "^1.0.144", features = ["derive"] } @@ -66,7 +66,7 @@ env_logger = "^0.9.0" http = "^0.2.8" hyper = "^0.14.19" once_cell = "^1.12.0" -p2panda-rs = { version = "^0.6.0", features = [ +p2panda-rs = { git = "https://github.com/p2panda/p2panda", ref = "ce4f4a33c6a58c23bbef50f05dc2c25d4fd9c130", features = [ "test-utils", "storage-provider", ] } diff --git a/aquadoggo/migrations/20230114140233_alter-documents.sql b/aquadoggo/migrations/20230114140233_alter-documents.sql new file mode 100644 index 000000000..14fd39305 --- /dev/null +++ b/aquadoggo/migrations/20230114140233_alter-documents.sql @@ -0,0 +1,3 @@ +-- SPDX-License-Identifier: AGPL-3.0-or-later + +ALTER TABLE document_views ADD COLUMN document_id TEXT NOT NULL; \ No newline at end of file diff --git a/aquadoggo/src/context.rs b/aquadoggo/src/context.rs index e234b8b53..c6c307f75 100644 --- a/aquadoggo/src/context.rs +++ b/aquadoggo/src/context.rs @@ -3,15 +3,18 @@ use std::ops::Deref; use std::sync::Arc; -use p2panda_rs::storage_provider::traits::StorageProvider; +use p2panda_rs::storage_provider::traits::{DocumentStore, EntryStore, LogStore, OperationStore}; use crate::config::Configuration; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; use crate::schema::SchemaProvider; /// Inner data shared across all services. #[derive(Debug)] -pub struct Data { +pub struct Data +where + S: EntryStore + OperationStore + LogStore + DocumentStore, +{ /// Node configuration. pub config: Configuration, @@ -22,7 +25,10 @@ pub struct Data { pub schema_provider: SchemaProvider, } -impl Data { +impl Data +where + S: EntryStore + OperationStore + LogStore + DocumentStore, +{ pub fn new(store: S, config: Configuration, schema_provider: SchemaProvider) -> Self { Self { config, @@ -34,22 +40,33 @@ impl Data { /// Data shared across all services. #[derive(Debug)] -pub struct Context(pub Arc>); +pub struct Context( + pub Arc>, +); -impl Context { +impl Context +where + S: EntryStore + OperationStore + LogStore + DocumentStore, +{ /// Returns a new instance of `Context`. 
pub fn new(store: S, config: Configuration, schema_provider: SchemaProvider) -> Self { Self(Arc::new(Data::new(store, config, schema_provider))) } } -impl Clone for Context { +impl Clone for Context +where + S: EntryStore + OperationStore + LogStore + DocumentStore, +{ fn clone(&self) -> Self { Self(self.0.clone()) } } -impl Deref for Context { +impl Deref for Context +where + S: EntryStore + OperationStore + LogStore + DocumentStore, +{ type Target = Data; fn deref(&self) -> &Self::Target { diff --git a/aquadoggo/src/db/errors.rs b/aquadoggo/src/db/errors.rs index 273d62c11..b4b675786 100644 --- a/aquadoggo/src/db/errors.rs +++ b/aquadoggo/src/db/errors.rs @@ -2,11 +2,11 @@ use p2panda_rs::schema::error::{SchemaError, SchemaIdError}; use p2panda_rs::schema::system::SystemSchemaError; -use p2panda_rs::storage_provider::error::DocumentStorageError; +use p2panda_rs::storage_provider::error::{DocumentStorageError, OperationStorageError}; /// `SQLStorage` errors. #[derive(thiserror::Error, Debug)] -pub enum SqlStorageError { +pub enum SqlStoreError { #[error("SQL query failed: {0}")] Transaction(String), @@ -37,4 +37,8 @@ pub enum SchemaStoreError { /// Error returned from `DocumentStore` methods. #[error(transparent)] DocumentStorageError(#[from] DocumentStorageError), + + /// Error returned from `OperationStore` methods. + #[error(transparent)] + OperationStorageError(#[from] OperationStorageError), } diff --git a/aquadoggo/src/db/mod.rs b/aquadoggo/src/db/mod.rs index aeb6faaaa..b5988bca4 100644 --- a/aquadoggo/src/db/mod.rs +++ b/aquadoggo/src/db/mod.rs @@ -1,5 +1,9 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +//! Persistent storage for an `aquadoggo` node supporting both Postgres and SQLite databases. +//! +//! The main interface is [`SqlStore`] which offers an interface onto the database by implementing +//! the storage traits defined in `p2panda-rs` as well as some implementation specific features. use anyhow::{Error, Result}; use sqlx::any::{Any, AnyPool, AnyPoolOptions}; use sqlx::migrate; @@ -7,10 +11,21 @@ use sqlx::migrate::MigrateDatabase; pub mod errors; pub mod models; -pub mod provider; pub mod stores; -pub mod traits; -pub mod utils; +pub mod types; + +/// SQL based persistent storage that implements `EntryStore`, `OperationStore`, `LogStore` and `DocumentStore`. +#[derive(Clone, Debug)] +pub struct SqlStore { + pub(crate) pool: Pool, +} + +impl SqlStore { + /// Create a new `SqlStore` using the provided db `Pool`. + pub fn new(pool: Pool) -> Self { + Self { pool } + } +} /// Re-export of generic connection pool type. pub type Pool = AnyPool; diff --git a/aquadoggo/src/db/models/document.rs b/aquadoggo/src/db/models/document.rs index 6847b5a54..33d8ba710 100644 --- a/aquadoggo/src/db/models/document.rs +++ b/aquadoggo/src/db/models/document.rs @@ -20,3 +20,22 @@ pub struct DocumentViewFieldRow { /// The actual value contained in this field. pub value: String, } + +/// A struct representing a single row of a document table. +#[derive(FromRow, Debug, Clone)] +pub struct DocumentRow { + /// The id of this document + pub document_id: String, + + /// The id of this documents most recent view. + pub document_view_id: String, + + /// The id of the author of this document. + pub public_key: String, + + /// The id of this documents schema. + pub schema_id: String, + + /// Flag for if this document is deleted. 
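With this change the shared `Context` is generic over any store implementing the four storage traits, and the concrete `SqlStore` introduced in `db/mod.rs` satisfies those bounds. A minimal wiring sketch follows; the helper name `build_context` is hypothetical and only the constructors shown in this diff are used:

use crate::config::Configuration;
use crate::context::Context;
use crate::db::{Pool, SqlStore};
use crate::schema::SchemaProvider;

// `SqlStore` implements `EntryStore + OperationStore + LogStore + DocumentStore`,
// so it can be used as the generic store parameter of `Context<S>`.
fn build_context(
    pool: Pool,
    config: Configuration,
    schema_provider: SchemaProvider,
) -> Context<SqlStore> {
    let store = SqlStore::new(pool);
    Context::new(store, config, schema_provider)
}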
+ pub is_deleted: bool, +} diff --git a/aquadoggo/src/db/models/mod.rs b/aquadoggo/src/db/models/mod.rs index 5b59c46fb..4611b8e34 100644 --- a/aquadoggo/src/db/models/mod.rs +++ b/aquadoggo/src/db/models/mod.rs @@ -1,12 +1,16 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -pub mod document; +//! Structs representing rows in SQL tables. Needed when coercing results returned from a +//! query using the `sqlx` library. +mod document; mod entry; mod log; mod operation; mod task; +pub mod utils; pub use self::log::LogRow; +pub use document::{DocumentRow, DocumentViewFieldRow}; pub use entry::EntryRow; pub use operation::{OperationFieldsJoinedRow, OperationRow}; pub use task::TaskRow; diff --git a/aquadoggo/src/db/utils.rs b/aquadoggo/src/db/models/utils.rs similarity index 99% rename from aquadoggo/src/db/utils.rs rename to aquadoggo/src/db/models/utils.rs index d567ca838..da2aed5ca 100644 --- a/aquadoggo/src/db/utils.rs +++ b/aquadoggo/src/db/models/utils.rs @@ -1,5 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +//! Utility methods for parsing database rows into p2panda data types. use std::collections::BTreeMap; use p2panda_rs::document::{DocumentId, DocumentViewFields, DocumentViewId, DocumentViewValue}; @@ -11,9 +12,9 @@ use p2panda_rs::operation::{ }; use p2panda_rs::schema::SchemaId; -use crate::db::models::document::DocumentViewFieldRow; +use crate::db::models::DocumentViewFieldRow; use crate::db::models::OperationFieldsJoinedRow; -use crate::db::stores::StorageOperation; +use crate::db::types::StorageOperation; /// Takes a vector of `OperationFieldsJoinedRow` and parses them into an `VerifiedOperation` /// struct. @@ -33,6 +34,7 @@ pub fn parse_operation_rows( let schema_id: SchemaId = first_row.schema_id.parse().unwrap(); let public_key = PublicKey::new(&first_row.public_key).unwrap(); let operation_id = first_row.operation_id.parse().unwrap(); + let document_id = first_row.document_id.parse().unwrap(); let mut relation_lists: BTreeMap> = BTreeMap::new(); let mut pinned_relation_lists: BTreeMap> = BTreeMap::new(); @@ -161,6 +163,7 @@ pub fn parse_operation_rows( .unwrap(); let operation = StorageOperation { + document_id, id: operation_id, version: operation.version(), action: operation.action(), @@ -355,7 +358,7 @@ mod tests { use p2panda_rs::test_utils::fixtures::{create_operation, schema_id}; use rstest::rstest; - use crate::db::models::{document::DocumentViewFieldRow, OperationFieldsJoinedRow}; + use crate::db::models::{DocumentViewFieldRow, OperationFieldsJoinedRow}; use super::{parse_document_view_field_rows, parse_operation_rows, parse_value_to_string_vec}; diff --git a/aquadoggo/src/db/provider.rs b/aquadoggo/src/db/provider.rs deleted file mode 100644 index cd7617bfe..000000000 --- a/aquadoggo/src/db/provider.rs +++ /dev/null @@ -1,157 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-or-later - -use async_trait::async_trait; -use p2panda_rs::document::{DocumentId, DocumentViewId}; -use p2panda_rs::hash::Hash; -use p2panda_rs::schema::SchemaId; -use p2panda_rs::storage_provider::error::OperationStorageError; -use p2panda_rs::storage_provider::traits::StorageProvider; -use sqlx::query_scalar; - -use crate::db::stores::{StorageEntry, StorageLog, StorageOperation}; -use crate::db::Pool; -use crate::errors::Result; - -/// Sql based storage that implements `StorageProvider`. -#[derive(Clone, Debug)] -pub struct SqlStorage { - pub(crate) pool: Pool, -} - -impl SqlStorage { - /// Create a new `SqlStorage` using the provided db `Pool`. 
- pub fn new(pool: Pool) -> Self { - Self { pool } - } -} - -/// A `StorageProvider` implementation based on `sqlx` that supports SQLite and PostgreSQL -/// databases. -#[async_trait] -impl StorageProvider for SqlStorage { - type StorageLog = StorageLog; - type Entry = StorageEntry; - type Operation = StorageOperation; - - /// Returns the related document for any entry. - /// - /// Every entry is part of a document and, through that, associated with a specific log id used - /// by this document and public_key. This method returns that document id by looking up the log - /// that the entry was stored in. - async fn get_document_by_entry(&self, entry_hash: &Hash) -> Result> { - let result: Option = query_scalar( - " - SELECT - logs.document - FROM - logs - INNER JOIN entries - ON (logs.log_id = entries.log_id - AND logs.public_key = entries.public_key) - WHERE - entries.entry_hash = $1 - ", - ) - .bind(entry_hash.as_str()) - .fetch_optional(&self.pool) - .await?; - - // Unwrap here since we validate hashes before storing them in the db. - let hash = result.map(|str| { - Hash::new(&str) - .expect("Corrupt hash found in database") - .into() - }); - - Ok(hash) - } -} - -impl SqlStorage { - /// Returns the schema id for a document view. - /// - /// Returns `None` if this document view is not found. - pub async fn get_schema_by_document_view( - &self, - view_id: &DocumentViewId, - ) -> Result> { - let result: Option = query_scalar( - " - SELECT - schema_id - FROM - document_views - WHERE - document_view_id = $1 - ", - ) - .bind(view_id.to_string()) - .fetch_optional(&self.pool) - .await - .map_err(|e| OperationStorageError::FatalStorageError(e.to_string()))?; - - // Unwrap because we expect no invalid schema ids in the db. - Ok(result.map(|id_str| id_str.parse().unwrap())) - } -} - -#[cfg(test)] -mod tests { - use p2panda_rs::identity::KeyPair; - use p2panda_rs::schema::SchemaId; - use p2panda_rs::test_utils::fixtures::{key_pair, random_document_view_id}; - use p2panda_rs::{document::DocumentViewId, schema::FieldType}; - use rstest::rstest; - - use crate::db::stores::test_utils::{add_schema, test_db, TestDatabase, TestDatabaseRunner}; - - #[rstest] - fn test_get_schema_for_view( - key_pair: KeyPair, - #[from(test_db)] - #[with(1, 1, 1)] - runner: TestDatabaseRunner, - ) { - runner.with_db_teardown(|mut db: TestDatabase| async move { - let schema = add_schema( - &mut db, - "venue", - vec![ - ("description", FieldType::String), - ("profile_name", FieldType::String), - ], - &key_pair, - ) - .await; - - let document_view_id = match schema.id() { - SchemaId::Application(_, view_id) => view_id, - _ => panic!("Invalid schema id"), - }; - - let result = db.store.get_schema_by_document_view(document_view_id).await; - - assert!(result.is_ok()); - // This is the schema name of the schema document we published. 
- assert_eq!(result.unwrap().unwrap().name(), "schema_definition"); - }); - } - - #[rstest] - fn test_get_schema_for_missing_view( - random_document_view_id: DocumentViewId, - #[from(test_db)] - #[with(1, 1, 1)] - runner: TestDatabaseRunner, - ) { - runner.with_db_teardown(|db: TestDatabase| async move { - let result = db - .store - .get_schema_by_document_view(&random_document_view_id) - .await; - - assert!(result.is_ok()); - assert!(result.unwrap().is_none()); - }); - } -} diff --git a/aquadoggo/src/db/stores/document.rs b/aquadoggo/src/db/stores/document.rs index 227e9a907..df9bd75f0 100644 --- a/aquadoggo/src/db/stores/document.rs +++ b/aquadoggo/src/db/stores/document.rs @@ -1,300 +1,227 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use std::collections::BTreeMap; - +//! This module implements `DocumentStore` on `SqlStore` as well as aditional insertion methods +//! specific to the `aquadoggo` storage patterns. The resulting interface offers all storage +//! methods used for persisting and retrieving materialised documents. +//! +//! Documents are created and mutated via operations which arrive at a node. Once validated, the +//! new operations are sent straight to the materialiser service which builds the documents +//! themselves. On completion, the resultant documents are stored and can be retrieved using the +//! methods defined here. +//! +//! The whole document store can be seen as a live cache. All it's content is derived from +//! operations already stored on the node. It allows easy and quick access to current or pinned +//! values. +//! +//! Documents are stored in the database in three tables. These are `documents`, `document_views` +//! and `document_view_fields`. A `document` can have many `document_views`, one showing the +//! current state and any number of historic views. A `document_view` itself a unique id plus one +//! or many `document_view_fields` which are pointers to the operation holding the current value +//! for the documents' field. +//! +//! As mentioned above, a useful property of documents is that they make it easy to retain past +//! state, we call these states document views. When a document is updated it gets a new state, or +//! view, which can be referred to by a globally unique document view id. +//! +//! The getter methods allow retrieving a document by it's `DocumentId` or it's +//! `DocumentViewId`. The former always returns the most current document state, the latter +//! returns the specific document view if it has already been materialised and stored. Although it +//! is possible to construct a document at any point in it's history if all operations are +//! retained, we use a system of "pinned relations" to identify and materialise only views we +//! explicitly wish to keep. 
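A short usage sketch of the two getters described above; the function name and parameters are illustrative only, the store methods are the ones defined in this module:

use p2panda_rs::document::traits::AsDocument;
use p2panda_rs::document::{DocumentId, DocumentViewId};
use p2panda_rs::storage_provider::error::DocumentStorageError;
use p2panda_rs::storage_provider::traits::DocumentStore;

use crate::db::SqlStore;

async fn inspect_document(
    store: &SqlStore,
    id: &DocumentId,
    pinned_view: &DocumentViewId,
) -> Result<(), DocumentStorageError> {
    // Latest state of the document, `None` if it is unknown or has been deleted.
    if let Some(document) = store.get_document(id).await? {
        println!("current view: {}", document.view_id());
    }

    // A historic state, only returned if this view was materialised and stored ("pinned").
    if let Some(document) = store.get_document_by_view_id(pinned_view).await? {
        println!("pinned view: {}", document.view_id());
    }

    Ok(())
}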
use async_trait::async_trait; use futures::future::try_join_all; +use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::{Document, DocumentId, DocumentView, DocumentViewId}; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::error::DocumentStorageError; use p2panda_rs::storage_provider::traits::DocumentStore; -use sqlx::{query, query_as}; +use sqlx::any::AnyQueryResult; +use sqlx::{query, query_as, query_scalar}; -use crate::db::models::document::DocumentViewFieldRow; -use crate::db::provider::SqlStorage; -use crate::db::utils::parse_document_view_field_rows; +use crate::db::models::utils::parse_document_view_field_rows; +use crate::db::models::{DocumentRow, DocumentViewFieldRow}; +use crate::db::types::StorageDocument; +use crate::db::Pool; +use crate::db::SqlStore; +/// Implementation of #[async_trait] -impl DocumentStore for SqlStorage { - /// Insert a document_view into the db. - /// - /// Internally, this method performs two different operations: - /// - insert a row for every document_view_field present on this view - /// - insert a row for the document_view itself - /// - /// If either of these operations fail and error is returned. - async fn insert_document_view( - &self, - document_view: &DocumentView, - schema_id: &SchemaId, - ) -> Result<(), DocumentStorageError> { - // Start a transaction, any db insertions after this point, and before the `commit()` - // will be rolled back in the event of an error. - let transaction = self - .pool - .begin() - .await - .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; +impl DocumentStore for SqlStore { + type Document = StorageDocument; - // Insert document view field relations into the db - let field_relations_insertion_result = - try_join_all(document_view.iter().map(|(name, value)| { - query( - " - INSERT INTO - document_view_fields ( - document_view_id, - operation_id, - name - ) - VALUES - ($1, $2, $3) - ", - ) - .bind(document_view.id().to_string()) - .bind(value.id().as_str().to_owned()) - .bind(name) - .execute(&self.pool) - })) - .await - .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - - // Insert document view into the db - let document_view_insertion_result = query( - " - INSERT INTO - document_views ( - document_view_id, - schema_id - ) - VALUES - ($1, $2) - ", - ) - .bind(document_view.id().to_string()) - .bind(schema_id.to_string()) - .execute(&self.pool) - .await - .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - - // Check every insertion performed affected exactly 1 row. - if document_view_insertion_result.rows_affected() != 1 - || field_relations_insertion_result - .iter() - .any(|query_result| query_result.rows_affected() != 1) - { - return Err(DocumentStorageError::DocumentViewInsertionError( - document_view.id().clone(), - )); - } - - // Commit the transaction. - transaction - .commit() - .await - .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - - Ok(()) - } - - /// Get a document view from the database by it's id. + /// Get a document from the store by it's `DocumentId`. /// - /// Internally, this method retrieve all document rows related to this document view id - /// and then from these constructs the document view itself. + /// Retrieves a document in it's most current state from the store. Ignores documents which + /// contain a DELETE operation. /// - /// An error is returned if any of the above steps fail or a fatal database error occured. 
- async fn get_document_view_by_id( + /// An error is returned only if a fatal database error occurs. + async fn get_document( &self, - id: &DocumentViewId, - ) -> Result, DocumentStorageError> { - let document_view_field_rows = query_as::<_, DocumentViewFieldRow>( + id: &DocumentId, + ) -> Result, DocumentStorageError> { + // Retrieve one row from the document table matching on the passed id. + let document_row = query_as::<_, DocumentRow>( " SELECT - document_view_fields.document_view_id, - document_view_fields.operation_id, - document_view_fields.name, - operation_fields_v1.list_index, - operation_fields_v1.field_type, - operation_fields_v1.value + documents.document_id, + documents.document_view_id, + documents.schema_id, + operations_v1.public_key, + documents.is_deleted FROM - document_view_fields - LEFT JOIN operation_fields_v1 + documents + LEFT JOIN operations_v1 ON - operation_fields_v1.operation_id = document_view_fields.operation_id - AND - operation_fields_v1.name = document_view_fields.name + operations_v1.operation_id = $1 WHERE - document_view_fields.document_view_id = $1 - ORDER BY - operation_fields_v1.list_index ASC + documents.document_id = $1 AND documents.is_deleted = false ", ) .bind(id.to_string()) - .fetch_all(&self.pool) + .fetch_optional(&self.pool) .await .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - let view = if document_view_field_rows.is_empty() { - None - } else { - Some(DocumentView::new( - id, - &parse_document_view_field_rows(document_view_field_rows), - )) + // If no row matched we return None here, otherwise unwrap safely. + let document_row = match document_row { + Some(document_row) => document_row, + None => return Ok(None), + }; + + // We now want to retrieve the view (current key-value map) for this document, as we + // already filtered out deleted documents in the query above we can expect all documents + // we handle here to have an associated view in the database. + let document_view_id = document_row.document_view_id.parse().unwrap(); + let document_view_field_rows = + get_document_view_field_rows(&self.pool, &document_view_id).await?; + // this method assumes all values coming from the db are already validated and so + // unwraps where errors might occur. + let document_view_fields = Some(parse_document_view_field_rows(document_view_field_rows)); + + // Construct a `StorageDocument` based on the retrieved values. + let document = StorageDocument { + id: id.to_owned(), + view_id: document_view_id, + schema_id: document_row.schema_id.parse().unwrap(), + fields: document_view_fields, + author: document_row.public_key.parse().unwrap(), + deleted: document_row.is_deleted, }; - Ok(view) + Ok(Some(document)) } - /// Insert a document and it's latest document view into the database. + /// Get a document from the database by `DocumentViewId`. /// - /// This method inserts or updates a row into the documents table and then makes a call - /// to `insert_document_view()` to insert the new document view for this document. + /// Get's a document at a specific point in it's history. Only returns views that have already + /// been materialised and persisted in the store. These are likely to be "pinned views" which + /// are relations from other documents, in which case the materialiser service will have + /// identified and materialised them ready for querying. /// - /// Note: "out-of-date" document views will remain in storage when a document already - /// existed and is updated. 
If they are not needed for anything else they can be garbage - /// collected. - async fn insert_document(&self, document: &Document) -> Result<(), DocumentStorageError> { - // Start a transaction, any db insertions after this point, and before the `commit()` - // will be rolled back in the event of an error. - let transaction = self - .pool - .begin() - .await - .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - - // Insert document view into the db - let document_insertion_result = query( + /// Any view which existed as part of a document which is now deleted is ignored. + /// + /// An error is returned only if a fatal database error occurs. + async fn get_document_by_view_id( + &self, + id: &DocumentViewId, + ) -> Result, DocumentStorageError> { + // Retrieve the id of the document which the passed view id comes from. + let document_id: Option = query_scalar( " - INSERT INTO - documents ( - document_id, - document_view_id, - is_deleted, - schema_id - ) - VALUES - ($1, $2, $3, $4) - ON CONFLICT(document_id) DO UPDATE SET - document_view_id = $2, - is_deleted = $3 + SELECT + document_id + FROM + document_views + WHERE + document_view_id = $1 ", ) - .bind(document.id().as_str()) - .bind(document.view_id().to_string()) - .bind(document.is_deleted()) - .bind(document.schema().to_string()) - .execute(&self.pool) + .bind(id.to_string()) + .fetch_optional(&self.pool) .await .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - if document_insertion_result.rows_affected() != 1 { - return Err(DocumentStorageError::DocumentInsertionError( - document.id().clone(), - )); - } - - if !document.is_deleted() && document.view().is_some() { - let document_view = - DocumentView::new(document.view_id(), document.view().unwrap().fields()); - - self.insert_document_view(&document_view, document.schema()) - .await?; + // Parse the document id if one was found otherwise we can already return None here as no + // document for the passed view could be found. + let document_id: DocumentId = match document_id { + Some(document_id) => document_id.parse().unwrap(), + None => return Ok(None), }; - // Commit the transaction. - transaction - .commit() - .await - .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - - Ok(()) - } - - /// Get a documents' latest document view from the database by it's `DocumentId`. - /// - /// Retrieve the current document view for a specified document. If the document - /// has been deleted then None is returned. An error is returned is a fatal database - /// error occurs. - async fn get_document_by_id( - &self, - id: &DocumentId, - ) -> Result, DocumentStorageError> { - let document_view_field_rows = query_as::<_, DocumentViewFieldRow>( + // Get a row for the document matching to the found document id. 
+ let document_row = query_as::<_, DocumentRow>( " SELECT - document_view_fields.document_view_id, - document_view_fields.operation_id, - document_view_fields.name, - operation_fields_v1.list_index, - operation_fields_v1.field_type, - operation_fields_v1.value + documents.document_id, + documents.document_view_id, + documents.schema_id, + operations_v1.public_key, + documents.is_deleted FROM documents - LEFT JOIN document_view_fields - ON - documents.document_view_id = document_view_fields.document_view_id - LEFT JOIN operation_fields_v1 + LEFT JOIN operations_v1 ON - document_view_fields.operation_id = operation_fields_v1.operation_id - AND - document_view_fields.name = operation_fields_v1.name + operations_v1.operation_id = $1 WHERE documents.document_id = $1 AND documents.is_deleted = false - ORDER BY - operation_fields_v1.list_index ASC ", ) - .bind(id.as_str()) - .fetch_all(&self.pool) + .bind(document_id.to_string()) + .fetch_optional(&self.pool) .await .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - if document_view_field_rows.is_empty() { - return Ok(None); - } + // Unwrap as we can assume a document for the found document id exists. + let document_row = document_row.unwrap(); + + // We now want to retrieve the view (current key-value map) for this document, as we + // already filtered out deleted documents in the query above we can expect all documents + // we handle here to have an associated view in the database. + let document_view_field_rows = get_document_view_field_rows(&self.pool, &id).await?; + // this method assumes all values coming from the db are already validated and so + // unwraps where errors might occur. + let document_view_fields = Some(parse_document_view_field_rows(document_view_field_rows)); + + // Construct a `StorageDocument` based on the retrieved values. + let document = StorageDocument { + id: document_row.document_id.parse().unwrap(), + view_id: id.to_owned(), /* set the requested document view id not the current */ + schema_id: document_row.schema_id.parse().unwrap(), + fields: document_view_fields, + author: document_row.public_key.parse().unwrap(), + deleted: document_row.is_deleted, + }; - Ok(Some(DocumentView::new( - &document_view_field_rows[0] - .document_view_id - .parse() - .unwrap(), - &parse_document_view_field_rows(document_view_field_rows), - ))) + Ok(Some(document)) } - /// Get all documents which follow the passed schema id from the database + /// Get all documents which follow the passed schema id. /// - /// Retrieve the latest document view for all documents which follow the specified schema. + /// Retrieves all documents, with their most current views, which follow the specified schema. + /// Deleted documents are not included. /// - /// An error is returned is a fatal database error occurs. + /// An error is returned only if a fatal database error occurs. async fn get_documents_by_schema( &self, schema_id: &SchemaId, - ) -> Result, DocumentStorageError> { - let document_view_field_rows = query_as::<_, DocumentViewFieldRow>( + ) -> Result, DocumentStorageError> { + // Retrieve all rows from the document table where the passed schema_id matches. 
+ let document_rows = query_as::<_, DocumentRow>( " SELECT + documents.document_id, documents.document_view_id, - document_view_fields.operation_id, - document_view_fields.name, - operation_fields_v1.list_index, - operation_fields_v1.field_type, - operation_fields_v1.value + documents.schema_id, + operations_v1.public_key, + documents.is_deleted FROM documents - LEFT JOIN document_view_fields - ON - documents.document_view_id = document_view_fields.document_view_id - LEFT JOIN operation_fields_v1 + LEFT JOIN operations_v1 ON - document_view_fields.operation_id = operation_fields_v1.operation_id - AND - document_view_fields.name = operation_fields_v1.name + operations_v1.operation_id = documents.document_id WHERE - documents.schema_id = $1 AND documents.is_deleted = false - ORDER BY - operation_fields_v1.list_index ASC + documents.schema_id = $1 AND documents.is_deleted = false ", ) .bind(schema_id.to_string()) @@ -302,66 +229,308 @@ impl DocumentStore for SqlStorage { .await .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; - // We need to group all returned field rows by their document_view_id so we can then - // build schema from them. - let mut grouped_document_field_rows: BTreeMap> = - BTreeMap::new(); - - for row in document_view_field_rows { - let existing_view = grouped_document_field_rows.get_mut(&row.document_view_id); - if let Some(existing_view) = existing_view { - existing_view.push(row) - } else { - grouped_document_field_rows.insert(row.clone().document_view_id, vec![row]); + // If no rows were found we can already return an empty vec here. + if document_rows.is_empty() { + return Ok(vec![]); + } + + // For every row we found we want to retrieve the current view as well. + let mut documents: Vec = vec![]; + for document_row in document_rows { + let document_view_id = document_row.document_view_id.parse().unwrap(); + // We now want to retrieve the view (current key-value map) for this document, as we + // already filtered out deleted documents in the query above we can expect all documents + // we handle here to have an associated view in the database. + let document_view_field_rows = + get_document_view_field_rows(&self.pool, &document_view_id).await?; + // this method assumes all values coming from the db are already validated and so + // unwraps where errors might occur. + let document_view_fields = + Some(parse_document_view_field_rows(document_view_field_rows)); + + // Construct a `StorageDocument` based on the retrieved values. + let document = StorageDocument { + id: document_row.document_id.parse().unwrap(), + view_id: document_view_id, + schema_id: document_row.schema_id.parse().unwrap(), + fields: document_view_fields, + author: document_row.public_key.parse().unwrap(), + deleted: document_row.is_deleted, }; + + documents.push(document) + } + + Ok(documents) + } +} + +/// Storage api offering an interface for inserting documents and document views into the database. +/// +/// These methods are specific to `aquadoggo`s approach to document caching and are defined +/// outside of the required `DocumentStore` trait. +impl SqlStore { + /// Insert a document into the database. + /// + /// This method inserts or updates a row in the documents table and then inserts the documents + /// current view and field values into the `document_views` and `document_view_fields` tables + /// respectively. + /// + /// If the document already existed in the store then it's current view and view id will be + /// updated with those contained on the passed document. 
+ /// + /// If any of the operations fail all insertions are rolled back. + /// + /// An error is returned in the case of a fatal database error. + /// + /// Note: "out-of-date" document views will remain in storage when a document already existed + /// and is updated. If they are not needed for anything else they can be garbage collected. + pub async fn insert_document(&self, document: &Document) -> Result<(), DocumentStorageError> { + // Start a transaction, any db insertions after this point, and before the `commit()` + // can be rolled back in the event of an error. + let transaction = self + .pool + .begin() + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; + + // Insert the document and view to the database, in the case of an error all insertions + // since the transaction was instantiated above will be rolled back. + match insert_document(&self.pool, document).await { + // Commit the transaction here if no error occurred. + Ok(_) => transaction + .commit() + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string())), + // Rollback here if an error occurred. + Err(err) => { + transaction + .rollback() + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; + Err(err) + } } + } - let document_views: Vec = grouped_document_field_rows - .iter() - .map(|(id, document_field_row)| { - let fields = parse_document_view_field_rows(document_field_row.to_owned()); - DocumentView::new(&id.parse().unwrap(), &fields) - }) - .collect(); + /// Insert a document view into the database. + /// + /// This method performs one insertion in the `document_views` table and at least one in the + /// `document_view_fields` table. If either of these operations fail then all insertions are + /// rolled back. + /// + /// An error is returned in the case of a fatal storage error. + pub async fn insert_document_view( + &self, + document_view: &DocumentView, + document_id: &DocumentId, + schema_id: &SchemaId, + ) -> Result<(), DocumentStorageError> { + // Start a transaction, any db insertions after this point, and before the `commit()` + // will be rolled back in the event of an error. + let transaction = self + .pool + .begin() + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; + + // Insert the document view into the `document_views` table. Rollback insertions if an error occurs. + match insert_document_view(&self.pool, document_view, document_id, schema_id).await { + Ok(_) => (), + Err(err) => { + transaction + .rollback() + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; + return Err(err); + } + }; - Ok(document_views) + // Insert the document view fields into the `document_view_fields` table. Rollback + // insertions if an error occurs. + match insert_document_fields(&self.pool, &document_view).await { + Ok(_) => (), + Err(err) => { + transaction + .rollback() + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; + return Err(err); + } + }; + + // Commit the transaction here as no errors occurred. + transaction + .commit() + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string())) } } +// Helper method for getting rows from the `document_view_fields` table. +async fn get_document_view_field_rows( + pool: &Pool, + id: &DocumentViewId, +) -> Result, DocumentStorageError> { + // Get all rows which match against the passed document view id. 
+ // + // This query performs a join against the `operation_fields_v1` table as this is where the + // actual field values live. The `document_view_fields` table defines relations between a + // document view and the operation values which hold it's field values. + // + // Each field has one row, or in the case of list values (pinned relations, or relation lists) + // then one row exists for every item in the list. The `list_index` column is used for + // consistently ordering list items. + query_as::<_, DocumentViewFieldRow>( + " + SELECT + document_view_fields.document_view_id, + document_view_fields.operation_id, + document_view_fields.name, + operation_fields_v1.list_index, + operation_fields_v1.field_type, + operation_fields_v1.value + FROM + document_view_fields + LEFT JOIN operation_fields_v1 + ON + document_view_fields.operation_id = operation_fields_v1.operation_id + AND + document_view_fields.name = operation_fields_v1.name + WHERE + document_view_fields.document_view_id = $1 + ORDER BY + operation_fields_v1.list_index ASC + ", + ) + .bind(id.to_string()) + .fetch_all(pool) + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string())) +} + +// Helper method for inserting rows in the `document_view_fields` table. +async fn insert_document_fields( + pool: &Pool, + document_view: &DocumentView, +) -> Result, DocumentStorageError> { + // Insert document view field relations into the db + try_join_all(document_view.iter().map(|(name, value)| { + query( + " + INSERT INTO + document_view_fields ( + document_view_id, + operation_id, + name + ) + VALUES + ($1, $2, $3) + ", + ) + .bind(document_view.id().to_string()) + .bind(value.id().as_str().to_owned()) + .bind(name) + .execute(pool) + })) + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string())) +} + +// Helper method for inserting document views into the `document_views` table. +async fn insert_document_view( + pool: &Pool, + document_view: &DocumentView, + document_id: &DocumentId, + schema_id: &SchemaId, +) -> Result { + query( + " + INSERT INTO + document_views ( + document_view_id, + document_id, + schema_id + ) + VALUES + ($1, $2, $3) + ", + ) + .bind(document_view.id().to_string()) + .bind(document_id.to_string()) + .bind(schema_id.to_string()) + .execute(pool) + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string())) +} + +// Helper method for inserting documents into the database. For this, insertions are made in the +// `documents`, `document_views` and `document_view_fields` tables. +async fn insert_document(pool: &Pool, document: &Document) -> Result<(), DocumentStorageError> { + // Insert or update the document to the `documents` table. + query( + " + INSERT INTO + documents ( + document_id, + document_view_id, + is_deleted, + schema_id + ) + VALUES + ($1, $2, $3, $4) + ON CONFLICT(document_id) DO UPDATE SET + document_view_id = $2, + is_deleted = $3 + ", + ) + .bind(document.id().as_str()) + .bind(document.view_id().to_string()) + .bind(document.is_deleted()) + .bind(document.schema_id().to_string()) + .execute(pool) + .await + .map_err(|e| DocumentStorageError::FatalStorageError(e.to_string()))?; + + // If the document is not deleted, then we also want to insert it's view and fields. + if !document.is_deleted() && document.view().is_some() { + // Construct the view, unwrapping the document view fields as we checked they exist above. 
+ let document_view = + DocumentView::new(document.view_id(), document.view().unwrap().fields()); + + // Insert the document view. + insert_document_view(pool, &document_view, document.id(), document.schema_id()).await?; + // Insert the document view fields. + insert_document_fields(pool, &document_view).await?; + }; + + Ok(()) +} + #[cfg(test)] mod tests { - use p2panda_rs::document::{ - Document, DocumentBuilder, DocumentId, DocumentViewFields, DocumentViewId, - }; + use p2panda_rs::document::materialization::build_graph; + use p2panda_rs::document::traits::AsDocument; + use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewFields, DocumentViewId}; use p2panda_rs::operation::traits::AsOperation; use p2panda_rs::operation::{Operation, OperationId}; - use p2panda_rs::storage_provider::traits::StorageProvider; + use p2panda_rs::storage_provider::traits::{DocumentStore, OperationStore}; use p2panda_rs::test_utils::constants; use p2panda_rs::test_utils::fixtures::{ - operation, random_document_view_id, random_operation_id, + operation, random_document_id, random_document_view_id, random_operation_id, }; + use p2panda_rs::WithId; use rstest::rstest; - use crate::db::stores::document::{DocumentStore, DocumentView}; - use crate::db::stores::test_utils::{doggo_schema, test_db, TestDatabase, TestDatabaseRunner}; - - async fn build_document(store: &S, document_id: &DocumentId) -> Document { - // We retrieve the operations. - let document_operations = store - .get_operations_by_document_id(document_id) - .await - .expect("Get operations"); - - // Then we construct the document. - DocumentBuilder::new(document_operations) - .build() - .expect("Build the document") - } + use crate::db::stores::document::DocumentView; + use crate::db::stores::test_utils::{ + build_document, doggo_schema, test_db, TestDatabase, TestDatabaseRunner, + }; #[rstest] fn insert_and_get_one_document_view( #[from(test_db)] - #[with(1, 1, 1)] + #[with(2, 1, 1)] runner: TestDatabaseRunner, ) { runner.with_db_teardown(|db: TestDatabase| async move { @@ -369,43 +538,100 @@ mod tests { let document_id = db.test_data.documents[0].clone(); // Get the operations and build the document. - let document = build_document(&db.store, &document_id).await; + let operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); + let document_builder = DocumentBuilder::from(&operations); + + let create_operation = WithId::::id( + operations + .iter() + .find(|operation| operation.is_create()) + .unwrap(), + ) + .to_owned(); + let update_operation = WithId::::id( + operations + .iter() + .find(|operation| operation.is_update()) + .unwrap(), + ) + .to_owned(); + + let document_at_view_1 = document_builder + .build_to_view_id(Some(create_operation.into())) + .unwrap(); + let document_at_view_2 = document_builder + .build_to_view_id(Some(update_operation.into())) + .unwrap(); - // Get it's document view and insert it in the database. - let document_view = document.view().expect("Get document view"); + // Insert the document into the store + let result = db.store.insert_document(&document_at_view_2).await; + assert!(result.is_ok()); - // Insert the view into the store. 
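The surrounding test exercises the ordering constraint of the new insertion methods: a document has to be stored before additional views can be attached to it. As a free-standing sketch (the helper name and the `current`/`historic` documents are hypothetical), the same flow looks like this:

use p2panda_rs::document::traits::AsDocument;
use p2panda_rs::document::Document;
use p2panda_rs::storage_provider::error::DocumentStorageError;

use crate::db::SqlStore;

// Persist the current state of a document first, then pin one of its earlier views.
async fn pin_historic_view(
    store: &SqlStore,
    current: &Document,
    historic: &Document,
) -> Result<(), DocumentStorageError> {
    // The document and its current view need to exist before further views can be attached.
    store.insert_document(current).await?;

    // Additional views are now keyed by the document id they belong to.
    let view = historic.view().expect("Document is not deleted");
    store
        .insert_document_view(&view, historic.id(), historic.schema_id())
        .await
}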
+ // Insert it's other view into the store (now this works as the document exists) let result = db .store - .insert_document_view(document_view, document.schema()) + .insert_document_view( + &document_at_view_1.view().unwrap(), + document_at_view_1.id(), + document_at_view_1.schema_id(), + ) .await; assert!(result.is_ok()); - // We should be able to retrieve the document view now by it's view_id. - let retrieved_document_view = db + // We should be able to retrieve the document at either of it's views now. + let retrieved_document_at_view_1 = db .store - .get_document_view_by_id(document_view.id()) + .get_document_by_view_id(document_at_view_1.view_id()) .await .unwrap() .unwrap(); - // The retrieved view should the expected fields. - assert_eq!(retrieved_document_view.len(), 9); + let retrieved_document_at_view_2 = db + .store + .get_document_by_view_id(document_at_view_2.view_id()) + .await + .unwrap() + .unwrap(); - for key in [ - "username", - "age", - "height", - "is_admin", - "profile_picture", - "many_profile_pictures", - "special_profile_picture", - "many_special_profile_pictures", - "another_relation_field", - ] { - assert!(retrieved_document_view.get(key).is_some()); - assert_eq!(retrieved_document_view.get(key), document_view.get(key)); + for (name, _) in document_at_view_1.fields().unwrap().iter() { + println!("{name}"); + assert_eq!( + document_at_view_1.get(name), + retrieved_document_at_view_1.get(name) + ) } + + // The retrieved document views should match the inserted ones. + assert_eq!(retrieved_document_at_view_1.id(), document_at_view_1.id()); + assert_eq!( + retrieved_document_at_view_1.view_id(), + document_at_view_1.view_id() + ); + assert_eq!( + retrieved_document_at_view_1.fields(), + document_at_view_1.fields() + ); + assert_eq!(retrieved_document_at_view_2.id(), document_at_view_2.id()); + assert_eq!( + retrieved_document_at_view_2.view_id(), + document_at_view_2.view_id() + ); + assert_eq!( + retrieved_document_at_view_2.fields(), + document_at_view_2.fields() + ); + + // If we retrieve the document by it's id, we expect the view inserted with the document + // itself. + let document = db.store.get_document(&document_id).await.unwrap().unwrap(); + + assert_eq!(document.id(), document_at_view_2.id()); + assert_eq!(document.view_id(), document_at_view_2.view_id()); + assert_eq!(document.fields(), document_at_view_2.fields()); }); } @@ -421,7 +647,7 @@ mod tests { // with that id exists. let view_does_not_exist = db .store - .get_document_view_by_id(&random_document_view_id) + .get_document_by_view_id(&random_document_view_id) .await .unwrap(); @@ -433,6 +659,7 @@ mod tests { #[rstest] fn insert_document_view_with_missing_operation( #[from(random_operation_id)] operation_id: OperationId, + #[from(random_document_id)] document_id: DocumentId, #[from(random_document_view_id)] document_view_id: DocumentViewId, #[from(test_db)] runner: TestDatabaseRunner, operation: Operation, @@ -451,7 +678,7 @@ mod tests { // operation which is already in the database. let result = db .store - .insert_document_view(&document_view, constants::schema().id()) + .insert_document_view(&document_view, &document_id, constants::schema().id()) .await; assert!(result.is_err()); @@ -469,7 +696,6 @@ mod tests { let document_id = db.test_data.documents[0].clone(); // Build the document and view. 
let document = build_document(&db.store, &document_id).await; - let expected_document_view = document.view().expect("Get document view"); // The document is successfully inserted into the database, this // relies on the operations already being present and would fail @@ -478,26 +704,17 @@ mod tests { assert!(result.is_ok()); // We can retrieve the most recent document view for this document by it's id. - let most_recent_document_view = db - .store - .get_document_by_id(document.id()) - .await - .unwrap() - .unwrap(); + let retrieved_document = db.store.get_document(document.id()).await.unwrap().unwrap(); // We can retrieve a specific document view for this document by it's view_id. // In this case, that should be the same as the view retrieved above. - let specific_document_view = db + let specific_document = db .store - .get_document_view_by_id(document.view_id()) + .get_document_by_view_id(document.view_id()) .await .unwrap() .unwrap(); - // The retrieved views should both have 9 fields. - assert_eq!(most_recent_document_view.len(), 9); - assert_eq!(specific_document_view.len(), 9); - for key in [ "username", "age", @@ -511,16 +728,10 @@ mod tests { ] { // The values contained in both retrieved document views // should match the expected ones. - assert!(most_recent_document_view.get(key).is_some()); - assert_eq!( - most_recent_document_view.get(key), - expected_document_view.get(key) - ); - assert!(specific_document_view.get(key).is_some()); - assert_eq!( - specific_document_view.get(key), - expected_document_view.get(key) - ); + assert!(retrieved_document.get(key).is_some()); + assert_eq!(retrieved_document.get(key), document.get(key)); + assert!(specific_document.get(key).is_some()); + assert_eq!(specific_document.get(key), document.get(key)); } }); } @@ -549,13 +760,13 @@ mod tests { // We retrieve the most recent view for this document by it's document id, // but as the document is deleted, we should get a none value back. - let document_view = db.store.get_document_by_id(document.id()).await.unwrap(); - assert!(document_view.is_none()); + let document = db.store.get_document(document.id()).await.unwrap(); + assert!(document.is_none()); // We also try to retrieve the specific document view by it's view id. // This should also return none as it is deleted. - let document_view = db.store.get_document_view_by_id(view_id).await.unwrap(); - assert!(document_view.is_none()); + let document = db.store.get_document_by_view_id(view_id).await.unwrap(); + assert!(document.is_none()); }); } @@ -594,10 +805,18 @@ mod tests { // Operations for this document id exist in the database. let document_id = db.test_data.documents[0].clone(); - // Get the operations and build the document. - let document = build_document(&db.store, &document_id).await; - // Get the oredered operations. - let sorted_operations = document.operations(); + // Get the operations for this document and sort them into linear order. + let operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); + let document_builder = DocumentBuilder::from(&operations); + let sorted_operations = build_graph(&document_builder.operations()) + .unwrap() + .sort() + .unwrap() + .sorted(); // We want to test that a document is updated. let mut current_operations = Vec::new(); @@ -619,23 +838,33 @@ mod tests { .expect("Insert document"); // We can retrieve the document's latest view by it's document id. 
- let latest_document_view = db + let retrieved_document = db .store - .get_document_by_id(document.id()) + .get_document(document.id()) .await - .expect("Get document view"); + .expect("Get document") + .expect("Unwrap document"); // And also retrieve the latest document view directly by it's document view id. - let specific_document_view = db + let specific_document = db .store - .get_document_view_by_id(document.view_id()) + .get_document_by_view_id(document.view_id()) .await - .expect("Get document view"); + .expect("Get document") + .expect("Unwrap document"); // The views should equal the current view of the document we inserted. // This includes the value and the view id. - assert_eq!(document.view(), latest_document_view.as_ref()); - assert_eq!(document.view(), specific_document_view.as_ref()); + assert_eq!(document.id(), retrieved_document.id()); + assert_eq!( + document.fields().unwrap(), + retrieved_document.fields().unwrap() + ); + assert_eq!(document.id(), specific_document.id()); + assert_eq!( + document.fields().unwrap(), + specific_document.fields().unwrap() + ); } }) } diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index b082ed6da..9d1986de1 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -2,161 +2,28 @@ use async_trait::async_trait; use lipmaa_link::get_lipmaa_links_back_to; -use p2panda_rs::entry::decode::decode_entry; use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry}; -use p2panda_rs::entry::{EncodedEntry, Entry, LogId, SeqNum, Signature}; +use p2panda_rs::entry::{EncodedEntry, Entry, LogId, SeqNum}; use p2panda_rs::hash::Hash; use p2panda_rs::identity::PublicKey; use p2panda_rs::operation::EncodedOperation; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::error::EntryStorageError; -use p2panda_rs::storage_provider::traits::{EntryStore, EntryWithOperation}; +use p2panda_rs::storage_provider::traits::EntryStore; use sqlx::{query, query_as}; use crate::db::models::EntryRow; -use crate::db::provider::SqlStorage; - -/// A signed entry and it's encoded operation. Entries are the lowest level data type on the -/// p2panda network, they are signed by authors and form bamboo append only logs. The operation is -/// an entries' payload, it contains the data mutations which authors publish. -/// -/// This struct implements the `EntryWithOperation` trait which is required when constructing the -/// `EntryStore`. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct StorageEntry { - /// PublicKey of this entry. - pub(crate) public_key: PublicKey, - - /// Used log for this entry. - pub(crate) log_id: LogId, - - /// Sequence number of this entry. - pub(crate) seq_num: SeqNum, - - /// Hash of skiplink Bamboo entry. - pub(crate) skiplink: Option, - - /// Hash of previous Bamboo entry. - pub(crate) backlink: Option, - - /// Byte size of payload. - pub(crate) payload_size: u64, - - /// Hash of payload. - pub(crate) payload_hash: Hash, - - /// Ed25519 signature of entry. - pub(crate) signature: Signature, - - /// Encoded entry bytes. - pub(crate) encoded_entry: EncodedEntry, - - /// Encoded entry bytes. - pub(crate) payload: Option, -} - -impl EntryWithOperation for StorageEntry { - fn payload(&self) -> Option<&EncodedOperation> { - self.payload.as_ref() - } -} - -impl AsEntry for StorageEntry { - /// Returns public key of entry. - fn public_key(&self) -> &PublicKey { - &self.public_key - } - - /// Returns log id of entry. 
- fn log_id(&self) -> &LogId { - &self.log_id - } - - /// Returns sequence number of entry. - fn seq_num(&self) -> &SeqNum { - &self.seq_num - } - - /// Returns hash of skiplink entry when given. - fn skiplink(&self) -> Option<&Hash> { - self.skiplink.as_ref() - } - - /// Returns hash of backlink entry when given. - fn backlink(&self) -> Option<&Hash> { - self.backlink.as_ref() - } - - /// Returns payload size of operation. - fn payload_size(&self) -> u64 { - self.payload_size - } - - /// Returns payload hash of operation. - fn payload_hash(&self) -> &Hash { - &self.payload_hash - } - - /// Returns signature of entry. - fn signature(&self) -> &Signature { - &self.signature - } -} - -impl AsEncodedEntry for StorageEntry { - /// Generates and returns hash of encoded entry. - fn hash(&self) -> Hash { - self.encoded_entry.hash() - } - - /// Returns entry as bytes. - fn into_bytes(&self) -> Vec { - self.encoded_entry.into_bytes() - } - - /// Returns payload size (number of bytes) of total encoded entry. - fn size(&self) -> u64 { - self.encoded_entry.size() - } -} - -/// `From` implementation for converting an `EntryRow` into a `StorageEntry`. This is needed when -/// retrieving entries from the database. The `sqlx` crate coerces returned entry rows into -/// `EntryRow` but we want them as `StorageEntry` which contains typed values. -impl From for StorageEntry { - fn from(entry_row: EntryRow) -> Self { - let encoded_entry = EncodedEntry::from_bytes( - &hex::decode(entry_row.entry_bytes) - .expect("Decode entry hex entry bytes from database"), - ); - let entry = decode_entry(&encoded_entry).expect("Decoding encoded entry from database"); - StorageEntry { - public_key: entry.public_key().to_owned(), - log_id: entry.log_id().to_owned(), - seq_num: entry.seq_num().to_owned(), - skiplink: entry.skiplink().cloned(), - backlink: entry.backlink().cloned(), - payload_size: entry.payload_size(), - payload_hash: entry.payload_hash().to_owned(), - signature: entry.signature().to_owned(), - encoded_entry, - // We unwrap now as all entries currently contain a payload. - payload: entry_row.payload_bytes.map(|payload| { - EncodedOperation::from_bytes( - &hex::decode(payload).expect("Decode entry payload from database"), - ) - }), - } - } -} +use crate::db::types::StorageEntry; +use crate::db::SqlStore; /// Implementation of `EntryStore` trait which is required when constructing a `StorageProvider`. /// -/// Handles storage and retrieval of entries in the form of`StorageEntry` which implements the -/// required `EntryWithOperation` trait. An intermediary struct `EntryRow` is also used when retrieving -/// an entry from the database. +/// Handles storage and retrieval of entries in the form of `StorageEntry`. An intermediary struct +/// `EntryRow` is used when retrieving an entry from the database. #[async_trait] -impl EntryStore for SqlStorage { +impl EntryStore for SqlStore { + type Entry = StorageEntry; + /// Insert an entry into storage. /// /// Returns an error if the insertion doesn't result in exactly one @@ -209,10 +76,7 @@ impl EntryStore for SqlStorage { /// Returns a result containing the entry wrapped in an option if it was found successfully. /// Returns `None` if the entry was not found in storage. Errors when a fatal storage error /// occured. 
- async fn get_entry_by_hash( - &self, - hash: &Hash, - ) -> Result, EntryStorageError> { + async fn get_entry(&self, hash: &Hash) -> Result, EntryStorageError> { let entry_row = query_as::<_, EntryRow>( " SELECT @@ -453,12 +317,12 @@ impl EntryStore for SqlStorage { #[cfg(test)] mod tests { use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry}; - use p2panda_rs::entry::{EncodedEntry, Entry, LogId, SeqNum}; + use p2panda_rs::entry::{EncodedEntry, Entry, EntryBuilder, LogId, SeqNum}; use p2panda_rs::hash::Hash; use p2panda_rs::identity::KeyPair; use p2panda_rs::operation::EncodedOperation; use p2panda_rs::schema::SchemaId; - use p2panda_rs::storage_provider::traits::{EntryStore, EntryWithOperation}; + use p2panda_rs::storage_provider::traits::EntryStore; use p2panda_rs::test_utils::fixtures::{encoded_entry, encoded_operation, entry, random_hash}; use rstest::rstest; @@ -482,7 +346,7 @@ mod tests { // Retrieve the entry again by it's hash. let retrieved_entry = db .store - .get_entry_by_hash(&encoded_entry.hash()) + .get_entry(&encoded_entry.hash()) .await .expect("Get entry") .expect("Unwrap entry"); @@ -501,14 +365,7 @@ mod tests { encoded_operation, retrieved_entry.payload().unwrap().to_owned() ); - - // Convert the retrieved entry back into the types we inserted. - let retreved_entry: Entry = retrieved_entry.clone().into(); - let retreved_encoded_entry: EncodedEntry = retrieved_entry.into(); - - // The types should match. - assert_eq!(retreved_entry, entry); - assert_eq!(retreved_encoded_entry, encoded_entry); + assert_eq!(retrieved_entry.encoded_entry, encoded_entry); }); } @@ -520,25 +377,30 @@ mod tests { ) { runner.with_db_teardown(|db: TestDatabase| async move { // The public key of the author who published the entries in the database - let public_key = db.test_data.key_pairs[0].public_key(); + let key_pair = &db.test_data.key_pairs[0]; // We get back the first entry. let first_entry = db .store - .get_entry_at_seq_num(&public_key, &LogId::default(), &SeqNum::new(1).unwrap()) + .get_entry_at_seq_num( + &key_pair.public_key(), + &LogId::default(), + &SeqNum::new(1).unwrap(), + ) .await .expect("Get entry") .unwrap(); + // Construct a new entry from it with the same values. + let entry = EntryBuilder::new() + .sign(first_entry.payload().unwrap(), key_pair) + .unwrap(); + // We try to publish it again which should error as entry hashes // have a unique constraint. let result = db .store - .insert_entry( - &first_entry.clone().into(), - &first_entry.clone().into(), - first_entry.payload(), - ) + .insert_entry(&entry, &first_entry.encoded_entry, first_entry.payload()) .await; assert!(result.is_err()); @@ -691,7 +553,7 @@ mod tests { } #[rstest] - fn get_entry_by_hash( + fn get_entry( #[from(test_db)] #[with(20, 1, 1)] runner: TestDatabaseRunner, @@ -712,12 +574,7 @@ mod tests { // The we retrieve them by their hash. let entry_hash = entry.hash(); - let entry_by_hash = db - .store - .get_entry_by_hash(&entry_hash) - .await - .unwrap() - .unwrap(); + let entry_by_hash = db.store.get_entry(&entry_hash).await.unwrap().unwrap(); // The entries should match. assert_eq!(entry, entry_by_hash) @@ -726,11 +583,7 @@ mod tests { // If we try to retrieve with a hash of an entry not in the db then // we should get none back. 
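For completeness, a small sketch of the renamed getter in use; the function name is illustrative and only calls appearing elsewhere in this diff are assumed:

use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry};
use p2panda_rs::hash::Hash;
use p2panda_rs::storage_provider::error::EntryStorageError;
use p2panda_rs::storage_provider::traits::EntryStore;

use crate::db::SqlStore;

async fn log_entry_info(store: &SqlStore, hash: &Hash) -> Result<(), EntryStorageError> {
    // `get_entry` replaces `get_entry_by_hash` and returns the new `StorageEntry` type.
    if let Some(entry) = store.get_entry(hash).await? {
        println!(
            "entry {} in log {} has payload: {}",
            entry.hash().as_str(),
            entry.log_id().as_u64(),
            entry.payload().is_some(),
        );
    }
    Ok(())
}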
let entry_hash_not_in_db = random_hash(); - let entry = db - .store - .get_entry_by_hash(&entry_hash_not_in_db) - .await - .unwrap(); + let entry = db.store.get_entry(&entry_hash_not_in_db).await.unwrap(); assert!(entry.is_none()); }); } diff --git a/aquadoggo/src/db/stores/log.rs b/aquadoggo/src/db/stores/log.rs index df2009905..69df872fe 100644 --- a/aquadoggo/src/db/stores/log.rs +++ b/aquadoggo/src/db/stores/log.rs @@ -6,56 +6,10 @@ use p2panda_rs::entry::LogId; use p2panda_rs::identity::PublicKey; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::error::LogStorageError; -use p2panda_rs::storage_provider::traits::{AsStorageLog, LogStore}; +use p2panda_rs::storage_provider::traits::LogStore; use sqlx::{query, query_scalar}; -use crate::db::provider::SqlStorage; - -/// Tracks the assigment of an public_key's logs to documents and records their schema. -/// -/// This serves as an indexing layer on top of the lower-level bamboo entries. The node updates -/// this data according to what it sees in the newly incoming entries. -/// -/// `StorageLog` implements the trait `AsStorageLog` which is required when defining a `LogStore`. -#[derive(Debug)] -pub struct StorageLog { - public_key: PublicKey, - log_id: LogId, - document_id: DocumentId, - schema_id: SchemaId, -} - -impl AsStorageLog for StorageLog { - fn new( - public_key: &PublicKey, - schema_id: &SchemaId, - document_id: &DocumentId, - log_id: &LogId, - ) -> Self { - Self { - public_key: public_key.to_owned(), - log_id: log_id.to_owned(), - document_id: document_id.to_owned(), - schema_id: schema_id.to_owned(), - } - } - - fn public_key(&self) -> PublicKey { - self.public_key - } - - fn id(&self) -> LogId { - self.log_id - } - - fn document_id(&self) -> DocumentId { - self.document_id.clone() - } - - fn schema_id(&self) -> SchemaId { - self.schema_id.clone() - } -} +use crate::db::SqlStore; /// Implementation of `LogStore` trait which is required when constructing a /// `StorageProvider`. @@ -64,9 +18,15 @@ impl AsStorageLog for StorageLog { /// implements the required `AsStorageLog` trait. An intermediary struct `LogRow` /// is also used when retrieving a log from the database. #[async_trait] -impl LogStore for SqlStorage { +impl LogStore for SqlStore { /// Insert a log into storage. - async fn insert_log(&self, log: StorageLog) -> Result { + async fn insert_log( + &self, + log_id: &LogId, + public_key: &PublicKey, + schema: &SchemaId, + document: &DocumentId, + ) -> Result { let rows_affected = query( " INSERT INTO @@ -80,10 +40,10 @@ impl LogStore for SqlStorage { ($1, $2, $3, $4) ", ) - .bind(log.public_key().to_string()) - .bind(log.id().as_u64().to_string()) - .bind(log.document_id().as_str()) - .bind(log.schema_id().to_string()) + .bind(public_key.to_string()) + .bind(log_id.as_u64().to_string()) + .bind(document.as_str()) + .bind(schema.to_string()) .execute(&self.pool) .await .map_err(|e| LogStorageError::Custom(e.to_string()))? @@ -93,7 +53,7 @@ impl LogStore for SqlStorage { } /// Get a log from storage - async fn get( + async fn get_log_id( &self, public_key: &PublicKey, document_id: &DocumentId, @@ -124,63 +84,6 @@ impl LogStore for SqlStorage { Ok(log_id) } - /// Determines the next unused log_id of an public_key. - /// - /// @TODO: This will be deprecated as functionality is replaced by - /// `latest_log_id + validated next log id methods. 
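The `next_log_id` method removed below is covered by composing the remaining `LogStore` methods; as a rough sketch under that assumption (the helper and its inputs are illustrative, not part of this change):

use p2panda_rs::document::DocumentId;
use p2panda_rs::entry::LogId;
use p2panda_rs::identity::PublicKey;
use p2panda_rs::schema::SchemaId;
use p2panda_rs::storage_provider::error::LogStorageError;
use p2panda_rs::storage_provider::traits::LogStore;

use crate::db::SqlStore;

// Hypothetical helper: register a new log for a document unless one exists already.
async fn ensure_log(
    store: &SqlStore,
    public_key: &PublicKey,
    schema_id: &SchemaId,
    document_id: &DocumentId,
) -> Result<LogId, LogStorageError> {
    if let Some(log_id) = store.get_log_id(public_key, document_id).await? {
        return Ok(log_id);
    }
    // Derive the next free log id from the latest one known for this public key.
    let next_log_id = match store.latest_log_id(public_key).await? {
        Some(log_id) => log_id.next().expect("Max log id reached"),
        None => LogId::default(),
    };
    store.insert_log(&next_log_id, public_key, schema_id, document_id).await?;
    Ok(next_log_id)
}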
- async fn next_log_id(&self, public_key: &PublicKey) -> Result { - // Get all log ids from this public_key - let mut result: Vec = query_scalar( - " - SELECT - log_id - FROM - logs - WHERE - public_key = $1 - ", - ) - .bind(public_key.to_string()) - .fetch_all(&self.pool) - .await - .map_err(|e| LogStorageError::Custom(e.to_string()))?; - - // Convert all strings representing u64 integers to `LogId` instances - let mut log_ids: Vec = result - .iter_mut() - .map(|str| { - str.parse().unwrap_or_else(|_| { - panic!("Corrupt u64 integer found in database: '{0}'", &str) - }) - }) - .collect(); - - // The log id selection below expects log ids in sorted order. We can't easily use SQL - // for this because log IDs are stored as `VARCHAR`, which doesn't sort numbers correctly. - // A good solution would not require reading all existing log ids to find the next - // available one. See this issue: https://github.com/p2panda/aquadoggo/issues/67 - log_ids.sort(); - - // Find next unused document log by comparing the sequence of known log ids with an - // sequence of subsequent log ids until we find a gap. - let mut next_log_id = LogId::default(); - - for log_id in log_ids.iter() { - // Success! Found unused log id - if next_log_id != *log_id { - break; - } - - // Otherwise, try next possible log id - next_log_id = match next_log_id.next() { - Some(log_id) => Ok(log_id), - None => Err(LogStorageError::Custom("Max log id reached".to_string())), - }?; - } - - Ok(next_log_id) - } - /// Determines the latest `LogId` of an public_key. /// /// Returns either the highest known `LogId` for an public_key or `None` if no logs are known from @@ -220,21 +123,16 @@ impl LogStore for SqlStorage { #[cfg(test)] mod tests { use p2panda_rs::document::{DocumentId, DocumentViewId}; - use p2panda_rs::entry::decode::decode_entry; - use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry}; - use p2panda_rs::entry::{EncodedEntry, LogId}; + use p2panda_rs::entry::LogId; use p2panda_rs::identity::PublicKey; use p2panda_rs::operation::OperationId; use p2panda_rs::schema::SchemaId; - use p2panda_rs::storage_provider::traits::{ - AsStorageLog, EntryStore, LogStore, StorageProvider, - }; + use p2panda_rs::storage_provider::traits::LogStore; use p2panda_rs::test_utils::fixtures::{ - encoded_entry, public_key, random_document_id, random_operation_id, schema_id, + public_key, random_document_id, random_operation_id, schema_id, }; use rstest::rstest; - use crate::db::stores::log::StorageLog; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; #[rstest] @@ -245,16 +143,16 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, ) { runner.with_db_teardown(move |db: TestDatabase| async move { - let log = StorageLog::new( - &public_key, - &schema_id, - &document.clone(), - &LogId::default(), - ); - assert!(db.store.insert_log(log).await.is_ok()); - - let log = StorageLog::new(&public_key, &schema_id, &document, &LogId::default()); - assert!(db.store.insert_log(log).await.is_err()); + assert!(db + .store + .insert_log(&LogId::default(), &public_key, &schema_id, &document) + .await + .is_ok()); + assert!(db + .store + .insert_log(&LogId::default(), &public_key, &schema_id, &document) + .await + .is_err()); }); } @@ -272,9 +170,11 @@ mod tests { &DocumentViewId::new(&[operation_id_1, operation_id_2]), ); - let log = StorageLog::new(&public_key, &schema, &document, &LogId::default()); - - assert!(db.store.insert_log(log).await.is_ok()); + assert!(db + .store + .insert_log(&LogId::default(), &public_key, 
&schema, &document) + .await + .is_ok()); }); } @@ -283,7 +183,6 @@ mod tests { #[from(public_key)] public_key: PublicKey, #[from(schema_id)] schema_id: SchemaId, #[from(test_db)] runner: TestDatabaseRunner, - #[from(random_document_id)] document_id: DocumentId, ) { runner.with_db_teardown(move |db: TestDatabase| async move { let log_id = db.store.latest_log_id(&public_key).await.unwrap(); @@ -291,100 +190,19 @@ mod tests { assert_eq!(log_id, None); for n in 0..12 { - let log = StorageLog::new(&public_key, &schema_id, &document_id, &LogId::new(n)); - db.store.insert_log(log).await.unwrap(); + db.store + .insert_log( + &LogId::new(n), + &public_key, + &schema_id, + &random_document_id(), + ) + .await + .unwrap(); let log_id = db.store.latest_log_id(&public_key).await.unwrap(); assert_eq!(Some(LogId::new(n)), log_id); } }); } - - #[rstest] - fn document_log_id( - #[from(schema_id)] schema_id: SchemaId, - #[from(encoded_entry)] encoded_entry: EncodedEntry, - #[from(test_db)] runner: TestDatabaseRunner, - ) { - runner.with_db_teardown(move |db: TestDatabase| async move { - // Expect database to return nothing yet - assert_eq!( - db.store - .get_document_by_entry(&encoded_entry.hash()) - .await - .unwrap(), - None - ); - - let entry = decode_entry(&encoded_entry).unwrap(); - let public_key = entry.public_key(); - // Store entry in database - assert!(db - .store - .insert_entry(&entry, &encoded_entry, None) - .await - .is_ok()); - - let log = StorageLog::new( - public_key, - &schema_id, - &encoded_entry.hash().into(), - &LogId::default(), - ); - - // Store log in database - assert!(db.store.insert_log(log).await.is_ok()); - - // Expect to find document id in database. The document id should be the same as the - // hash of the first entry in the log. - assert_eq!( - db.store - .get_document_by_entry(&encoded_entry.hash()) - .await - .unwrap(), - Some(encoded_entry.hash().into()) - ); - }); - } - - #[rstest] - fn log_ids( - #[from(public_key)] public_key: PublicKey, - #[from(test_db)] runner: TestDatabaseRunner, - #[from(schema_id)] schema_id: SchemaId, - #[from(random_document_id)] document_first: DocumentId, - #[from(random_document_id)] document_second: DocumentId, - #[from(random_document_id)] document_third: DocumentId, - #[from(random_document_id)] document_forth: DocumentId, - ) { - runner.with_db_teardown(move |db: TestDatabase| async move { - // Register two log ids at the beginning - let log_1 = - StorageLog::new(&public_key, &schema_id, &document_first, &LogId::default()); - let log_2 = StorageLog::new(&public_key, &schema_id, &document_second, &LogId::new(1)); - - db.store.insert_log(log_1).await.unwrap(); - db.store.insert_log(log_2).await.unwrap(); - - // Find next free log id and register it - let log_id = db.store.next_log_id(&public_key).await.unwrap(); - assert_eq!(log_id, LogId::new(2)); - - let log_3 = StorageLog::new(&public_key, &schema_id, &document_third, &log_id); - - db.store.insert_log(log_3).await.unwrap(); - - // Find next free log id and register it - let log_id = db.store.next_log_id(&public_key).await.unwrap(); - assert_eq!(log_id, LogId::new(3)); - - let log_4 = StorageLog::new(&public_key, &schema_id, &document_forth, &log_id); - - db.store.insert_log(log_4).await.unwrap(); - - // Find next free log id - let log_id = db.store.next_log_id(&public_key).await.unwrap(); - assert_eq!(log_id, LogId::new(4)); - }); - } } diff --git a/aquadoggo/src/db/stores/mod.rs b/aquadoggo/src/db/stores/mod.rs index 69ea7ea45..ca7561f75 100644 --- a/aquadoggo/src/db/stores/mod.rs +++ 
b/aquadoggo/src/db/stores/mod.rs @@ -1,5 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +//! Implementations of all `p2panda-rs` defined storage provider traits and additionally +//! `aquadoggo` specific interfaces. pub mod document; mod entry; mod log; @@ -8,7 +10,3 @@ mod schema; mod task; #[cfg(test)] pub mod test_utils; - -pub use self::log::StorageLog; -pub use entry::StorageEntry; -pub use operation::StorageOperation; diff --git a/aquadoggo/src/db/stores/operation.rs b/aquadoggo/src/db/stores/operation.rs index 420d2e4cb..93f32c832 100644 --- a/aquadoggo/src/db/stores/operation.rs +++ b/aquadoggo/src/db/stores/operation.rs @@ -4,82 +4,19 @@ use std::collections::BTreeMap; use async_trait::async_trait; use futures::future::try_join_all; -use p2panda_rs::document::{DocumentId, DocumentViewId}; +use p2panda_rs::document::DocumentId; use p2panda_rs::identity::PublicKey; -use p2panda_rs::operation::traits::{AsOperation, AsVerifiedOperation}; -use p2panda_rs::operation::{ - OperationAction, OperationFields, OperationId, OperationVersion, VerifiedOperation, -}; +use p2panda_rs::operation::traits::AsOperation; +use p2panda_rs::operation::{Operation, OperationId}; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::error::OperationStorageError; use p2panda_rs::storage_provider::traits::OperationStore; use sqlx::{query, query_as, query_scalar}; +use crate::db::models::utils::{parse_operation_rows, parse_value_to_string_vec}; use crate::db::models::OperationFieldsJoinedRow; -use crate::db::provider::SqlStorage; -use crate::db::utils::{parse_operation_rows, parse_value_to_string_vec}; - -pub struct StorageOperation { - /// Identifier of the operation. - pub(crate) id: OperationId, - - /// Version of this operation. - pub(crate) version: OperationVersion, - - /// Action of this operation. - pub(crate) action: OperationAction, - - /// Schema instance of this operation. - pub(crate) schema_id: SchemaId, - - /// Previous operations field. - pub(crate) previous: Option, - - /// Operation fields. - pub(crate) fields: Option, - - /// The public key of the key pair used to publish this operation. - pub(crate) public_key: PublicKey, -} - -impl AsVerifiedOperation for StorageOperation { - /// Returns the identifier for this operation. - fn id(&self) -> &OperationId { - &self.id - } - - /// Returns the public key of the author of this operation. - fn public_key(&self) -> &PublicKey { - &self.public_key - } -} - -impl AsOperation for StorageOperation { - /// Returns action type of operation. - fn action(&self) -> OperationAction { - self.action.to_owned() - } - - /// Returns schema id of operation. - fn schema_id(&self) -> SchemaId { - self.schema_id.to_owned() - } - - /// Returns version of operation. - fn version(&self) -> OperationVersion { - self.version.to_owned() - } - - /// Returns application data fields of operation. - fn fields(&self) -> Option { - self.fields.clone() - } - - /// Returns vector of this operation's previous operation ids. - fn previous(&self) -> Option { - self.previous.clone() - } -} +use crate::db::types::StorageOperation; +use crate::db::SqlStore; /// Implementation of `OperationStore` trait which is required when constructing a /// `StorageProvider`. @@ -92,12 +29,14 @@ impl AsOperation for StorageOperation { /// used in conjunction with the `sqlx` library to coerce raw values into structs when querying the /// database. 
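To illustrate the reworked call shape (a sketch only; the helper and its arguments are assumed to come from the caller's publish path), inserting and re-reading an operation now looks roughly like this:

use p2panda_rs::document::DocumentId;
use p2panda_rs::identity::PublicKey;
use p2panda_rs::operation::{Operation, OperationId};
use p2panda_rs::storage_provider::error::OperationStorageError;
use p2panda_rs::storage_provider::traits::OperationStore;

use crate::db::SqlStore;

// Hypothetical helper mirroring the new API: the operation id and public key are
// passed alongside the plain `Operation` instead of a `VerifiedOperation` wrapper.
async fn store_and_check(
    store: &SqlStore,
    id: &OperationId,
    public_key: &PublicKey,
    operation: &Operation,
    document_id: &DocumentId,
) -> Result<bool, OperationStorageError> {
    store
        .insert_operation(id, public_key, operation, document_id)
        .await?;
    Ok(store.get_operation(id).await?.is_some())
}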
#[async_trait] -impl OperationStore for SqlStorage { +impl OperationStore for SqlStore { + type Operation = StorageOperation; + /// Get the id of the document an operation is part of. /// /// Returns a result containing a `DocumentId` wrapped in an option. If no document was found, /// then this method returns None. Errors if a fatal storage error occurs. - async fn get_document_by_operation_id( + async fn get_document_id_by_operation_id( &self, id: &OperationId, ) -> Result, OperationStorageError> { @@ -132,7 +71,9 @@ impl OperationStore for SqlStorage { /// different sets of insertions. async fn insert_operation( &self, - operation: &VerifiedOperation, + id: &OperationId, + public_key: &PublicKey, + operation: &Operation, document_id: &DocumentId, ) -> Result<(), OperationStorageError> { // Start a transaction, any db insertions after this point, and before the `commit()` will @@ -160,9 +101,9 @@ impl OperationStore for SqlStorage { ($1, $2, $3, $4, $5, $6) ", ) - .bind(operation.public_key().to_string()) + .bind(public_key.to_string()) .bind(document_id.as_str()) - .bind(operation.id().as_str()) + .bind(id.as_str()) .bind(operation.action().as_str()) .bind(operation.schema_id().to_string()) .bind( @@ -205,7 +146,7 @@ impl OperationStore for SqlStorage { ($1, $2, $3, $4, $5) ", ) - .bind(operation.id().as_str().to_owned()) + .bind(id.as_str().to_owned()) .bind(name.to_owned()) .bind(value.field_type().to_string()) .bind(db_value) @@ -230,9 +171,7 @@ impl OperationStore for SqlStorage { .iter() .any(|query_result| query_result.rows_affected() != 1) { - return Err(OperationStorageError::InsertionError( - operation.id().clone(), - )); + return Err(OperationStorageError::InsertionError(id.clone())); } // Commit the transaction. @@ -248,7 +187,7 @@ impl OperationStore for SqlStorage { /// /// Returns a result containing an `VerifiedOperation` wrapped in an option, if no operation /// with this id was found, returns none. Errors if a fatal storage error occured. - async fn get_operation_by_id( + async fn get_operation( &self, id: &OperationId, ) -> Result, OperationStorageError> { @@ -340,93 +279,151 @@ impl OperationStore for SqlStorage { Ok(operations) } + + /// Get all operations that are part of a given document. 
+ async fn get_operations_by_schema_id( + &self, + id: &SchemaId, + ) -> Result, OperationStorageError> { + let operation_rows = query_as::<_, OperationFieldsJoinedRow>( + " + SELECT + operations_v1.public_key, + operations_v1.document_id, + operations_v1.operation_id, + operations_v1.action, + operations_v1.schema_id, + operations_v1.previous, + operation_fields_v1.name, + operation_fields_v1.field_type, + operation_fields_v1.value, + operation_fields_v1.list_index + FROM + operations_v1 + LEFT JOIN operation_fields_v1 + ON + operation_fields_v1.operation_id = operations_v1.operation_id + WHERE + operations_v1.schema_id = $1 + ORDER BY + operation_fields_v1.list_index ASC + ", + ) + .bind(id.to_string()) + .fetch_all(&self.pool) + .await + .map_err(|e| OperationStorageError::FatalStorageError(e.to_string()))?; + + let mut grouped_operation_rows: BTreeMap> = + BTreeMap::new(); + + for operation_row in operation_rows { + if let Some(current_operations) = + grouped_operation_rows.get_mut(&operation_row.operation_id) + { + current_operations.push(operation_row) + } else { + grouped_operation_rows + .insert(operation_row.clone().operation_id, vec![operation_row]); + }; + } + + let operations: Vec = grouped_operation_rows + .iter() + .filter_map(|(_id, operation_rows)| parse_operation_rows(operation_rows.to_owned())) + .collect(); + + Ok(operations) + } } #[cfg(test)] mod tests { use p2panda_rs::document::DocumentId; - use p2panda_rs::identity::KeyPair; - use p2panda_rs::operation::traits::{AsOperation, AsVerifiedOperation}; - use p2panda_rs::operation::VerifiedOperation; + use p2panda_rs::identity::PublicKey; + use p2panda_rs::operation::traits::{AsOperation, WithPublicKey}; + use p2panda_rs::operation::{Operation, OperationAction, OperationBuilder, OperationId}; + use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::traits::OperationStore; use p2panda_rs::test_utils::constants::test_fields; use p2panda_rs::test_utils::fixtures::{ - document_id, key_pair, operation_fields, random_document_view_id, random_key_pair, - random_previous_operations, verified_operation, verified_operation_with_schema, + document_id, operation, operation_id, operation_with_schema, public_key, + random_document_view_id, random_operation_id, random_previous_operations, schema_id, }; + use p2panda_rs::WithId; use rstest::rstest; - use crate::db::stores::test_utils::{ - doggo_fields, doggo_schema, test_db, TestDatabase, TestDatabaseRunner, - }; + use crate::db::stores::test_utils::{doggo_fields, test_db, TestDatabase, TestDatabaseRunner}; #[rstest] - #[case::create_operation(verified_operation_with_schema( + #[case::create_operation(operation_with_schema( Some(test_fields().into()), None, - random_key_pair() ))] - #[case::update_operation(verified_operation_with_schema( + #[case::update_operation(operation_with_schema( Some(test_fields().into()), Some(random_document_view_id()), - random_key_pair() ))] #[case::update_operation_many_prev_ops( - verified_operation_with_schema( + operation_with_schema( Some(test_fields().into()), Some(random_previous_operations(12)), - random_key_pair() ) )] - #[case::delete_operation(verified_operation_with_schema( - None, - Some(random_document_view_id()), - random_key_pair() - ))] - #[case::delete_operation_many_prev_ops(verified_operation_with_schema( + #[case::delete_operation(operation_with_schema(None, Some(random_document_view_id()),))] + #[case::delete_operation_many_prev_ops(operation_with_schema( None, Some(random_previous_operations(12)), - random_key_pair() ))] fn 
insert_and_get_operations( - #[case] operation: VerifiedOperation, + #[case] operation: Operation, + operation_id: OperationId, + public_key: PublicKey, document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { // Insert the doggo operation into the db, returns Ok(true) when succesful. - let result = db.store.insert_operation(&operation, &document_id).await; + let result = db + .store + .insert_operation(&operation_id, &public_key, &operation, &document_id) + .await; assert!(result.is_ok()); // Request the previously inserted operation by it's id. let returned_operation = db .store - .get_operation_by_id(operation.id()) + .get_operation(&operation_id) .await .unwrap() .unwrap(); - assert_eq!(returned_operation.public_key(), operation.public_key()); + assert_eq!(returned_operation.public_key(), &public_key); assert_eq!(returned_operation.fields(), operation.fields()); - assert_eq!(returned_operation.id(), operation.id()); + assert_eq!( + WithId::::id(&returned_operation), + &operation_id + ); }); } #[rstest] fn insert_operation_twice( - #[from(verified_operation)] verified_operation: VerifiedOperation, + operation: Operation, + operation_id: OperationId, + public_key: PublicKey, document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { db.store - .insert_operation(&verified_operation, &document_id) + .insert_operation(&operation_id, &public_key, &operation, &document_id) .await .unwrap(); assert!(db .store - .insert_operation(&verified_operation, &document_id) + .insert_operation(&operation_id, &public_key, &operation, &document_id) .await .is_err()); }); @@ -434,31 +431,33 @@ mod tests { #[rstest] fn gets_document_by_operation_id( - #[from(verified_operation)] create_operation: VerifiedOperation, - key_pair: KeyPair, + operation: Operation, + operation_id: OperationId, + public_key: PublicKey, document_id: DocumentId, + schema_id: SchemaId, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { // Getting a document by operation id which isn't stored in the database // should return none. assert!(db .store - .get_document_by_operation_id(create_operation.id()) + .get_document_id_by_operation_id(&operation_id) .await .expect("Get document id by operation id") .is_none()); // Now we insert the operation. db.store - .insert_operation(&create_operation, &document_id) + .insert_operation(&operation_id, &public_key, &operation, &document_id) .await .unwrap(); // The same request should return the expected document id. assert_eq!( db.store - .get_document_by_operation_id(create_operation.id()) + .get_document_id_by_operation_id(&operation_id) .await .expect("Get document id by operation id") .expect("Unwrap document id"), @@ -466,22 +465,29 @@ mod tests { ); // We now create and insert an update to the same document. 
- let update_operation = verified_operation( - Some(operation_fields(doggo_fields())), - doggo_schema(), - Some(create_operation.id().to_owned().into()), - key_pair, - ); + let update_operation = OperationBuilder::new(&schema_id) + .action(OperationAction::Update) + .fields(&doggo_fields()) + .previous(&operation_id.into()) + .build() + .expect("Builds operation"); + + let update_operation_id = random_operation_id(); db.store - .insert_operation(&update_operation, &document_id) + .insert_operation( + &update_operation_id, + &public_key, + &update_operation, + &document_id, + ) .await .unwrap(); // Getting the document by the id of the new update document should also work. assert_eq!( db.store - .get_document_by_operation_id(update_operation.id()) + .get_document_id_by_operation_id(&update_operation_id) .await .expect("Get document id by operation id") .expect("Unwrap document id"), diff --git a/aquadoggo/src/db/stores/schema.rs b/aquadoggo/src/db/stores/schema.rs index 3155c58c1..c9ff8e91e 100644 --- a/aquadoggo/src/db/stores/schema.rs +++ b/aquadoggo/src/db/stores/schema.rs @@ -2,18 +2,18 @@ use std::convert::{TryFrom, TryInto}; -use async_trait::async_trait; +use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::DocumentViewId; use p2panda_rs::schema::system::{SchemaFieldView, SchemaView}; use p2panda_rs::schema::{Schema, SchemaId}; +use p2panda_rs::storage_provider::error::OperationStorageError; use p2panda_rs::storage_provider::traits::DocumentStore; +use sqlx::query_scalar; use crate::db::errors::SchemaStoreError; -use crate::db::provider::SqlStorage; -use crate::db::traits::SchemaStore; +use crate::db::SqlStore; -#[async_trait] -impl SchemaStore for SqlStorage { +impl SqlStore { /// Get a Schema from the database by it's document view id. /// /// Internally, this method performs three steps: @@ -23,13 +23,15 @@ impl SchemaStore for SqlStorage { /// /// If no schema definition with the passed id is found then None is returned, if any of the /// other steps can't be completed, then an error is returned. - async fn get_schema_by_id( + pub async fn get_schema_by_id( &self, id: &DocumentViewId, ) -> Result, SchemaStoreError> { // Fetch the document view for the schema - let schema_view: SchemaView = match self.get_document_view_by_id(id).await? { - Some(document_view) => document_view.try_into()?, + let schema_view: SchemaView = match self.get_document_by_view_id(id).await? { + // We can unwrap the document view here as documents returned from this store method + // all contain views. + Some(document) => document.view().unwrap().try_into()?, None => return Ok(None), }; @@ -38,8 +40,10 @@ impl SchemaStore for SqlStorage { for field_id in schema_view.fields().iter() { // Fetch schema field document views let scheme_field_view: SchemaFieldView = - match self.get_document_view_by_id(field_id).await? { - Some(document_view) => document_view.try_into()?, + match self.get_document_by_view_id(field_id).await? { + // We can unwrap the document view here as documents returned from this store + // method all contain views. + Some(document) => document.view().unwrap().try_into()?, None => return Ok(None), }; @@ -59,19 +63,21 @@ impl SchemaStore for SqlStorage { /// Returns an error if a fatal db error occured. /// /// Silently ignores incomplete or broken schema definitions. 
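As a small sketch of how a consumer might use this method (the helper name is assumed, not part of this change), collecting the ids of every schema currently materialised in the store:

use p2panda_rs::schema::Schema;

use crate::db::errors::SchemaStoreError;
use crate::db::SqlStore;

// Hypothetical helper: list all known schema ids; incomplete or broken schema
// definitions are already skipped by `get_all_schema` itself.
async fn list_schema_ids(store: &SqlStore) -> Result<Vec<String>, SchemaStoreError> {
    let schemas: Vec<Schema> = store.get_all_schema().await?;
    Ok(schemas.iter().map(|schema| schema.id().to_string()).collect())
}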
- async fn get_all_schema(&self) -> Result, SchemaStoreError> { + pub async fn get_all_schema(&self) -> Result, SchemaStoreError> { let schema_views: Vec = self .get_documents_by_schema(&SchemaId::new("schema_definition_v1")?) .await? .into_iter() - .filter_map(|view| SchemaView::try_from(view).ok()) + // We can unwrap the document view here as documents returned from this store method all contain views. + .filter_map(|document| SchemaView::try_from(document.view().unwrap()).ok()) .collect(); let schema_field_views: Vec = self .get_documents_by_schema(&SchemaId::new("schema_field_definition_v1")?) .await? .into_iter() - .filter_map(|view| SchemaFieldView::try_from(view).ok()) + // We can unwrap the document view here as documents returned from this store method all contain views. + .filter_map(|document| SchemaFieldView::try_from(document.view().unwrap()).ok()) .collect(); let mut all_schema = vec![]; @@ -89,10 +95,37 @@ impl SchemaStore for SqlStorage { Ok(all_schema.into_iter().flatten().collect()) } + + /// Returns the schema id for a document view. + /// + /// Returns `None` if this document view is not found. + pub async fn get_schema_by_document_view( + &self, + view_id: &DocumentViewId, + ) -> Result, SchemaStoreError> { + let result: Option = query_scalar( + " + SELECT + schema_id + FROM + document_views + WHERE + document_view_id = $1 + ", + ) + .bind(view_id.to_string()) + .fetch_optional(&self.pool) + .await + .map_err(|e| OperationStorageError::FatalStorageError(e.to_string()))?; + + // Unwrap because we expect no invalid schema ids in the db. + Ok(result.map(|id_str| id_str.parse().unwrap())) + } } #[cfg(test)] mod tests { + use p2panda_rs::document::DocumentViewId; use p2panda_rs::identity::KeyPair; use p2panda_rs::schema::{FieldType, SchemaId}; use p2panda_rs::test_utils::fixtures::{key_pair, random_document_view_id}; @@ -102,8 +135,6 @@ mod tests { add_document, add_schema, test_db, TestDatabase, TestDatabaseRunner, }; - use super::SchemaStore; - #[rstest] fn get_schema(key_pair: KeyPair, #[from(test_db)] runner: TestDatabaseRunner) { runner.with_db_teardown(move |mut db: TestDatabase| async move { @@ -179,4 +210,54 @@ mod tests { assert!(schema.is_none()); }); } + + #[rstest] + fn test_get_schema_for_view( + key_pair: KeyPair, + #[from(test_db)] + #[with(1, 1, 1)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|mut db: TestDatabase| async move { + let schema = add_schema( + &mut db, + "venue", + vec![ + ("description", FieldType::String), + ("profile_name", FieldType::String), + ], + &key_pair, + ) + .await; + + let document_view_id = match schema.id() { + SchemaId::Application(_, view_id) => view_id, + _ => panic!("Invalid schema id"), + }; + + let result = db.store.get_schema_by_document_view(document_view_id).await; + + assert!(result.is_ok()); + // This is the schema name of the schema document we published. 
+ assert_eq!(result.unwrap().unwrap().name(), "schema_definition"); + }); + } + + #[rstest] + fn test_get_schema_for_missing_view( + random_document_view_id: DocumentViewId, + #[from(test_db)] + #[with(1, 1, 1)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let result = db + .store + .get_schema_by_document_view(&random_document_view_id) + .await; + + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); + }); + } } diff --git a/aquadoggo/src/db/stores/task.rs b/aquadoggo/src/db/stores/task.rs index aa45ce9dd..ac02c7830 100644 --- a/aquadoggo/src/db/stores/task.rs +++ b/aquadoggo/src/db/stores/task.rs @@ -4,15 +4,15 @@ use anyhow::Result; use p2panda_rs::document::{DocumentId, DocumentViewId}; use sqlx::{query, query_as}; -use crate::db::errors::SqlStorageError; +use crate::db::errors::SqlStoreError; use crate::db::models::TaskRow; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; use crate::materializer::{Task, TaskInput}; /// Methods to interact with the `tasks` table in the database. -impl SqlStorage { +impl SqlStore { /// Inserts a "pending" task into the database. - pub async fn insert_task(&self, task: &Task) -> Result<(), SqlStorageError> { + pub async fn insert_task(&self, task: &Task) -> Result<(), SqlStoreError> { // Convert task input to correct database types let task_input = task.input(); let document_id = task_input.document_id.as_ref().map(|id| id.as_str()); @@ -40,13 +40,13 @@ impl SqlStorage { .bind(document_view_id) .execute(&self.pool) .await - .map_err(|err| SqlStorageError::Transaction(err.to_string()))?; + .map_err(|err| SqlStoreError::Transaction(err.to_string()))?; Ok(()) } /// Removes a "pending" task from the database. - pub async fn remove_task(&self, task: &Task) -> Result<(), SqlStorageError> { + pub async fn remove_task(&self, task: &Task) -> Result<(), SqlStoreError> { // Convert task input to correct database types let task_input = task.input(); let document_id = task_input.document_id.as_ref().map(|id| id.as_str()); @@ -73,17 +73,17 @@ impl SqlStorage { .bind(document_view_id) .execute(&self.pool) .await - .map_err(|err| SqlStorageError::Transaction(err.to_string()))?; + .map_err(|err| SqlStoreError::Transaction(err.to_string()))?; if result.rows_affected() != 1 { - Err(SqlStorageError::Deletion("tasks".into())) + Err(SqlStoreError::Deletion("tasks".into())) } else { Ok(()) } } /// Returns "pending" tasks of the materialization service worker. 
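For orientation, a sketch of the intended task lifecycle, assuming the caller already holds a `Task<TaskInput>` built by the materialiser (the helper itself is illustrative):

use crate::db::errors::SqlStoreError;
use crate::db::SqlStore;
use crate::materializer::{Task, TaskInput};

// Hypothetical lifecycle sketch: a task is persisted so it survives a restart and
// removed again once the worker has completed it.
async fn persist_then_complete(
    store: &SqlStore,
    task: &Task<TaskInput>,
) -> Result<(), SqlStoreError> {
    store.insert_task(task).await?;
    // ... the materialiser would run the task here ...
    store.remove_task(task).await
}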
- pub async fn get_tasks(&self) -> Result>, SqlStorageError> { + pub async fn get_tasks(&self) -> Result>, SqlStoreError> { let task_rows = query_as::<_, TaskRow>( " SELECT @@ -96,7 +96,7 @@ impl SqlStorage { ) .fetch_all(&self.pool) .await - .map_err(|err| SqlStorageError::Transaction(err.to_string()))?; + .map_err(|err| SqlStoreError::Transaction(err.to_string()))?; // Convert database rows into correct p2panda types let mut tasks: Vec> = Vec::new(); diff --git a/aquadoggo/src/db/stores/test_utils/helpers.rs b/aquadoggo/src/db/stores/test_utils/helpers.rs index 8aed149ed..accaff42b 100644 --- a/aquadoggo/src/db/stores/test_utils/helpers.rs +++ b/aquadoggo/src/db/stores/test_utils/helpers.rs @@ -1,7 +1,9 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use std::convert::TryFrom; + use log::{debug, info}; -use p2panda_rs::document::{DocumentId, DocumentViewId}; +use p2panda_rs::document::{Document, DocumentId, DocumentViewId}; use p2panda_rs::entry::traits::AsEncodedEntry; use p2panda_rs::hash::Hash; use p2panda_rs::identity::KeyPair; @@ -9,9 +11,10 @@ use p2panda_rs::operation::{ OperationBuilder, OperationValue, PinnedRelation, PinnedRelationList, Relation, RelationList, }; use p2panda_rs::schema::{FieldType, Schema, SchemaId}; +use p2panda_rs::storage_provider::traits::OperationStore; use p2panda_rs::test_utils::constants; -use p2panda_rs::test_utils::db::test_db::send_to_store; use p2panda_rs::test_utils::fixtures::{schema, schema_fields}; +use p2panda_rs::test_utils::memory_store::helpers::send_to_store; use crate::db::stores::test_utils::TestDatabase; use crate::materializer::tasks::{dependency_task, reduce_task, schema_task}; @@ -88,6 +91,18 @@ pub fn doggo_fields() -> Vec<(&'static str, OperationValue)> { ] } +/// Build a document from it's stored operations specified by it's document id. +pub async fn build_document(store: &S, document_id: &DocumentId) -> Document { + // We retrieve the operations. + let document_operations = store + .get_operations_by_document_id(document_id) + .await + .expect("Get operations"); + + // Then we construct the document. + Document::try_from(&document_operations).expect("Build the document") +} + /// Publish a document and materialise it in a given `TestDatabase`. /// /// Also runs dependency task for document. 
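A minimal test-side sketch of the new `build_document` helper, assuming the document id points at data already published into the store and that `Document` exposes its metadata through the `AsDocument` trait like the storage types do (the assertion helper is illustrative, not part of this change):

use p2panda_rs::document::traits::AsDocument;
use p2panda_rs::document::DocumentId;

use crate::db::stores::test_utils::build_document;
use crate::db::SqlStore;

// Hypothetical test assertion: rebuilding a document from its stored operations
// should resolve back to the id we asked for.
async fn assert_document_builds(store: &SqlStore, document_id: &DocumentId) {
    let document = build_document(store, document_id).await;
    assert_eq!(document.id(), document_id);
}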
diff --git a/aquadoggo/src/db/stores/test_utils/mod.rs b/aquadoggo/src/db/stores/test_utils/mod.rs index eb3c14fe1..46cb3f3d9 100644 --- a/aquadoggo/src/db/stores/test_utils/mod.rs +++ b/aquadoggo/src/db/stores/test_utils/mod.rs @@ -4,6 +4,6 @@ mod helpers; mod runner; mod store; -pub use helpers::{add_document, add_schema, doggo_fields, doggo_schema}; +pub use helpers::{add_document, add_schema, build_document, doggo_fields, doggo_schema}; pub use runner::{test_db, with_db_manager_teardown, TestDatabaseManager, TestDatabaseRunner}; pub use store::{TestData, TestDatabase}; diff --git a/aquadoggo/src/db/stores/test_utils/runner.rs b/aquadoggo/src/db/stores/test_utils/runner.rs index f85106773..8f34cfbbd 100644 --- a/aquadoggo/src/db/stores/test_utils/runner.rs +++ b/aquadoggo/src/db/stores/test_utils/runner.rs @@ -6,16 +6,16 @@ use std::sync::Arc; use futures::Future; use p2panda_rs::operation::OperationValue; use p2panda_rs::schema::Schema; -use p2panda_rs::test_utils::db::test_db::{populate_store, PopulateDatabaseConfig}; +use p2panda_rs::test_utils::memory_store::helpers::{populate_store, PopulateStoreConfig}; use rstest::fixture; use tokio::runtime::Builder; use tokio::sync::Mutex; use crate::config::Configuration; use crate::context::Context; -use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{TestData, TestDatabase}; use crate::db::Pool; +use crate::db::SqlStore; use crate::schema::SchemaProvider; use crate::test_helpers::{initialize_db, initialize_db_with_url}; @@ -59,7 +59,7 @@ where // We may still want to keep this "single database" runner injected through `rstest` but in any // case probably best to consider that in a different PR. pub struct TestDatabaseRunner { - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, } impl TestDatabaseRunner { @@ -79,7 +79,7 @@ impl TestDatabaseRunner { runtime.block_on(async { // Initialise store let pool = initialize_db().await; - let store = SqlStorage::new(pool); + let store = SqlStore::new(pool); // Populate the store and construct test data let (key_pairs, documents) = populate_store(&store, &self.config).await; @@ -144,7 +144,7 @@ impl TestDatabaseManager { let pool = initialize_db_with_url(url).await; // Initialise test store using pool. - let store = SqlStorage::new(pool.clone()); + let store = SqlStore::new(pool.clone()); let test_db = TestDatabase::new(store.clone()); @@ -179,7 +179,7 @@ pub fn test_db( // The fields used for every UPDATE operation #[default(doggo_fields())] update_operation_fields: Vec<(&'static str, OperationValue)>, ) -> TestDatabaseRunner { - let config = PopulateDatabaseConfig { + let config = PopulateStoreConfig { no_of_entries, no_of_logs, no_of_public_keys, diff --git a/aquadoggo/src/db/stores/test_utils/store.rs b/aquadoggo/src/db/stores/test_utils/store.rs index ec24de80d..1799d47ad 100644 --- a/aquadoggo/src/db/stores/test_utils/store.rs +++ b/aquadoggo/src/db/stores/test_utils/store.rs @@ -2,23 +2,22 @@ use p2panda_rs::document::DocumentId; use p2panda_rs::identity::KeyPair; -use p2panda_rs::storage_provider::traits::StorageProvider; use crate::config::Configuration; use crate::context::Context; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; use crate::schema::SchemaProvider; /// Container for `SqlStore` with access to the document ids and key_pairs used in the /// pre-populated database for testing. 
-pub struct TestDatabase { - pub context: Context, - pub store: S, +pub struct TestDatabase { + pub context: Context, + pub store: SqlStore, pub test_data: TestData, } -impl TestDatabase { - pub fn new(store: S) -> Self { +impl TestDatabase { + pub fn new(store: SqlStore) -> Self { // Initialise context for store. let context = Context::new( store.clone(), diff --git a/aquadoggo/src/db/traits/mod.rs b/aquadoggo/src/db/traits/mod.rs deleted file mode 100644 index 0ad88b1b7..000000000 --- a/aquadoggo/src/db/traits/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-or-later - -mod schema; - -pub use schema::SchemaStore; diff --git a/aquadoggo/src/db/traits/schema.rs b/aquadoggo/src/db/traits/schema.rs deleted file mode 100644 index c92903534..000000000 --- a/aquadoggo/src/db/traits/schema.rs +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-or-later - -use async_trait::async_trait; -use p2panda_rs::document::DocumentViewId; -use p2panda_rs::schema::Schema; - -use crate::db::errors::SchemaStoreError; - -#[async_trait] -pub trait SchemaStore { - /// Get a published Schema from storage by it's document view id. - /// - /// Returns a Schema or None if no schema was found with this document view id. Returns - /// an error if a fatal storage error occured. - async fn get_schema_by_id( - &self, - id: &DocumentViewId, - ) -> Result, SchemaStoreError>; - - /// Get all published Schema from storage. - /// - /// Returns a vector of Schema, or an empty vector if none were found. Returns - /// an error when a fatal storage error occured or a schema could not be constructed. - async fn get_all_schema(&self) -> Result, SchemaStoreError>; -} diff --git a/aquadoggo/src/db/types/document.rs b/aquadoggo/src/db/types/document.rs new file mode 100644 index 000000000..4858151c1 --- /dev/null +++ b/aquadoggo/src/db/types/document.rs @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use p2panda_rs::document::traits::AsDocument; +use p2panda_rs::document::{DocumentId, DocumentViewFields, DocumentViewId}; +use p2panda_rs::identity::PublicKey; +use p2panda_rs::schema::SchemaId; + +#[derive(Debug, Clone, PartialEq)] +pub struct StorageDocument { + /// The id for this document. + pub(crate) id: DocumentId, + + /// The key-value mapping of this documents current view. + pub(crate) fields: Option, + + /// The id of the schema this document follows. + pub(crate) schema_id: SchemaId, + + /// The id of the current view of this document. + pub(crate) view_id: DocumentViewId, + + /// The public key of the author who created this document. + pub(crate) author: PublicKey, + + /// Flag indicating if document was deleted. + pub(crate) deleted: bool, +} + +impl AsDocument for StorageDocument { + /// Get the document id. + fn id(&self) -> &DocumentId { + &self.id + } + + /// Get the document view id. + fn view_id(&self) -> &DocumentViewId { + &self.view_id + } + + /// Get the document author's public key. + fn author(&self) -> &PublicKey { + &self.author + } + + /// Get the document schema. + fn schema_id(&self) -> &SchemaId { + &self.schema_id + } + + /// The key-value mapping of this documents current view. + fn fields(&self) -> Option<&DocumentViewFields> { + self.fields.as_ref() + } + + /// Returns true if this document has applied an UPDATE operation. 
+ fn is_edited(&self) -> bool { + match self.fields() { + Some(fields) => fields + .iter() + .find(|(_, document_view_value)| { + &DocumentId::new(document_view_value.id()) != self.id() + }) + .is_none(), + None => true, + } + } + + /// Returns true if this document has processed a DELETE operation. + fn is_deleted(&self) -> bool { + self.deleted + } +} diff --git a/aquadoggo/src/db/types/entry.rs b/aquadoggo/src/db/types/entry.rs new file mode 100644 index 000000000..82e2daeb7 --- /dev/null +++ b/aquadoggo/src/db/types/entry.rs @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use p2panda_rs::entry::decode::decode_entry; +use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry}; +use p2panda_rs::entry::{EncodedEntry, LogId, SeqNum, Signature}; +use p2panda_rs::hash::Hash; +use p2panda_rs::identity::PublicKey; +use p2panda_rs::operation::EncodedOperation; + +use crate::db::models::EntryRow; + +/// A signed entry and it's encoded operation. Entries are the lowest level data type on the +/// p2panda network, they are signed by authors and form bamboo append only logs. The operation is +/// an entries' payload, it contains the data mutations which authors publish. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct StorageEntry { + /// PublicKey of this entry. + pub(crate) public_key: PublicKey, + + /// Used log for this entry. + pub(crate) log_id: LogId, + + /// Sequence number of this entry. + pub(crate) seq_num: SeqNum, + + /// Hash of skiplink Bamboo entry. + pub(crate) skiplink: Option, + + /// Hash of previous Bamboo entry. + pub(crate) backlink: Option, + + /// Byte size of payload. + pub(crate) payload_size: u64, + + /// Hash of payload. + pub(crate) payload_hash: Hash, + + /// Ed25519 signature of entry. + pub(crate) signature: Signature, + + /// Encoded entry bytes. + pub(crate) encoded_entry: EncodedEntry, + + /// Encoded entry bytes. + pub(crate) payload: Option, +} + +impl StorageEntry { + pub fn payload(&self) -> Option<&EncodedOperation> { + self.payload.as_ref() + } +} + +impl AsEntry for StorageEntry { + /// Returns public key of entry. + fn public_key(&self) -> &PublicKey { + &self.public_key + } + + /// Returns log id of entry. + fn log_id(&self) -> &LogId { + &self.log_id + } + + /// Returns sequence number of entry. + fn seq_num(&self) -> &SeqNum { + &self.seq_num + } + + /// Returns hash of skiplink entry when given. + fn skiplink(&self) -> Option<&Hash> { + self.skiplink.as_ref() + } + + /// Returns hash of backlink entry when given. + fn backlink(&self) -> Option<&Hash> { + self.backlink.as_ref() + } + + /// Returns payload size of operation. + fn payload_size(&self) -> u64 { + self.payload_size + } + + /// Returns payload hash of operation. + fn payload_hash(&self) -> &Hash { + &self.payload_hash + } + + /// Returns signature of entry. + fn signature(&self) -> &Signature { + &self.signature + } +} + +impl AsEncodedEntry for StorageEntry { + /// Generates and returns hash of encoded entry. + fn hash(&self) -> Hash { + self.encoded_entry.hash() + } + + /// Returns entry as bytes. + fn into_bytes(&self) -> Vec { + self.encoded_entry.into_bytes() + } + + /// Returns payload size (number of bytes) of total encoded entry. + fn size(&self) -> u64 { + self.encoded_entry.size() + } +} + +/// `From` implementation for converting an `EntryRow` into a `StorageEntry`. This is needed when +/// retrieving entries from the database. The `sqlx` crate coerces returned entry rows into +/// `EntryRow` but we want them as `StorageEntry` which contains typed values. 
+impl From for StorageEntry { + fn from(entry_row: EntryRow) -> Self { + let encoded_entry = EncodedEntry::from_bytes( + &hex::decode(entry_row.entry_bytes) + .expect("Decode entry hex entry bytes from database"), + ); + let entry = decode_entry(&encoded_entry).expect("Decoding encoded entry from database"); + StorageEntry { + public_key: entry.public_key().to_owned(), + log_id: entry.log_id().to_owned(), + seq_num: entry.seq_num().to_owned(), + skiplink: entry.skiplink().cloned(), + backlink: entry.backlink().cloned(), + payload_size: entry.payload_size(), + payload_hash: entry.payload_hash().to_owned(), + signature: entry.signature().to_owned(), + encoded_entry, + payload: entry_row.payload_bytes.map(|payload| { + EncodedOperation::from_bytes( + &hex::decode(payload).expect("Decode entry payload from database"), + ) + }), + } + } +} diff --git a/aquadoggo/src/db/types/mod.rs b/aquadoggo/src/db/types/mod.rs new file mode 100644 index 000000000..76a65ae95 --- /dev/null +++ b/aquadoggo/src/db/types/mod.rs @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +//! Structs representing data which has been retrieved from the store. +//! +//! As data coming from the db is trusted we construct these structs without validation. Compared +//! to their respective counterparts in `p2panda-rs` some additional values are also made +//! available. For example, all `StorageOperation`s contain the `DocumentId` of the document they +//! are associated with, this value is not encoded in an plain operation and must be derived from +//! other values stored in the database. +mod document; +mod entry; +mod operation; + +pub use document::StorageDocument; +pub use entry::StorageEntry; +pub use operation::StorageOperation; diff --git a/aquadoggo/src/db/types/operation.rs b/aquadoggo/src/db/types/operation.rs new file mode 100644 index 000000000..271911ead --- /dev/null +++ b/aquadoggo/src/db/types/operation.rs @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use p2panda_rs::document::{DocumentId, DocumentViewId}; +use p2panda_rs::identity::PublicKey; +use p2panda_rs::operation::traits::{AsOperation, WithPublicKey}; +use p2panda_rs::operation::{OperationAction, OperationFields, OperationId, OperationVersion}; +use p2panda_rs::schema::SchemaId; +use p2panda_rs::WithId; + +pub struct StorageOperation { + /// The id of the document this operation is part of. + pub(crate) document_id: DocumentId, + + /// Identifier of the operation. + pub(crate) id: OperationId, + + /// Version of this operation. + pub(crate) version: OperationVersion, + + /// Action of this operation. + pub(crate) action: OperationAction, + + /// Schema instance of this operation. + pub(crate) schema_id: SchemaId, + + /// Previous operations field. + pub(crate) previous: Option, + + /// Operation fields. + pub(crate) fields: Option, + + /// The public key of the key pair used to publish this operation. + pub(crate) public_key: PublicKey, +} + +impl WithPublicKey for StorageOperation { + /// Returns the public key of the author of this operation. + fn public_key(&self) -> &PublicKey { + &self.public_key + } +} + +impl WithId for StorageOperation { + /// Returns the identifier for this operation. + fn id(&self) -> &OperationId { + &self.id + } +} + +impl WithId for StorageOperation { + /// Returns the identifier for this operation. + fn id(&self) -> &DocumentId { + &self.document_id + } +} + +impl AsOperation for StorageOperation { + /// Returns action type of operation. 
+ fn action(&self) -> OperationAction { + self.action.to_owned() + } + + /// Returns schema id of operation. + fn schema_id(&self) -> SchemaId { + self.schema_id.to_owned() + } + + /// Returns version of operation. + fn version(&self) -> OperationVersion { + self.version.to_owned() + } + + /// Returns application data fields of operation. + fn fields(&self) -> Option { + self.fields.clone() + } + + /// Returns vector of this operation's previous operation ids. + fn previous(&self) -> Option { + self.previous.clone() + } +} diff --git a/aquadoggo/src/domain.rs b/aquadoggo/src/domain.rs index c471b07f6..65408515b 100644 --- a/aquadoggo/src/domain.rs +++ b/aquadoggo/src/domain.rs @@ -8,15 +8,14 @@ use bamboo_rs_core_ed25519_yasmf::entry::is_lipmaa_required; use p2panda_rs::document::{DocumentId, DocumentViewId}; use p2panda_rs::entry::decode::decode_entry; use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry}; -use p2panda_rs::entry::{EncodedEntry, Entry, LogId, SeqNum}; -use p2panda_rs::hash::Hash; +use p2panda_rs::entry::{EncodedEntry, LogId, SeqNum}; use p2panda_rs::identity::PublicKey; use p2panda_rs::operation::plain::PlainOperation; use p2panda_rs::operation::traits::AsOperation; use p2panda_rs::operation::validate::validate_operation_with_entry; use p2panda_rs::operation::{EncodedOperation, OperationAction}; use p2panda_rs::schema::Schema; -use p2panda_rs::storage_provider::traits::{AsStorageLog, StorageProvider}; +use p2panda_rs::storage_provider::traits::{EntryStore, LogStore, OperationStore}; use p2panda_rs::Human; use crate::graphql::client::NextArguments; @@ -58,7 +57,7 @@ use crate::validation::{ /// - get the latest seq num for this public key and log and safely increment /// /// Finally, return next arguments. -pub async fn next_args( +pub async fn next_args( store: &S, public_key: &PublicKey, document_view_id: Option<&DocumentViewId>, @@ -102,7 +101,7 @@ pub async fn next_args( ///////////////////////// // Retrieve the log_id for the found document_id and public_key. - let log_id = store.get(public_key, &document_id).await?; + let log_id = store.get_log_id(public_key, &document_id).await?; // Check if an existing log id was found for this public key and document. match log_id { @@ -203,7 +202,7 @@ pub async fn next_args( /// ## Compute and return next entry arguments /// /// - Done! -pub async fn publish( +pub async fn publish( store: &S, schema: &Schema, encoded_entry: &EncodedEntry, @@ -238,18 +237,18 @@ pub async fn publish( None => None, }; - let skiplink_params: Option<(Entry, Hash)> = skiplink.map(|entry| { + let skiplink_params = skiplink.map(|entry| { let hash = entry.hash(); - (entry.into(), hash) + (entry, hash) }); - let backlink_params: Option<(Entry, Hash)> = backlink.map(|entry| { + let backlink_params = backlink.map(|entry| { let hash = entry.hash(); - (entry.into(), hash) + (entry, hash) }); // Perform validation of the entry and it's operation. - let operation = validate_operation_with_entry( + let (operation, operation_id) = validate_operation_with_entry( &entry, encoded_entry, skiplink_params.as_ref().map(|(entry, hash)| (entry, hash)), @@ -326,8 +325,9 @@ pub async fn publish( // If the entries' seq num is 1 we insert a new log here. 
if entry.seq_num().is_first() { - let log = S::StorageLog::new(public_key, &operation.schema_id(), &document_id, log_id); - store.insert_log(log).await?; + store + .insert_log(log_id, public_key, &operation.schema_id(), &document_id) + .await?; } /////////////////////////////// @@ -340,7 +340,9 @@ pub async fn publish( .await?; // Insert the operation into the store. - store.insert_operation(&operation, &document_id).await?; + store + .insert_operation(&operation_id, public_key, &operation, &document_id) + .await?; Ok(next_args) } @@ -352,14 +354,14 @@ pub async fn publish( /// - any of the operations contained in the view id _don't_ exist in the store /// - any of the operations contained in the view id return a different document id than any of the /// others -pub async fn get_checked_document_id_for_view_id( +pub async fn get_checked_document_id_for_view_id( store: &S, view_id: &DocumentViewId, ) -> AnyhowResult { let mut found_document_ids: HashSet = HashSet::new(); for operation in view_id.iter() { // If any operation can't be found return an error at this point already. - let document_id = store.get_document_by_operation_id(operation).await?; + let document_id = store.get_document_id_by_operation_id(operation).await?; ensure!( document_id.is_some(), @@ -398,16 +400,16 @@ mod tests { Operation, OperationAction, OperationBuilder, OperationId, OperationValue, }; use p2panda_rs::schema::{FieldType, Schema}; - use p2panda_rs::storage_provider::traits::{EntryStore, EntryWithOperation, LogStore}; + use p2panda_rs::storage_provider::traits::{EntryStore, LogStore}; use p2panda_rs::test_utils::constants::{test_fields, PRIVATE_KEY}; - use p2panda_rs::test_utils::db::test_db::{ - populate_store, send_to_store, test_db_config, PopulateDatabaseConfig, - }; - use p2panda_rs::test_utils::db::{MemoryStore, StorageEntry}; use p2panda_rs::test_utils::fixtures::{ - create_operation, delete_operation, key_pair, operation, public_key, + create_operation, delete_operation, key_pair, operation, populate_store_config, public_key, random_document_view_id, random_hash, schema, update_operation, }; + use p2panda_rs::test_utils::memory_store::helpers::{ + populate_store, send_to_store, PopulateStoreConfig, + }; + use p2panda_rs::test_utils::memory_store::{MemoryStore, StorageEntry}; use rstest::rstest; use crate::graphql::client::NextArguments; @@ -532,9 +534,9 @@ mod tests { schema: Schema, #[case] entries_to_remove: &[LogIdAndSeqNum], #[case] entry_to_publish: LogIdAndSeqNum, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(8, 1, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (key_pairs, _) = populate_store(&store, &config).await; @@ -558,13 +560,13 @@ mod tests { remove_entries(&store, &public_key, entries_to_remove); // Publish the latest entry again and see what happens. 
- let operation = next_entry.payload().unwrap(); + let operation = next_entry.payload.unwrap(); let result = publish( &store, &schema, - &next_entry.clone().into(), - &decode_operation(operation).unwrap(), - operation, + &next_entry.encoded_entry, + &decode_operation(&operation).unwrap(), + &operation, ) .await; @@ -629,9 +631,9 @@ mod tests { // The previous operations described by their log id and seq number (log_id, seq_num) #[case] previous: &[LogIdAndSeqNum], #[case] key_pair: KeyPair, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(8, 2, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (key_pairs, documents) = populate_store(&store, &config).await; @@ -762,9 +764,9 @@ mod tests { #[case] operations_to_remove: &[LogIdAndSeqNum], #[case] document_view_id: &[LogIdAndSeqNum], #[case] key_pair: KeyPair, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(8, 2, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (key_pairs, _) = populate_store(&store, &config).await; @@ -834,11 +836,11 @@ mod tests { ) { let store = MemoryStore::default(); // Populate the db with the number of entries defined in the test params. - let config = PopulateDatabaseConfig { + let config = PopulateStoreConfig { no_of_entries, no_of_logs: 1, no_of_public_keys: 1, - ..PopulateDatabaseConfig::default() + ..PopulateStoreConfig::default() }; let (key_pairs, _) = populate_store(&store, &config).await; @@ -900,9 +902,9 @@ mod tests { #[tokio::test] async fn gets_next_args_other_cases( public_key: PublicKey, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(7, 1, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (_, documents) = populate_store(&store, &config).await; @@ -966,9 +968,9 @@ mod tests { schema: Schema, #[case] log_id: LogId, #[case] key_pair: KeyPair, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(2, 1, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (_, documents) = populate_store(&store, &config).await; @@ -1017,7 +1019,7 @@ mod tests { // For non error cases we test that there is a log for the updated document. 
let log = store - .get(&author_performing_update, document_id) + .get_log_id(&author_performing_update, document_id) .await .unwrap(); @@ -1055,9 +1057,9 @@ mod tests { #[case] log_id: LogId, #[case] key_pair: KeyPair, operation: Operation, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(1, 2, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let _ = populate_store(&store, &config).await; @@ -1091,7 +1093,7 @@ mod tests { let document_id = encoded_entry.hash().into(); let retrieved_log_id = store - .get(&public_key, &document_id) + .get_log_id(&public_key, &document_id) .await .expect("Retrieve log id for document"); @@ -1111,9 +1113,9 @@ mod tests { async fn publish_to_deleted_documents( schema: Schema, #[case] key_pair: KeyPair, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(2, 1, 1, true)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (_, documents) = populate_store(&store, &config).await; @@ -1167,9 +1169,9 @@ mod tests { #[tokio::test] async fn next_args_deleted_documents( #[case] key_pair: KeyPair, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(3, 1, 1, true)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (_, documents) = populate_store(&store, &config).await; @@ -1260,9 +1262,9 @@ mod tests { #[tokio::test] async fn next_args_max_seq_num_reached( key_pair: KeyPair, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(2, 1, 1, false)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let _ = populate_store(&store, &config).await; @@ -1280,12 +1282,12 @@ mod tests { &SeqNum::new(u64::MAX).unwrap(), Some(&random_hash()), Some(&random_hash()), - entry_two.payload().unwrap(), + entry_two.payload.as_ref().unwrap(), &key_pair, ) .unwrap(); - let entry = StorageEntry::new(&encoded_entry, entry_two.payload()); + let entry = StorageEntry::new(&encoded_entry, entry_two.payload.as_ref()); store .entries @@ -1304,9 +1306,9 @@ mod tests { async fn publish_max_seq_num_reached( schema: Schema, key_pair: KeyPair, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(2, 1, 1, false)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let _ = populate_store(&store, &config).await; @@ -1328,12 +1330,12 @@ mod tests { &SeqNum::new(18446744073709551611).unwrap(), Some(&random_hash()), Some(&random_hash()), - entry_two.payload().unwrap(), + entry_two.payload.as_ref().unwrap(), &key_pair, ) .unwrap(); - let skiplink = StorageEntry::new(&encoded_entry, entry_two.payload()); + let skiplink = StorageEntry::new(&encoded_entry, entry_two.payload.as_ref()); store .entries .lock() @@ -1346,12 +1348,12 @@ mod tests { &SeqNum::new(u64::MAX - 1).unwrap(), None, Some(&random_hash()), - entry_two.payload().unwrap(), + entry_two.payload.as_ref().unwrap(), &key_pair, ) .unwrap(); - let backlink = StorageEntry::new(&encoded_entry, entry_two.payload()); + let backlink = StorageEntry::new(&encoded_entry, entry_two.payload.as_ref()); store .entries .lock() @@ -1364,13 +1366,13 @@ mod tests { &SeqNum::new(u64::MAX).unwrap(), Some(&skiplink.hash()), Some(&backlink.hash()), - entry_two.payload().unwrap(), + entry_two.payload.as_ref().unwrap(), &key_pair, ) .unwrap(); // Publish the MAX_SEQ_NUM entry - let operation = entry_two.payload().unwrap(); + let 
operation = &entry_two.payload.unwrap(); let result = publish( &store, &schema, @@ -1381,10 +1383,7 @@ mod tests { .await; // try and get the MAX_SEQ_NUM entry again (it shouldn't be there) - let entry_at_max_seq_num = store - .get_entry_by_hash(&encoded_entry.hash()) - .await - .unwrap(); + let entry_at_max_seq_num = store.get_entry(&encoded_entry.hash()).await.unwrap(); // We expect the entry we published not to have been stored in the db assert!(entry_at_max_seq_num.is_none()); diff --git a/aquadoggo/src/graphql/client/dynamic_query.rs b/aquadoggo/src/graphql/client/dynamic_query.rs index 776b74e53..bceee2eae 100644 --- a/aquadoggo/src/graphql/client/dynamic_query.rs +++ b/aquadoggo/src/graphql/client/dynamic_query.rs @@ -9,13 +9,14 @@ use async_recursion::async_recursion; use async_trait::async_trait; use futures::future; use log::{debug, error, info}; +use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::{DocumentId, DocumentView, DocumentViewId}; use p2panda_rs::operation::OperationValue; use p2panda_rs::schema::SchemaId; -use p2panda_rs::storage_provider::traits::{DocumentStore, OperationStore}; +use p2panda_rs::storage_provider::traits::DocumentStore; use p2panda_rs::Human; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; use crate::graphql::client::dynamic_types; use crate::graphql::client::dynamic_types::DocumentMeta; use crate::graphql::client::utils::validate_view_matches_schema; @@ -131,7 +132,7 @@ impl DynamicQuery { ) -> ServerResult> { info!("Handling collection query for {}", schema_id.display()); - let store = ctx.data_unchecked::(); + let store = ctx.data_unchecked::(); // Retrieve all documents for schema from storage. let documents = store @@ -140,9 +141,21 @@ impl DynamicQuery { .map_err(|err| ServerError::new(err.to_string(), None))?; // Assemble views async - let documents_graphql_values = documents.into_iter().map(|view| async move { + let documents_graphql_values = documents.into_iter().map(|document| async move { let selected_fields = ctx.field().selection_set().collect(); - self.document_response(view, ctx, selected_fields).await + match document.view() { + Some(view) => { + self.document_response( + document.id(), + &view, + document.schema_id(), + ctx, + selected_fields, + ) + .await + } + None => Ok(Value::Null), + } }); Ok(Some(Value::List( @@ -167,14 +180,14 @@ impl DynamicQuery { ) -> ServerResult { debug!("Fetching {} from store", document_id.display()); - let store = ctx.data_unchecked::(); - let view = store.get_document_by_id(&document_id).await.unwrap(); - match view { - Some(view) => { + let store = ctx.data_unchecked::(); + let document = store.get_document(&document_id).await.unwrap(); + match document { + Some(document) => { // Validate the document's schema if the `validate_schema` argument is set. if let Some(expected_schema_id) = validate_schema { validate_view_matches_schema( - view.id(), + document.view_id(), expected_schema_id, store, Some(ctx.item.pos), @@ -182,7 +195,15 @@ impl DynamicQuery { .await?; } - self.document_response(view, ctx, selected_fields).await + // We can unwrap the document view here as documents returned from this store method all contain views. + self.document_response( + document.id(), + &document.view().unwrap(), + document.schema_id(), + ctx, + selected_fields, + ) + .await } None => { error!("No view found for document {}", document_id.as_str()); @@ -191,7 +212,7 @@ impl DynamicQuery { } } - /// Fetches the given document view id from the store and returns it as a GraphQL value. 
+ /// Fetches a document from the store by view id and returns it as a GraphQL value. /// /// Recurses into relations when those are selected in `selected_fields`. /// @@ -207,24 +228,33 @@ impl DynamicQuery { ) -> ServerResult { debug!("Fetching {} from store", document_view_id.display()); - let store = ctx.data_unchecked::(); - let view = store - .get_document_view_by_id(&document_view_id) + let store = ctx.data_unchecked::(); + let document = store + .get_document_by_view_id(&document_view_id) .await + // @TODO: Not sure why it's ok to unwrap here, needs checking and comment adding. .unwrap(); - match view { - Some(view) => { + match document { + Some(document) => { // Validate the document's schema if the `validate_schema` argument is set. if let Some(expected_schema_id) = validate_schema { validate_view_matches_schema( - view.id(), + document.view_id(), expected_schema_id, store, Some(ctx.item.pos), ) .await?; } - self.document_response(view, ctx, selected_fields).await + // We can unwrap the document view here as documents returned from this store method all contain views. + self.document_response( + document.id(), + &document.view().unwrap(), + document.schema_id(), + ctx, + selected_fields, + ) + .await } None => Ok(Value::Null), } @@ -236,7 +266,9 @@ impl DynamicQuery { #[async_recursion] async fn document_response( &self, - view: DocumentView, + document_id: &DocumentId, + document_view: &DocumentView, + schema_id: &SchemaId, ctx: &Context<'_>, selected_fields: Vec>, ) -> ServerResult { @@ -248,48 +280,30 @@ impl DynamicQuery { match field.name() { "__typename" => { - let store = ctx.data_unchecked::(); - let schema_id = store - .get_schema_by_document_view(view.id()) - .await - .map_err(|err| ServerError::new(err.to_string(), None))? - .unwrap() - .to_string(); - document_fields.insert(response_key, Value::String(schema_id)); + document_fields.insert(response_key, Value::String(schema_id.to_string())); } dynamic_types::document::META_FIELD => { - let store = ctx.data_unchecked::(); - let document_id = store - .get_document_by_operation_id(view.id().graph_tips().first().unwrap()) - .await - .map_err(|err| ServerError::new(err.to_string(), None))? - .unwrap(); document_fields.insert( response_key, - DocumentMeta::resolve(field, Some(&document_id), Some(view.id()))?, + DocumentMeta::resolve(field, Some(&document_id), Some(document_view.id()))?, ); } dynamic_types::document::FIELDS_FIELD => { let subselection = field.selection_set().collect(); document_fields.insert( response_key, - self.document_fields_response(view.clone(), ctx, subselection) + self.document_fields_response(document_view, schema_id, ctx, subselection) .await?, ); } - _ => { - let store = ctx.data_unchecked::(); - let schema_id = store - .get_schema_by_document_view(view.id()) - .await - .map_err(|err| ServerError::new(err.to_string(), None))? - .unwrap() - .to_string(); - Err(ServerError::new( - format!("Field '{}' does not exist on {}", field.name(), schema_id,), - None, - ))? 
- } + _ => Err(ServerError::new( + format!( + "Field '{}' does not exist on {}", + field.name(), + schema_id.to_string() + ), + None, + ))?, } } @@ -302,17 +316,11 @@ impl DynamicQuery { #[async_recursion] async fn document_fields_response( &self, - view: DocumentView, + document_view: &DocumentView, + schema_id: &SchemaId, ctx: &Context<'_>, selected_fields: Vec>, ) -> ServerResult { - let store = ctx.data_unchecked::(); - let schema_id = store - .get_schema_by_document_view(view.id()) - .await - .map_err(|err| ServerError::new(err.to_string(), None))? - .unwrap(); - let schema_provider = ctx.data_unchecked::(); // Unwrap because this schema id comes from the store. let schema = schema_provider.get(&schema_id).await.unwrap(); @@ -347,7 +355,7 @@ impl DynamicQuery { } // Retrieve the current field's value from the document view. Unwrap because we have // checked that this field exists on the schema. - let document_view_value = view.get(selected_field.name()).unwrap(); + let document_view_value = document_view.get(selected_field.name()).unwrap(); // Collect any further fields that have been selected on the current field. let next_selection: Vec> = diff --git a/aquadoggo/src/graphql/client/mutation.rs b/aquadoggo/src/graphql/client/mutation.rs index cd25df19f..3b7ff4ce2 100644 --- a/aquadoggo/src/graphql/client/mutation.rs +++ b/aquadoggo/src/graphql/client/mutation.rs @@ -9,7 +9,7 @@ use p2panda_rs::operation::traits::Schematic; use p2panda_rs::operation::{EncodedOperation, OperationId}; use crate::bus::{ServiceMessage, ServiceSender}; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; use crate::domain::publish; use crate::graphql::client::NextArguments; use crate::graphql::scalars; @@ -35,7 +35,7 @@ impl ClientMutationRoot { )] operation: scalars::EncodedOperationScalar, ) -> Result { - let store = ctx.data::()?; + let store = ctx.data::()?; let tx = ctx.data::()?; let schema_provider = ctx.data::()?; @@ -97,7 +97,7 @@ mod tests { use p2panda_rs::operation::{EncodedOperation, OperationValue}; use p2panda_rs::schema::{FieldType, Schema, SchemaId}; use p2panda_rs::serde::serialize_value; - use p2panda_rs::storage_provider::traits::{EntryStore, EntryWithOperation}; + use p2panda_rs::storage_provider::traits::EntryStore; use p2panda_rs::test_utils::constants::{HASH, PRIVATE_KEY}; use p2panda_rs::test_utils::fixtures::{ create_operation, delete_operation, encoded_entry, encoded_operation, @@ -755,11 +755,10 @@ mod tests { .await .unwrap(); let entry = entries.first().unwrap(); - let encoded_entry: EncodedEntry = entry.to_owned().into(); // Prepare a publish entry request for the entry. let publish_request = publish_request( - &encoded_entry.to_string(), + &entry.encoded_entry.to_string(), &entry.payload().unwrap().to_string(), ); diff --git a/aquadoggo/src/graphql/client/static_query.rs b/aquadoggo/src/graphql/client/static_query.rs index 0f1ce7e23..cb06f27e1 100644 --- a/aquadoggo/src/graphql/client/static_query.rs +++ b/aquadoggo/src/graphql/client/static_query.rs @@ -5,7 +5,7 @@ use async_graphql::{Context, Object, Result}; use p2panda_rs::document::DocumentViewId; use p2panda_rs::identity::PublicKey; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; use crate::domain::next_args; use crate::graphql::client::NextArguments; use crate::graphql::scalars; @@ -34,7 +34,7 @@ impl StaticQuery { document_view_id: Option, ) -> Result { // Access the store from context. - let store = ctx.data::()?; + let store = ctx.data::()?; // Convert and validate passed parameters. 
let public_key: PublicKey = public_key.into(); diff --git a/aquadoggo/src/graphql/client/utils.rs b/aquadoggo/src/graphql/client/utils.rs index b2ed62ecd..30c534c7a 100644 --- a/aquadoggo/src/graphql/client/utils.rs +++ b/aquadoggo/src/graphql/client/utils.rs @@ -4,7 +4,7 @@ use async_graphql::{Pos, ServerError, ServerResult}; use p2panda_rs::document::DocumentViewId; use p2panda_rs::schema::SchemaId; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; /// Validate that the given view matches the given schema. /// @@ -12,7 +12,7 @@ use crate::db::provider::SqlStorage; pub async fn validate_view_matches_schema( document_view_id: &DocumentViewId, schema_id: &SchemaId, - store: &SqlStorage, + store: &SqlStore, pos: Option, ) -> ServerResult<()> { let document_schema_id = store diff --git a/aquadoggo/src/graphql/replication/client.rs b/aquadoggo/src/graphql/replication/client.rs index a6966167a..884d14e23 100644 --- a/aquadoggo/src/graphql/replication/client.rs +++ b/aquadoggo/src/graphql/replication/client.rs @@ -8,7 +8,7 @@ use p2panda_rs::entry::{LogId, SeqNum}; use p2panda_rs::identity::PublicKey; use serde::{Deserialize, Serialize}; -use crate::db::stores::StorageEntry; +use crate::db::types::StorageEntry; use crate::graphql::pagination::Paginated; use crate::graphql::replication::response::EncodedEntryAndOperation; use crate::graphql::scalars; diff --git a/aquadoggo/src/graphql/replication/query.rs b/aquadoggo/src/graphql/replication/query.rs index cded60c5b..1f2a3c213 100644 --- a/aquadoggo/src/graphql/replication/query.rs +++ b/aquadoggo/src/graphql/replication/query.rs @@ -7,7 +7,7 @@ use p2panda_rs::entry::traits::AsEntry; use p2panda_rs::entry::SeqNum; use p2panda_rs::storage_provider::traits::EntryStore; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; use crate::graphql::replication::response::EncodedEntryAndOperation; use crate::graphql::scalars; @@ -33,8 +33,8 @@ impl ReplicationRoot { ctx: &Context<'a>, hash: scalars::EntryHashScalar, ) -> Result { - let store = ctx.data::()?; - let result = store.get_entry_by_hash(&hash.clone().into()).await?; + let store = ctx.data::()?; + let result = store.get_entry(&hash.clone().into()).await?; match result { Some(inner) => Ok(EncodedEntryAndOperation::from(inner)), @@ -55,7 +55,7 @@ impl ReplicationRoot { #[graphql(name = "publicKey", desc = "Public key of the entry author")] public_key: scalars::PublicKeyScalar, ) -> Result { - let store = ctx.data::()?; + let store = ctx.data::()?; let result = store .get_entry_at_seq_num(&public_key.into(), &log_id.into(), &seq_num.into()) @@ -86,7 +86,7 @@ impl ReplicationRoot { first: Option, after: Option, ) -> Result { - let store = ctx.data::()?; + let store = ctx.data::()?; query( after, @@ -158,8 +158,8 @@ impl CursorType for scalars::SeqNumScalar { mod tests { use async_graphql::{EmptyMutation, EmptySubscription, Request, Schema}; use p2panda_rs::hash::Hash; - use p2panda_rs::test_utils::db::test_db::{populate_store, PopulateDatabaseConfig}; use p2panda_rs::test_utils::fixtures::random_hash; + use p2panda_rs::test_utils::memory_store::helpers::{populate_store, PopulateStoreConfig}; use rstest::rstest; use crate::db::stores::test_utils::{ @@ -319,7 +319,7 @@ mod tests { let (key_pairs, _) = populate_store( &billie_db.store, - &PopulateDatabaseConfig { + &PopulateStoreConfig { no_of_entries: entries_in_log, no_of_logs: 1, no_of_public_keys: 1, diff --git a/aquadoggo/src/graphql/replication/response.rs b/aquadoggo/src/graphql/replication/response.rs index 
5a419f581..ed87e977d 100644 --- a/aquadoggo/src/graphql/replication/response.rs +++ b/aquadoggo/src/graphql/replication/response.rs @@ -7,11 +7,11 @@ use async_graphql::{ComplexObject, Context, SimpleObject}; use p2panda_rs::entry::decode::decode_entry; use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry}; use p2panda_rs::entry::EncodedEntry; -use p2panda_rs::storage_provider::traits::{EntryStore, EntryWithOperation}; +use p2panda_rs::storage_provider::traits::EntryStore; use serde::{Deserialize, Serialize}; -use crate::db::provider::SqlStorage; -use crate::db::stores::StorageEntry; +use crate::db::types::StorageEntry; +use crate::db::SqlStore; use crate::graphql::scalars; /// Encoded and signed entry with its regarding encoded operation payload. @@ -32,7 +32,7 @@ impl EncodedEntryAndOperation { &self, ctx: &Context<'a>, ) -> async_graphql::Result> { - let store = ctx.data::()?; + let store = ctx.data::()?; // Decode entry let entry_encoded: EncodedEntry = self.entry.clone().into(); @@ -70,7 +70,7 @@ impl TryFrom for StorageEntry { .operation .ok_or_else(|| anyhow!("Storage entry requires operation to be given"))?; let encoded_entry = encoded.entry; - let entry = decode_entry(&encoded_entry.clone().into())?; + let entry = decode_entry(&EncodedEntry::from(encoded_entry.clone()))?; let storage_entry = StorageEntry { public_key: entry.public_key().to_owned(), diff --git a/aquadoggo/src/graphql/schema.rs b/aquadoggo/src/graphql/schema.rs index 794f3319c..183de4a04 100644 --- a/aquadoggo/src/graphql/schema.rs +++ b/aquadoggo/src/graphql/schema.rs @@ -9,7 +9,7 @@ use p2panda_rs::Human; use tokio::sync::Mutex; use crate::bus::ServiceSender; -use crate::db::provider::SqlStorage; +use crate::db::SqlStore; use crate::graphql::client::{ClientMutationRoot, ClientRoot}; use crate::graphql::replication::ReplicationRoot; use crate::schema::{save_static_schemas, SchemaProvider}; @@ -30,7 +30,7 @@ pub type RootSchema = Schema; /// Builds the root schema that can handle all GraphQL requests from clients (Client API) or other /// nodes (Node API). pub fn build_root_schema( - store: SqlStorage, + store: SqlStore, tx: ServiceSender, schema_provider: SchemaProvider, ) -> RootSchema { @@ -78,7 +78,7 @@ type GraphQLSchemas = Arc>>; #[derive(Clone, Debug)] pub struct GraphQLSharedData { /// Database interface. - store: SqlStorage, + store: SqlStore, /// Communication bus interface to send messages to other services. tx: ServiceSender, @@ -115,11 +115,7 @@ pub struct GraphQLSchemaManager { impl GraphQLSchemaManager { /// Returns a new instance of `GraphQLSchemaManager`. 
- pub async fn new( - store: SqlStorage, - tx: ServiceSender, - schema_provider: SchemaProvider, - ) -> Self { + pub async fn new(store: SqlStore, tx: ServiceSender, schema_provider: SchemaProvider) -> Self { let schemas = Arc::new(Mutex::new(Vec::new())); let shared = GraphQLSharedData { store, diff --git a/aquadoggo/src/materializer/service.rs b/aquadoggo/src/materializer/service.rs index 4065a1b81..346acf32b 100644 --- a/aquadoggo/src/materializer/service.rs +++ b/aquadoggo/src/materializer/service.rs @@ -92,7 +92,7 @@ pub async fn materializer_service( // Resolve document id of regarding operation match context .store - .get_document_by_operation_id(&operation_id) + .get_document_id_by_operation_id(&operation_id) .await .unwrap_or_else(|_| { panic!( @@ -138,14 +138,15 @@ pub async fn materializer_service( mod tests { use std::time::Duration; + use p2panda_rs::document::traits::AsDocument; use p2panda_rs::entry::traits::AsEncodedEntry; use p2panda_rs::identity::KeyPair; use p2panda_rs::operation::{Operation, OperationId, OperationValue}; use p2panda_rs::schema::FieldType; use p2panda_rs::storage_provider::traits::DocumentStore; use p2panda_rs::test_utils::constants::SCHEMA_ID; - use p2panda_rs::test_utils::db::test_db::send_to_store; use p2panda_rs::test_utils::fixtures::{key_pair, operation, operation_fields, schema}; + use p2panda_rs::test_utils::memory_store::helpers::send_to_store; use rstest::rstest; use tokio::sync::{broadcast, oneshot}; use tokio::task; @@ -175,12 +176,7 @@ mod tests { let first_operation_id: OperationId = document_id.to_string().parse().unwrap(); // We expect that the database does not contain any materialized document yet - assert!(db - .store - .get_document_by_id(document_id) - .await - .unwrap() - .is_none()); + assert!(db.store.get_document(document_id).await.unwrap().is_none()); // Prepare arguments for service let context = Context::new( @@ -222,13 +218,13 @@ mod tests { // Check database for materialized documents let document = db .store - .get_document_by_id(document_id) + .get_document(document_id) .await .unwrap() .expect("We expect that the document is `Some`"); assert_eq!(document.id().to_string(), document_id.to_string()); assert_eq!( - document.fields().get("name").unwrap().value().to_owned(), + document.get("name").unwrap().to_owned(), OperationValue::String("panda".into()) ); }); @@ -292,13 +288,13 @@ mod tests { // Check database for materialized documents let document = db .store - .get_document_by_id(document_id) + .get_document(document_id) .await .unwrap() .expect("We expect that the document is `Some`"); assert_eq!(document.id().to_string(), document_id.to_string()); assert_eq!( - document.fields().get("name").unwrap().value().to_owned(), + document.get("name").unwrap().to_owned(), OperationValue::String("panda".into()) ); }); @@ -391,13 +387,14 @@ mod tests { // Check database for materialized documents let document = db .store - .get_document_by_id(document_id) + .get_document(document_id) .await .unwrap() .expect("We expect that the document is `Some`"); - assert_eq!(document.id(), &entry_encoded.hash().into()); + + assert_eq!(document.id(), document_id); assert_eq!( - document.fields().get("name").unwrap().value().to_owned(), + document.get("name").unwrap().to_owned(), OperationValue::String("panda123".into()) ); }); @@ -465,7 +462,7 @@ mod tests { // Check database for materialized documents let document = db .store - .get_document_by_id(&entry_encoded.hash().into()) + .get_document(&entry_encoded.hash().into()) .await .unwrap() 
.expect("We expect that the document is `Some`"); diff --git a/aquadoggo/src/materializer/tasks/dependency.rs b/aquadoggo/src/materializer/tasks/dependency.rs index 7fad76969..df5eebbdd 100644 --- a/aquadoggo/src/materializer/tasks/dependency.rs +++ b/aquadoggo/src/materializer/tasks/dependency.rs @@ -1,6 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use log::debug; +use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::DocumentViewId; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::traits::DocumentStore; @@ -25,11 +26,11 @@ use crate::materializer::TaskInput; pub async fn dependency_task(context: Context, input: TaskInput) -> TaskResult { debug!("Working on {}", input); - // Here we retrive the document view by document view id. - let document_view = match &input.document_view_id { + // Here we retrieve the document by document view id. + let document = match &input.document_view_id { Some(view_id) => context .store - .get_document_view_by_id(view_id) + .get_document_by_view_id(view_id) .await .map_err(|err| { TaskError::Critical(err.to_string()) @@ -39,24 +40,28 @@ pub async fn dependency_task(context: Context, input: TaskInput) -> TaskResult Err(TaskError::Critical("Missing document_view_id in task input".into())), }?; - let document_view = match document_view { - Some(document_view) => { + let document = match document { + Some(document) => { debug!( - "Document view retrieved from storage with id: {}", - document_view.id() + "Document retrieved from storage with view id: {}", + document.view_id() ); - Ok(document_view) + Ok(document) } - // If no document view for the id passed into this task could be retrieved then this - // document has been deleted or the document view id was invalid. As "dependency" tasks - // are only dispatched after a successful "reduce" task, neither `None` case should + // If no document with the view for the id passed into this task could be retrieved then + // this document has been deleted or the document view does not exist. As "dependency" + // tasks are only dispatched after a successful "reduce" task, neither `None` case should // happen, so this is a critical error. None => Err(TaskError::Critical(format!( - "Expected document view {} not found in store", + "Expected document with view {} not found in store", &input.document_view_id.unwrap() ))), }?; + // We can unwrap the view here as only documents with views (meaning they are not deleted) are + // returned from the store method above. + let document_view = document.view().unwrap(); + let mut next_tasks = Vec::new(); // First we handle all pinned or unpinned relations defined in this task's document view. @@ -106,18 +111,6 @@ pub async fn dependency_task(context: Context, input: TaskInput) -> TaskResult TaskResult next_tasks.push(schema_task()), @@ -153,7 +146,7 @@ async fn construct_relation_task( match context .store - .get_document_view_by_id(&document_view_id) + .get_document_by_view_id(&document_view_id) .await .map_err(|err| TaskError::Critical(err.to_string()))? 
{ @@ -173,21 +166,22 @@ async fn construct_relation_task( #[cfg(test)] mod tests { + use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::{DocumentId, DocumentViewId}; use p2panda_rs::entry::traits::AsEncodedEntry; use p2panda_rs::identity::KeyPair; - use p2panda_rs::operation::traits::AsVerifiedOperation; use p2panda_rs::operation::{ - Operation, OperationBuilder, OperationValue, PinnedRelation, PinnedRelationList, Relation, - RelationList, + Operation, OperationBuilder, OperationId, OperationValue, PinnedRelation, + PinnedRelationList, Relation, RelationList, }; use p2panda_rs::schema::{FieldType, Schema, SchemaId}; use p2panda_rs::storage_provider::traits::{DocumentStore, OperationStore}; use p2panda_rs::test_utils::constants; - use p2panda_rs::test_utils::db::test_db::send_to_store; use p2panda_rs::test_utils::fixtures::{ key_pair, random_document_id, random_document_view_id, schema, schema_fields, }; + use p2panda_rs::test_utils::memory_store::helpers::send_to_store; + use p2panda_rs::WithId; use rstest::rstest; use crate::db::stores::test_utils::{ @@ -349,14 +343,9 @@ mod tests { } for document_id in &db.test_data.documents { - let document_view = db - .store - .get_document_by_id(document_id) - .await - .unwrap() - .unwrap(); + let document = db.store.get_document(document_id).await.unwrap().unwrap(); - let input = TaskInput::new(None, Some(document_view.id().clone())); + let input = TaskInput::new(None, Some(document.view_id().clone())); let reduce_tasks = dependency_task(db.context.clone(), input) .await @@ -388,14 +377,9 @@ mod tests { // Here we have one materialised document, (we are calling it a child as we will // shortly be publishing parents) it contains relations which are not materialised yet // so should dispatch a reduce task for each one. - let document_view_of_child = db - .store - .get_document_by_id(&document_id) - .await - .unwrap() - .unwrap(); + let child_document = db.store.get_document(&document_id).await.unwrap().unwrap(); - let document_view_id_of_child = document_view_of_child.id(); + let document_view_id_of_child = child_document.view_id(); let schema = add_schema( &mut db, @@ -521,7 +505,10 @@ mod tests { .await .unwrap(); - let document_view_id: DocumentViewId = document_operations[1].id().clone().into(); + let document_view_id: DocumentViewId = + WithId::::id(&document_operations[1]) + .clone() + .into(); let input = TaskInput::new(None, Some(document_view_id.clone())); @@ -638,13 +625,14 @@ mod tests { .unwrap(); // Materialise the schema definition. - let document_view_id: DocumentViewId = entry_signed.hash().into(); - let input = TaskInput::new(None, Some(document_view_id.clone())); + let document_id: DocumentId = entry_signed.hash().into(); + let input = TaskInput::new(Some(document_id.clone()), None); reduce_task(db.context.clone(), input.clone()) .await .unwrap(); // Dispatch a dependency task for the schema definition. 
+ let document_view_id: DocumentViewId = entry_signed.hash().into(); let input = TaskInput::new(None, Some(document_view_id)); let tasks = dependency_task(db.context.clone(), input) .await diff --git a/aquadoggo/src/materializer/tasks/reduce.rs b/aquadoggo/src/materializer/tasks/reduce.rs index 0724141fe..37a365df8 100644 --- a/aquadoggo/src/materializer/tasks/reduce.rs +++ b/aquadoggo/src/materializer/tasks/reduce.rs @@ -1,10 +1,14 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use std::convert::TryFrom; + use log::{debug, info}; -use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewId}; -use p2panda_rs::operation::traits::AsVerifiedOperation; -use p2panda_rs::storage_provider::traits::{DocumentStore, OperationStore, StorageProvider}; -use p2panda_rs::Human; +use p2panda_rs::document::traits::AsDocument; +use p2panda_rs::document::{Document, DocumentBuilder, DocumentId, DocumentViewId}; +use p2panda_rs::operation::traits::{AsOperation, WithPublicKey}; +use p2panda_rs::operation::OperationId; +use p2panda_rs::storage_provider::traits::{DocumentStore, EntryStore, LogStore, OperationStore}; +use p2panda_rs::{Human, WithId}; use crate::context::Context; use crate::materializer::worker::{Task, TaskError, TaskResult}; @@ -51,9 +55,9 @@ pub async fn reduce_task(context: Context, input: TaskInput) -> TaskResult reduce_document_view(&context, view_id, operations).await?, + Some(view_id) => reduce_document_view(&context, view_id, &operations).await?, // If no document_view_id was passed, this is a document_id reduce task. - None => reduce_document(&context, operations).await?, + None => reduce_document(&context, &operations).await?, }; // Dispatch a "dependency" task if we created a new document view @@ -78,7 +82,7 @@ pub async fn reduce_task(context: Context, input: TaskInput) -> TaskResult( +async fn resolve_document_id( context: &Context, input: &TaskInput, ) -> Result, TaskError> { @@ -96,7 +100,7 @@ async fn resolve_document_id( context .store - .get_document_by_operation_id(operation_id) + .get_document_id_by_operation_id(operation_id) .await .map_err(|err| TaskError::Critical(err.to_string())) } @@ -110,14 +114,13 @@ async fn resolve_document_id( /// /// It returns `None` if either that document view reached "deleted" status or we don't have enough /// operations to materialise. 
-async fn reduce_document_view( +async fn reduce_document_view + WithPublicKey>( context: &Context, document_view_id: &DocumentViewId, - operations: Vec, + operations: &Vec, ) -> Result, TaskError> { - let document = match DocumentBuilder::new(operations) - .build_to_view_id(Some(document_view_id.to_owned())) - { + let document_builder: DocumentBuilder = operations.into(); + let document = match document_builder.build_to_view_id(Some(document_view_id.to_owned())) { Ok(document) => { // If the document was deleted, then we return nothing debug!( @@ -146,11 +149,15 @@ async fn reduce_document_view( // Insert the new document view into the database context .store - .insert_document_view(document.view().unwrap(), document.schema()) + .insert_document_view( + &document.view().unwrap(), + document.id(), + document.schema_id(), + ) .await .map_err(|err| TaskError::Critical(err.to_string()))?; - info!("Stored {} view {}", document, document.view_id()); + info!("Stored {} document view {}", document, document.view_id()); // Return the new view id to be used in the resulting dependency task Ok(Some(document.view_id().to_owned())) @@ -161,11 +168,11 @@ async fn reduce_document_view( /// /// It returns `None` if either that document view reached "deleted" status or we don't have enough /// operations to materialise. -async fn reduce_document( +async fn reduce_document + WithPublicKey>( context: &Context, - operations: Vec, + operations: &Vec, ) -> Result, TaskError> { - match DocumentBuilder::new(operations).build() { + match Document::try_from(operations) { Ok(document) => { // Insert this document into storage. If it already existed, this will update it's // current view @@ -209,16 +216,19 @@ async fn reduce_document( #[cfg(test)] mod tests { - use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewId}; - use p2panda_rs::operation::traits::AsVerifiedOperation; + use std::convert::TryFrom; + + use p2panda_rs::document::materialization::build_graph; + use p2panda_rs::document::traits::AsDocument; + use p2panda_rs::document::{Document, DocumentBuilder, DocumentId, DocumentViewId}; use p2panda_rs::operation::OperationValue; use p2panda_rs::schema::Schema; use p2panda_rs::storage_provider::traits::{DocumentStore, OperationStore}; use p2panda_rs::test_utils::constants; - use p2panda_rs::test_utils::db::test_db::send_to_store; use p2panda_rs::test_utils::fixtures::{ operation, operation_fields, random_document_id, random_document_view_id, schema, }; + use p2panda_rs::test_utils::memory_store::helpers::send_to_store; use rstest::rstest; use crate::db::stores::test_utils::{ @@ -248,10 +258,10 @@ mod tests { } for document_id in &db.test_data.documents { - let document_view = db.store.get_document_by_id(document_id).await.unwrap(); + let document = db.store.get_document(document_id).await.unwrap(); assert_eq!( - document_view.unwrap().get("username").unwrap().value(), + document.unwrap().get("username").unwrap(), &OperationValue::String("PANDA".to_string()) ) } @@ -305,9 +315,9 @@ mod tests { assert!(reduce_task(db.context.clone(), input).await.is_ok()); // The new view should exist and the document should refer to it. 
- let document_view = db.store.get_document_by_id(&document_id).await.unwrap(); + let document = db.store.get_document(document_id).await.unwrap(); assert_eq!( - document_view.unwrap().get("username").unwrap().value(), + document.unwrap().get("username").unwrap(), &OperationValue::String("meeeeeee".to_string()) ) }) @@ -320,52 +330,71 @@ mod tests { runner: TestDatabaseRunner, ) { runner.with_db_teardown(|db: TestDatabase| async move { + // The document id for a document whose operations are in the database but which hasn't been + // materialised yet. + let document_id = &db.test_data.documents[0]; + + // Get the operations. let document_operations = db .store - .get_operations_by_document_id(&db.test_data.documents[0]) + .get_operations_by_document_id(document_id) .await .unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); - let mut sorted_document_operations = document.operations().clone(); - - let document_view_id: DocumentViewId = sorted_document_operations - .pop() + // Sort the operations into the order they need to be reduced in. + let document_builder = DocumentBuilder::from(&document_operations); + let sorted_document_operations = build_graph(&document_builder.operations()) .unwrap() - .id() - .clone() - .into(); - - let input = TaskInput::new(None, Some(document_view_id.clone())); + .sort() + .unwrap() + .sorted(); + // Reduce the document to its current view and insert it into the database. + let input = TaskInput::new(Some(document_id.clone()), None); assert!(reduce_task(db.context.clone(), input).await.is_ok()); - let document_view = db + // We should be able to query this specific view now and receive the expected state. + let document_view_id: DocumentViewId = + sorted_document_operations.get(1).unwrap().clone().0.into(); + let document = db .store - .get_document_view_by_id(&document_view_id) + .get_document_by_view_id(&document_view_id) .await .unwrap(); assert_eq!( - document_view.unwrap().get("username").unwrap().value(), + document.unwrap().get("username").unwrap(), &OperationValue::String("PANDA".to_string()) ); - // We didn't reduce this document_view_id so it shouldn't exist in the db. - let document_view_id: DocumentViewId = sorted_document_operations - .pop() - .unwrap() - .id() - .clone() - .into(); + // We didn't reduce this document_view so it shouldn't exist in the db. + let document_view_id: DocumentViewId = + sorted_document_operations.get(0).unwrap().clone().0.into(); - let document_view = db + let document = db .store - .get_document_view_by_id(&document_view_id) + .get_document_by_view_id(&document_view_id) .await .unwrap(); - assert!(document_view.is_none()); + assert!(document.is_none()); + + // But if we now request that this earlier view is materialised for the document... + let input = TaskInput::new(None, Some(document_view_id.clone())); + assert!(reduce_task(db.context.clone(), input).await.is_ok()); + + // Then we should be able to query it and receive the expected value.
+ let document = db + .store + .get_document_by_view_id(&document_view_id) + .await + .unwrap(); + + assert!(document.is_some()); + assert_eq!( + document.unwrap().get("username").unwrap(), + &OperationValue::String("bubu".to_string()) + ); }); } @@ -383,8 +412,8 @@ mod tests { } for document_id in &db.test_data.documents { - let document_view = db.store.get_document_by_id(document_id).await.unwrap(); - assert!(document_view.is_none()) + let document = db.store.get_document(document_id).await.unwrap(); + assert!(document.is_none()) } let document_operations = db @@ -393,7 +422,7 @@ mod tests { .await .unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); + let document = Document::try_from(&document_operations).unwrap(); let input = TaskInput::new(None, Some(document.view_id().clone())); let tasks = reduce_task(db.context.clone(), input).await.unwrap(); diff --git a/aquadoggo/src/materializer/tasks/schema.rs b/aquadoggo/src/materializer/tasks/schema.rs index 5562761d9..933b9a442 100644 --- a/aquadoggo/src/materializer/tasks/schema.rs +++ b/aquadoggo/src/materializer/tasks/schema.rs @@ -1,13 +1,13 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use log::debug; +use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::DocumentViewId; use p2panda_rs::operation::OperationValue; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::traits::DocumentStore; use crate::context::Context; -use crate::db::traits::SchemaStore; use crate::materializer::worker::{TaskError, TaskResult}; use crate::materializer::TaskInput; @@ -93,14 +93,16 @@ async fn get_related_schema_definitions( // Collect all schema definitions that use the targeted field definition let mut related_schema_definitions = vec![]; for schema in schema_definitions { - let fields_value = schema.fields().get("fields").unwrap().value(); + // We can unwrap the value here as all documents returned from the storage method above + // have a current view (they are not deleted). 
+ let fields_value = schema.get("fields").unwrap(); if let OperationValue::PinnedRelationList(fields) = fields_value { if fields .iter() .any(|field_view_id| field_view_id == target_field_definition) { - related_schema_definitions.push(schema.id().clone()) + related_schema_definitions.push(schema.view_id().clone()) } else { continue; } @@ -119,13 +121,14 @@ async fn get_related_schema_definitions( #[cfg(test)] mod tests { use log::debug; + use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::{DocumentId, DocumentViewId}; use p2panda_rs::entry::traits::AsEncodedEntry; use p2panda_rs::identity::KeyPair; use p2panda_rs::operation::{OperationBuilder, OperationValue, PinnedRelationList}; use p2panda_rs::schema::{FieldType, Schema, SchemaId}; use p2panda_rs::storage_provider::traits::DocumentStore; - use p2panda_rs::test_utils::db::test_db::send_to_store; + use p2panda_rs::test_utils::memory_store::helpers::send_to_store; use rstest::rstest; use crate::context::Context; @@ -163,11 +166,11 @@ mod tests { reduce_task(context.clone(), input).await.unwrap(); let field_view_id = db .store - .get_document_by_id(&field_definition_id) + .get_document(&field_definition_id) .await .unwrap() .unwrap() - .id() + .view_id() .to_owned(); debug!("Created field definition {}", &field_view_id); @@ -204,11 +207,11 @@ mod tests { reduce_task(context.clone(), input).await.unwrap(); let definition_view_id = db .store - .get_document_by_id(&schema_definition_id) + .get_document(&schema_definition_id) .await .unwrap() .unwrap() - .id() + .view_id() .to_owned(); debug!("Created schema definition {}", definition_view_id); diff --git a/aquadoggo/src/node.rs b/aquadoggo/src/node.rs index 03aec1b41..2680bd457 100644 --- a/aquadoggo/src/node.rs +++ b/aquadoggo/src/node.rs @@ -5,8 +5,7 @@ use anyhow::Result; use crate::bus::ServiceMessage; use crate::config::Configuration; use crate::context::Context; -use crate::db::provider::SqlStorage; -use crate::db::traits::SchemaStore; +use crate::db::SqlStore; use crate::db::{connection_pool, create_database, run_pending_migrations, Pool}; use crate::http::http_service; use crate::manager::ServiceManager; @@ -55,7 +54,7 @@ impl Node { .expect("Could not initialize database"); // Prepare storage and schema providers using connection pool. - let store = SqlStorage::new(pool.clone()); + let store = SqlStore::new(pool.clone()); let schemas = SchemaProvider::new(store.get_all_schema().await.unwrap()); // Create service manager with shared data between services. 
diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index d597173a2..463f62c34 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -7,18 +7,17 @@ use anyhow::{anyhow, Result}; use bamboo_rs_core_ed25519_yasmf::verify::verify_batch; use log::{debug, error, trace, warn}; use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry}; -use p2panda_rs::entry::EncodedEntry; use p2panda_rs::entry::LogId; use p2panda_rs::entry::SeqNum; use p2panda_rs::identity::PublicKey; use p2panda_rs::operation::decode::decode_operation; use p2panda_rs::operation::traits::Schematic; -use p2panda_rs::storage_provider::traits::{EntryStore, EntryWithOperation}; +use p2panda_rs::storage_provider::traits::EntryStore; use tokio::task; use crate::bus::{ServiceMessage, ServiceSender}; use crate::context::Context; -use crate::db::stores::StorageEntry; +use crate::db::types::StorageEntry; use crate::domain::publish; use crate::graphql::replication::client; use crate::manager::{ServiceReadySender, Shutdown}; @@ -168,7 +167,6 @@ async fn insert_new_entries( // modular set of methods which can definitely be used here more cleanly. For now, we do it // this way. - let encoded_entry: EncodedEntry = entry.clone().into(); let encoded_operation = entry .payload() .expect("All stored entries contain an operation"); @@ -184,7 +182,7 @@ async fn insert_new_entries( publish( &context.0.store, &schema, - &encoded_entry, + &entry.encoded_entry, &operation, encoded_operation, ) @@ -255,7 +253,7 @@ mod tests { use std::time::Duration; use p2panda_rs::storage_provider::traits::EntryStore; - use p2panda_rs::test_utils::db::test_db::{populate_store, PopulateDatabaseConfig}; + use p2panda_rs::test_utils::memory_store::helpers::{populate_store, PopulateStoreConfig}; use rstest::rstest; use tokio::sync::{broadcast, oneshot}; use tokio::task; @@ -288,7 +286,7 @@ mod tests { .schema_provider .update(doggo_schema()) .await; - let populate_db_config = PopulateDatabaseConfig { + let populate_db_config = PopulateStoreConfig { no_of_entries: 1, no_of_logs: 1, no_of_public_keys: 1, diff --git a/aquadoggo/src/validation.rs b/aquadoggo/src/validation.rs index a82adda0e..7a94c6468 100644 --- a/aquadoggo/src/validation.rs +++ b/aquadoggo/src/validation.rs @@ -5,7 +5,7 @@ use p2panda_rs::document::DocumentId; use p2panda_rs::entry::{LogId, SeqNum}; use p2panda_rs::identity::PublicKey; use p2panda_rs::operation::traits::AsOperation; -use p2panda_rs::storage_provider::traits::StorageProvider; +use p2panda_rs::storage_provider::traits::{EntryStore, LogStore, OperationStore}; use p2panda_rs::Human; /// Verify that a claimed seq num is the next sequence number following the latest. @@ -44,14 +44,14 @@ pub fn is_next_seq_num(latest_seq_num: Option<&SeqNum>, claimed_seq_num: &SeqNum /// - Retrieve the stored log id for the document id /// - If found, ensure it matches the claimed log id /// - If not found retrieve the next available log id for this public key and ensure that matches -pub async fn verify_log_id( +pub async fn verify_log_id( store: &S, public_key: &PublicKey, claimed_log_id: &LogId, document_id: &DocumentId, ) -> Result<()> { // Check if there is a log id registered for this document and public key already in the store. - match store.get(public_key, document_id).await? { + match store.get_log_id(public_key, document_id).await? { Some(expected_log_id) => { // If there is, check it matches the log id encoded in the entry. 
ensure!( @@ -89,7 +89,7 @@ pub async fn verify_log_id( /// An error is returned if: /// - seq num 1 was passed in, which can not have a skiplink /// - the expected skiplink target could not be found in the database. -pub async fn get_expected_skiplink( +pub async fn get_expected_skiplink( store: &S, public_key: &PublicKey, log_id: &LogId, @@ -123,7 +123,7 @@ pub async fn get_expected_skiplink( /// Takes the following steps: /// - retrieve all operations for the given document id /// - ensure none of them contain a DELETE action -pub async fn ensure_document_not_deleted( +pub async fn ensure_document_not_deleted( store: &S, document_id: &DocumentId, ) -> Result<()> { @@ -141,7 +141,7 @@ pub async fn ensure_document_not_deleted( /// Takes the following steps: /// - retrieve the latest log id for the given public key /// - safely increment it by 1 -pub async fn next_log_id(store: &S, public_key: &PublicKey) -> Result { +pub async fn next_log_id(store: &S, public_key: &PublicKey) -> Result { let latest_log_id = store.latest_log_id(public_key).await?; match latest_log_id { @@ -173,11 +173,9 @@ mod tests { use p2panda_rs::entry::{LogId, SeqNum}; use p2panda_rs::identity::KeyPair; use p2panda_rs::test_utils::constants::PRIVATE_KEY; - use p2panda_rs::test_utils::db::test_db::{ - populate_store, test_db_config, PopulateDatabaseConfig, - }; - use p2panda_rs::test_utils::db::MemoryStore; - use p2panda_rs::test_utils::fixtures::{key_pair, random_document_id}; + use p2panda_rs::test_utils::fixtures::{key_pair, populate_store_config, random_document_id}; + use p2panda_rs::test_utils::memory_store::helpers::{populate_store, PopulateStoreConfig}; + use p2panda_rs::test_utils::memory_store::MemoryStore; use rstest::rstest; use super::{ @@ -253,9 +251,9 @@ mod tests { #[case] key_pair: KeyPair, #[case] claimed_log_id: LogId, #[case] document_id: Option, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(2, 2, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (_, documents) = populate_store(&store, &config).await; @@ -290,9 +288,9 @@ mod tests { #[case] key_pair: KeyPair, #[case] log_id: LogId, #[case] seq_num: SeqNum, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(7, 1, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let _ = populate_store(&store, &config).await; @@ -319,9 +317,9 @@ mod tests { key_pair: KeyPair, #[case] seq_num: SeqNum, #[case] expected_seq_num: SeqNum, - #[from(test_db_config)] + #[from(populate_store_config)] #[with(10, 1, 1)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let _ = populate_store(&store, &config).await; @@ -338,9 +336,9 @@ mod tests { #[should_panic(expected = "Document is deleted")] #[tokio::test] async fn identifies_deleted_document( - #[from(test_db_config)] + #[from(populate_store_config)] #[with(3, 1, 1, true)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (_, documents) = populate_store(&store, &config).await; @@ -354,9 +352,9 @@ mod tests { #[rstest] #[tokio::test] async fn identifies_not_deleted_document( - #[from(test_db_config)] + #[from(populate_store_config)] #[with(3, 1, 1, false)] - config: PopulateDatabaseConfig, + config: PopulateStoreConfig, ) { let store = MemoryStore::default(); let (_, documents) = populate_store(&store, &config).await;