diff --git a/Cargo.lock b/Cargo.lock
index aa1647d82..7088eefe1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -104,6 +104,7 @@ dependencies = [
  "anyhow",
  "async-graphql",
  "async-graphql-axum",
+ "async-recursion",
  "async-trait",
  "axum",
  "bamboo-rs-core-ed25519-yasmf",
@@ -262,6 +263,17 @@ dependencies = [
  "serde_json",
 ]
 
+[[package]]
+name = "async-recursion"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "async-stream"
 version = "0.3.3"
diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml
index 6ed2f6b93..9262e6b91 100644
--- a/aquadoggo/Cargo.toml
+++ b/aquadoggo/Cargo.toml
@@ -18,6 +18,7 @@ edition = "2018"
 anyhow = "^1.0.58"
 async-graphql = "^3.0.38"
 async-graphql-axum = "^3.0.38"
+async-recursion = "^1.0.0"
 async-trait = "^0.1.56"
 axum = "^0.5.10"
 bamboo-rs-core-ed25519-yasmf = "^0.1.1"
@@ -30,6 +31,7 @@ futures = "^0.3.21"
 gql_client = "^1.0.6"
 lipmaa-link = "^0.2.2"
 log = "^0.4.17"
+once_cell = "^1.12.0"
 openssl-probe = "^0.1.5"
 p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "5d6508d5a9b4b766621c3bd14879cc568fbac02d" }
 serde = { version = "^1.0.137", features = ["derive"] }
diff --git a/aquadoggo/src/context.rs b/aquadoggo/src/context.rs
index d3f3f61a8..e234b8b53 100644
--- a/aquadoggo/src/context.rs
+++ b/aquadoggo/src/context.rs
@@ -3,25 +3,27 @@ use std::ops::Deref;
 use std::sync::Arc;
 
+use p2panda_rs::storage_provider::traits::StorageProvider;
+
 use crate::config::Configuration;
 use crate::db::provider::SqlStorage;
 use crate::schema::SchemaProvider;
 
 /// Inner data shared across all services.
 #[derive(Debug)]
-pub struct Data {
+pub struct Data<S: StorageProvider = SqlStorage> {
     /// Node configuration.
     pub config: Configuration,
 
     /// Storage provider with database connection pool.
-    pub store: SqlStorage,
+    pub store: S,
 
     /// Schema provider gives access to system and application schemas.
     pub schema_provider: SchemaProvider,
 }
 
-impl Data {
-    pub fn new(store: SqlStorage, config: Configuration, schema_provider: SchemaProvider) -> Self {
+impl<S: StorageProvider> Data<S> {
+    pub fn new(store: S, config: Configuration, schema_provider: SchemaProvider) -> Self {
         Self {
             config,
             store,
@@ -32,23 +34,23 @@ impl Data {
 
 /// Data shared across all services.
 #[derive(Debug)]
-pub struct Context(pub Arc<Data>);
+pub struct Context<S: StorageProvider = SqlStorage>(pub Arc<Data<S>>);
 
-impl Context {
+impl<S: StorageProvider> Context<S> {
     /// Returns a new instance of `Context`.
-    pub fn new(store: SqlStorage, config: Configuration, schema_provider: SchemaProvider) -> Self {
+    pub fn new(store: S, config: Configuration, schema_provider: SchemaProvider) -> Self {
         Self(Arc::new(Data::new(store, config, schema_provider)))
     }
 }
 
-impl Clone for Context {
+impl<S: StorageProvider> Clone for Context<S> {
     fn clone(&self) -> Self {
         Self(self.0.clone())
     }
 }
 
-impl Deref for Context {
-    type Target = Data;
+impl<S: StorageProvider> Deref for Context<S> {
+    type Target = Data<S>;
 
     fn deref(&self) -> &Self::Target {
         self.0.as_ref()
diff --git a/aquadoggo/src/db/models/entry.rs b/aquadoggo/src/db/models/entry.rs
index f34d7d92f..923f66462 100644
--- a/aquadoggo/src/db/models/entry.rs
+++ b/aquadoggo/src/db/models/entry.rs
@@ -8,7 +8,7 @@ use sqlx::FromRow;
 ///
 /// We store the u64 integer values of `log_id` and `seq_num` as strings since SQLite doesn't
 /// support storing unsigned 64 bit integers.
-#[derive(FromRow, Debug, Serialize, Clone, PartialEq)]
+#[derive(FromRow, Debug, Serialize, Clone, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct EntryRow {
     /// Public key of the author.
diff --git a/aquadoggo/src/db/models/task.rs b/aquadoggo/src/db/models/task.rs
index 4bc4d544d..67a1e80e4 100644
--- a/aquadoggo/src/db/models/task.rs
+++ b/aquadoggo/src/db/models/task.rs
@@ -6,7 +6,7 @@ use sqlx::FromRow;
 /// Representation of a row from the `tasks` table as stored in the database.
 ///
 /// This table holds all "pending" tasks of the materialization service worker.
-#[derive(FromRow, Debug, Serialize, Clone, PartialEq)]
+#[derive(FromRow, Debug, Serialize, Clone, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct TaskRow {
     /// Name of the task worker.
diff --git a/aquadoggo/src/db/provider.rs b/aquadoggo/src/db/provider.rs
index 971092f14..3ab5d72dd 100644
--- a/aquadoggo/src/db/provider.rs
+++ b/aquadoggo/src/db/provider.rs
@@ -121,10 +121,8 @@ mod tests {
     use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
     use crate::db::traits::DocumentStore;
 
-    use super::SqlStorage;
-
     /// Inserts a `DocumentView` into the db and returns its view id.
-    async fn insert_document_view(db: &TestDatabase<SqlStorage>) -> DocumentViewId {
+    async fn insert_document_view(db: &TestDatabase) -> DocumentViewId {
         let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
 
         let entry = db
             .store
@@ -156,7 +154,7 @@ mod tests {
         #[with(1, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_view_id = insert_document_view(&db).await;
             let result = db
                 .store
@@ -175,7 +173,7 @@ mod tests {
         #[with(1, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let result = db
                 .store
                 .get_schema_by_document_view(&random_document_view_id)
diff --git a/aquadoggo/src/db/stores/document.rs b/aquadoggo/src/db/stores/document.rs
index 261b70dab..e6bd4a88b 100644
--- a/aquadoggo/src/db/stores/document.rs
+++ b/aquadoggo/src/db/stores/document.rs
@@ -333,7 +333,6 @@ mod tests {
     };
     use rstest::rstest;
 
-    use crate::db::provider::SqlStorage;
     use crate::db::stores::document::{DocumentStore, DocumentView};
     use crate::db::stores::entry::StorageEntry;
     use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
@@ -374,7 +373,7 @@ mod tests {
         #[with(1, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author =
                 Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
 
@@ -436,7 +435,7 @@ mod tests {
         #[with(1, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let view_does_not_exist = db
                 .store
                 .get_document_view_by_id(&random_document_view_id)
@@ -453,7 +452,7 @@ mod tests {
         #[with(10, 1, 1, false, SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author =
                 Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
             let schema_id =
                SchemaId::from_str(SCHEMA_ID).unwrap();
@@ -511,7 +510,7 @@ mod tests {
         #[from(test_db)] runner: TestDatabaseRunner,
         operation: Operation,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_view = DocumentView::new(
                 &document_view_id,
                 &DocumentViewFields::new_from_operation_fields(
@@ -535,7 +534,7 @@ mod tests {
         #[with(1, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_id = db.test_data.documents[0].clone();
 
             let document_operations = db
@@ -582,7 +581,7 @@ mod tests {
         #[with(1, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_id = db.test_data.documents[0].clone();
 
             let document_operations = db
@@ -629,7 +628,7 @@ mod tests {
         #[with(10, 1, 1, true)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_id = db.test_data.documents[0].clone();
 
             let document_operations = db
@@ -656,7 +655,7 @@ mod tests {
         #[with(10, 1, 1, true)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_id = db.test_data.documents[0].clone();
 
             let document_operations = db
@@ -687,7 +686,7 @@ mod tests {
         #[with(10, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_id = db.test_data.documents[0].clone();
 
             let document_operations = db
@@ -722,7 +721,7 @@ mod tests {
         #[with(10, 2, 1, false, SCHEMA_ID.parse().unwrap())]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let schema_id = SchemaId::from_str(SCHEMA_ID).unwrap();
 
             for document_id in &db.test_data.documents {
diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs
index c083dfc7f..70f35ff89 100644
--- a/aquadoggo/src/db/stores/entry.rs
+++ b/aquadoggo/src/db/stores/entry.rs
@@ -22,7 +22,7 @@ use crate::db::provider::SqlStorage;
 ///
 /// This struct implements the `AsStorageEntry` trait which is required when constructing the
 /// `EntryStore`.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct StorageEntry {
     entry_signed: EntrySigned,
     operation_encoded: OperationEncoded,
@@ -430,13 +430,12 @@ mod tests {
     use p2panda_rs::test_utils::fixtures::{entry, key_pair};
     use rstest::rstest;
 
-    use crate::db::provider::SqlStorage;
     use crate::db::stores::entry::StorageEntry;
     use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
 
     #[rstest]
     fn insert_entry(key_pair: KeyPair, entry: Entry, #[from(test_db)] runner: TestDatabaseRunner) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap();
             let operation_encoded = OperationEncoded::try_from(entry.operation().unwrap()).unwrap();
             let doggo_entry = StorageEntry::new(&entry_encoded, &operation_encoded).unwrap();
@@ -452,7 +451,7 @@ mod tests {
         #[with(10, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author =
                 Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
             let log_id = LogId::default();
@@ -481,7 +480,7 @@ mod tests {
         #[with(20, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap();
             let log_id = LogId::default();
 
@@ -510,7 +509,7 @@ mod tests {
         #[with(20, 1, 2, false, SCHEMA_ID.parse().unwrap())]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let schema_not_in_the_db = SchemaId::new_application(
                 "venue",
                 &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(),
@@ -540,7 +539,7 @@ mod tests {
         #[with(10, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author =
                 Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
 
@@ -590,7 +589,7 @@ mod tests {
         #[with(20, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author =
                 Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
 
@@ -629,7 +628,7 @@ mod tests {
         #[with(30, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author =
                 Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
 
@@ -666,7 +665,7 @@ mod tests {
         #[with(20, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author =
                 Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
 
diff --git a/aquadoggo/src/db/stores/log.rs b/aquadoggo/src/db/stores/log.rs
index 93f8227e2..a139d87df 100644
--- a/aquadoggo/src/db/stores/log.rs
+++ b/aquadoggo/src/db/stores/log.rs
@@ -231,7 +231,6 @@ mod tests {
     };
     use rstest::rstest;
 
-    use crate::db::provider::SqlStorage;
     use crate::db::stores::entry::StorageEntry;
     use crate::db::stores::log::StorageLog;
     use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
@@ -241,7 +240,7 @@ mod tests {
         #[from(public_key)] author: Author,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let log_id = db.store.find_document_log_id(&author, None).await.unwrap();
             assert_eq!(log_id, LogId::default());
         });
@@ -254,7 +253,7 @@ mod tests {
         #[from(random_document_id)] document: DocumentId,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let log = StorageLog::new(&author, &schema, &document.clone(), &LogId::default());
 
             assert!(db.store.insert_log(log).await.is_ok());
@@ -271,7 +270,7 @@ mod tests {
         #[from(random_document_id)] document: DocumentId,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let schema = SchemaId::new_application(
                 "venue",
                 &DocumentViewId::new(&[operation_id_1, operation_id_2]).unwrap(),
@@ -289,7 +288,7 @@ mod tests {
         #[from(schema)] schema: SchemaId,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let log_id = db.store.find_document_log_id(&author, None).await.unwrap();
 
             // We expect to be given the next log id when asking for a possible log id for a new
@@ -317,7 +316,7 @@ mod tests {
         #[from(test_db)] runner: TestDatabaseRunner,
         #[from(random_document_id)] document_id: DocumentId,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let log_id = db.store.latest_log_id(&author).await.unwrap();
             assert_eq!(log_id, None);
 
@@ -339,7 +338,7 @@ mod tests {
         #[from(operation_encoded)] operation_encoded: OperationEncoded,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             // Expect database to return nothing yet
             assert_eq!(
                 db.store
@@ -396,7 +395,7 @@ mod tests {
         #[from(random_document_id)] document_third: DocumentId,
         #[from(random_document_id)] document_forth: DocumentId,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             // Register two log ids at the beginning
             let log_1 = StorageLog::new(&author, &schema, &document_first, &LogId::default());
             let log_2 = StorageLog::new(&author, &schema, &document_second, &LogId::new(1));
diff --git a/aquadoggo/src/db/stores/operation.rs b/aquadoggo/src/db/stores/operation.rs
index ed8ab6ae9..7808b33a8 100644
--- a/aquadoggo/src/db/stores/operation.rs
+++ b/aquadoggo/src/db/stores/operation.rs
@@ -294,7 +294,6 @@ mod tests {
     };
     use rstest::rstest;
 
-    use crate::db::provider::SqlStorage;
     use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
 
     #[rstest]
@@ -310,7 +309,7 @@ mod tests {
         document_id: DocumentId,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             // Construct the storage operation.
            let operation = VerifiedOperation::new(&author, &operation_id, &operation).unwrap();
@@ -338,7 +337,7 @@ mod tests {
         document_id: DocumentId,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             db.store
                 .insert_operation(&verified_operation, &document_id)
                 .await
@@ -363,7 +362,7 @@ mod tests {
         document_id: DocumentId,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             assert!(db
                 .store
                 .get_document_by_operation_id(create_operation.operation_id())
@@ -408,7 +407,7 @@ mod tests {
         #[with(5, 1, 1)]
         runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let author = Author::try_from(key_pair.public_key().to_owned()).unwrap();
 
             let latest_entry = db
diff --git a/aquadoggo/src/db/stores/schema.rs b/aquadoggo/src/db/stores/schema.rs
index 14b51ba81..5b113e5f0 100644
--- a/aquadoggo/src/db/stores/schema.rs
+++ b/aquadoggo/src/db/stores/schema.rs
@@ -56,6 +56,8 @@ impl SchemaStore for SqlStorage {
     /// Get all Schema which have been published to this node.
     ///
     /// Returns an error if a fatal db error occured.
+    ///
+    /// Silently ignores incomplete or broken schema definitions.
     async fn get_all_schema(&self) -> Result<Vec<Schema>, SchemaStoreError> {
         let schema_views: Vec<SchemaView> = self
             .get_documents_by_schema(&SchemaId::new("schema_definition_v1")?)
             .await?
@@ -184,7 +186,7 @@ mod tests {
     ) {
         let cddl_str = cddl_str.to_string();
 
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_view_id =
                 insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await;
 
@@ -251,7 +253,7 @@ mod tests {
     ) {
         let err_str = err_str.to_string();
 
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_view_id =
                 insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await;
 
@@ -298,7 +300,7 @@ mod tests {
         key_pair: KeyPair,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(move |db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(move |db: TestDatabase| async move {
             let document_view_id =
                 insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await;
 
@@ -324,7 +326,7 @@ mod tests {
         #[from(test_db)] runner: TestDatabaseRunner,
         key_pair: KeyPair,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let document_view_id = insert_schema_definition(
                 &db.store,
                 &key_pair,
diff --git a/aquadoggo/src/db/stores/task.rs b/aquadoggo/src/db/stores/task.rs
index 3707b1b77..aa45ce9dd 100644
--- a/aquadoggo/src/db/stores/task.rs
+++ b/aquadoggo/src/db/stores/task.rs
@@ -128,7 +128,6 @@ mod tests {
     use p2panda_rs::test_utils::fixtures::{document_id, document_view_id};
     use rstest::rstest;
 
-    use crate::db::provider::SqlStorage;
     use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
     use crate::materializer::{Task, TaskInput};
 
@@ -137,7 +136,7 @@ mod tests {
         document_view_id: DocumentViewId,
         #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             // Prepare test data
             let task = Task::new("reduce", TaskInput::new(None, Some(document_view_id)));
@@ -161,7 +160,7 @@ mod tests {
 
     #[rstest]
     fn avoid_duplicates(document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             // Prepare test data
             let task = Task::new("reduce", TaskInput::new(Some(document_id), None));
 
diff --git a/aquadoggo/src/db/stores/test_utils/runner.rs b/aquadoggo/src/db/stores/test_utils/runner.rs
index b5a30ff5a..010553b3e 100644
--- a/aquadoggo/src/db/stores/test_utils/runner.rs
+++ b/aquadoggo/src/db/stores/test_utils/runner.rs
@@ -11,27 +11,29 @@ use rstest::fixture;
 use tokio::runtime::Builder;
 use tokio::sync::Mutex;
 
+use crate::context::Context;
 use crate::db::provider::SqlStorage;
 use crate::db::stores::test_utils::{
     populate_test_db, PopulateDatabaseConfig, TestData, TestDatabase,
 };
 use crate::db::Pool;
 use crate::test_helpers::{initialize_db, initialize_db_with_url};
+use crate::{Configuration, SchemaProvider};
 
 use super::doggo_test_fields;
 
 #[async_trait::async_trait]
 pub trait AsyncTestFn {
-    async fn call(self, db: TestDatabase<SqlStorage>);
+    async fn call(self, db: TestDatabase);
 }
 
 #[async_trait::async_trait]
 impl<FN, F> AsyncTestFn for FN
 where
-    FN: FnOnce(TestDatabase<SqlStorage>) -> F + Sync + Send,
+    FN: FnOnce(TestDatabase) -> F + Sync + Send,
     F: Future<Output = ()> + Send,
 {
-    async fn call(self, db: TestDatabase<SqlStorage>) {
+    async fn call(self, db: TestDatabase) {
         self(db).await
     }
 }
@@ -77,8 +79,15 @@ impl TestDatabaseRunner {
         runtime.block_on(async {
             // Initialise test database
             let pool = initialize_db().await;
+            let store = SqlStorage::new(pool);
+            let context = Context::new(
+                store.clone(),
+                Configuration::default(),
+                SchemaProvider::default(),
+            );
             let mut db = TestDatabase {
-                store: SqlStorage::new(pool),
+                context,
+                store,
                 test_data: TestData::default(),
             };
 
@@ -123,13 +132,14 @@ impl TestDatabaseManager {
         Self::default()
     }
 
-    pub async fn create(&self, url: &str) -> TestDatabase<SqlStorage> {
-        // Initialise test database
+    pub async fn create(&self, url: &str) -> TestDatabase {
         let pool = initialize_db_with_url(url).await;
-        let test_db = TestDatabase {
-            store: SqlStorage::new(pool.clone()),
-            test_data: TestData::default(),
-        };
+
+        // Initialise test store using pool.
+        let store = SqlStorage::new(pool.clone());
+
+        let test_db = TestDatabase::new(store.clone());
+
         self.pools.lock().await.push(pool);
         test_db
     }
diff --git a/aquadoggo/src/db/stores/test_utils/store.rs b/aquadoggo/src/db/stores/test_utils/store.rs
index 35a22005e..f9d3d7762 100644
--- a/aquadoggo/src/db/stores/test_utils/store.rs
+++ b/aquadoggo/src/db/stores/test_utils/store.rs
@@ -2,27 +2,137 @@
 
 use std::convert::TryFrom;
 
+use log::{debug, info};
 use p2panda_rs::document::{DocumentId, DocumentViewId};
 use p2panda_rs::entry::{sign_and_encode, Entry, EntrySigned};
 use p2panda_rs::hash::Hash;
 use p2panda_rs::identity::{Author, KeyPair};
-use p2panda_rs::operation::{Operation, OperationEncoded, OperationValue};
-use p2panda_rs::schema::SchemaId;
+use p2panda_rs::operation::{Operation, OperationEncoded, OperationFields, OperationValue};
+use p2panda_rs::schema::{FieldType, Schema, SchemaId};
 use p2panda_rs::storage_provider::traits::StorageProvider;
 use p2panda_rs::test_utils::constants::SCHEMA_ID;
 use p2panda_rs::test_utils::fixtures::{operation, operation_fields};
 
+use crate::context::Context;
+use crate::db::provider::SqlStorage;
 use crate::db::stores::test_utils::{doggo_test_fields, test_key_pairs};
 use crate::domain::{next_args, publish};
 use crate::graphql::client::NextEntryArguments;
+use crate::materializer::tasks::{dependency_task, reduce_task, schema_task};
+use crate::materializer::TaskInput;
+use crate::{Configuration, SchemaProvider};
 
 /// Container for `SqlStore` with access to the document ids and key_pairs used in the
 /// pre-populated database for testing.
-pub struct TestDatabase<S: StorageProvider> {
+pub struct TestDatabase<S: StorageProvider = SqlStorage> {
+    pub context: Context<S>,
     pub store: S,
     pub test_data: TestData,
 }
 
+impl<S: StorageProvider + Clone> TestDatabase<S> {
+    pub fn new(store: S) -> Self {
+        // Initialise context for store.
+        let context = Context::new(
+            store.clone(),
+            Configuration::default(),
+            SchemaProvider::default(),
+        );
+
+        // Initialise finished test database.
+        TestDatabase {
+            context,
+            store,
+            test_data: TestData::default(),
+        }
+    }
+}
+
+impl TestDatabase {
+    /// Publish a document and materialise it in the store.
+    ///
+    /// Also runs dependency task for document.
+    pub async fn add_document(
+        &mut self,
+        schema_id: &SchemaId,
+        fields: OperationFields,
+        key_pair: &KeyPair,
+    ) -> DocumentViewId {
+        info!("Creating document for {}", schema_id);
+
+        // Get requested schema from store.
+        let schema = self
+            .context
+            .schema_provider
+            .get(schema_id)
+            .await
+            .expect("Schema not found");
+
+        // Build, publish and reduce create operation for document.
+        let create_op = Operation::new_create(schema.id().to_owned(), fields).unwrap();
+        let (entry_signed, _) = send_to_store(&self.store, &create_op, None, key_pair).await;
+        let input = TaskInput::new(Some(DocumentId::from(entry_signed.hash())), None);
+        let dependency_tasks = reduce_task(self.context.clone(), input.clone())
+            .await
+            .unwrap();
+
+        // Run dependency tasks
+        if let Some(tasks) = dependency_tasks {
+            for task in tasks {
+                dependency_task(self.context.clone(), task.input().to_owned())
+                    .await
+                    .unwrap();
+            }
+        }
+        DocumentViewId::from(entry_signed.hash())
+    }
+
+    /// Publish a schema and materialise it in the store.
+    pub async fn add_schema(
+        &mut self,
+        name: &str,
+        fields: Vec<(&str, FieldType)>,
+        key_pair: &KeyPair,
+    ) -> Schema {
+        info!("Creating schema {}", name);
+        let mut field_ids = Vec::new();
+
+        // Build and reduce schema field definitions
+        for field in fields {
+            let create_field_op = Schema::create_field(field.0, field.1.clone()).unwrap();
+            let (entry_signed, _) =
+                send_to_store(&self.store, &create_field_op, None, key_pair).await;
+
+            let input = TaskInput::new(Some(DocumentId::from(entry_signed.hash())), None);
+            reduce_task(self.context.clone(), input).await.unwrap();
+
+            info!("Added field '{}' ({})", field.0, field.1);
+            field_ids.push(DocumentViewId::from(entry_signed.hash()));
+        }
+
+        // Build and reduce schema definition
+        let create_schema_op = Schema::create(name, "test schema description", field_ids).unwrap();
+        let (entry_signed, _) = send_to_store(&self.store, &create_schema_op, None, key_pair).await;
+        let input = TaskInput::new(None, Some(DocumentViewId::from(entry_signed.hash())));
+        reduce_task(self.context.clone(), input.clone())
+            .await
+            .unwrap();
+
+        // Run schema task for this spec
+        schema_task(self.context.clone(), input).await.unwrap();
+
+        let view_id = DocumentViewId::from(entry_signed.hash());
+        let schema_id = SchemaId::Application(name.to_string(), view_id);
+
+        debug!("Done building {}", schema_id);
+        self.context
+            .schema_provider
+            .get(&schema_id)
+            .await
+            .expect("Failed adding schema to provider.")
+    }
+}
+
 /// Data collected when populating a `TestDatabase` in order to easily check values which
 /// would be otherwise hard or impossible to get through the store methods.
 #[derive(Default)]
@@ -160,7 +270,7 @@ pub async fn send_to_store(
     // Publish the entry and get the next entry args.
     let publish_entry_response = publish(store, &entry_encoded, &operation_encoded)
         .await
-        .unwrap();
+        .expect("Error publishing entry");
 
     (entry_encoded, publish_entry_response)
 }
diff --git a/aquadoggo/src/db/traits/schema.rs b/aquadoggo/src/db/traits/schema.rs
index 6f7c9aacc..c92903534 100644
--- a/aquadoggo/src/db/traits/schema.rs
+++ b/aquadoggo/src/db/traits/schema.rs
@@ -20,6 +20,6 @@ pub trait SchemaStore {
     /// Get all published Schema from storage.
     ///
     /// Returns a vector of Schema, or an empty vector if none were found. Returns
-    /// an error when a fatal storage error occured or a schema could not be constructed.
+    /// an error when a fatal storage error occurred or a schema could not be constructed.
     async fn get_all_schema(&self) -> Result<Vec<Schema>, SchemaStoreError>;
 }
diff --git a/aquadoggo/src/domain.rs b/aquadoggo/src/domain.rs
index ee14e86df..5510b90b0 100644
--- a/aquadoggo/src/domain.rs
+++ b/aquadoggo/src/domain.rs
@@ -406,10 +406,9 @@ mod tests {
     };
     use rstest::rstest;
 
-    use crate::db::provider::SqlStorage;
     use crate::db::stores::test_utils::{
         doggo_test_fields, encode_entry_and_operation, populate_test_db, send_to_store, test_db,
-        test_db_config, PopulateDatabaseConfig, TestData, TestDatabase, TestDatabaseRunner,
+        test_db_config, PopulateDatabaseConfig, TestDatabase, TestDatabaseRunner,
     };
     use crate::domain::publish;
     use crate::graphql::client::NextEntryArguments;
@@ -450,7 +449,7 @@ mod tests {
         #[from(test_db)] runner: TestDatabaseRunner,
         #[from(random_document_view_id)] document_view_id: DocumentViewId,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let result = get_checked_document_id_for_view_id(&db.store, &document_view_id).await;
             assert!(result.is_err());
         });
@@ -462,7 +461,7 @@ mod tests {
         operation: Operation,
         operation_fields: OperationFields,
     ) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             // Store one entry and operation in the store.
             let (entry, _) = send_to_store(&db.store, &operation, None, &KeyPair::new()).await;
             let operation_one_id: OperationId = entry.hash().into();
@@ -538,11 +537,8 @@ mod tests {
         #[with(8, 1, 1)]
         config: PopulateDatabaseConfig,
     ) {
-        // Populate the db with 8 entries.
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
 
         // The author who has published to the db.
@@ -604,12 +600,10 @@ mod tests {
         #[with(8, 2, 1)]
         config: PopulateDatabaseConfig,
     ) {
-        // Populate the db with 8 entries.
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
+
         let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
 
         // Get the document id.
@@ -684,11 +678,10 @@ mod tests {
         #[with(8, 2, 1)]
         config: PopulateDatabaseConfig,
     ) {
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
+
         let author_with_removed_operations =
             Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
         let author_making_request = Author::try_from(key_pair.public_key().to_owned()).unwrap();
@@ -753,6 +746,8 @@ mod tests {
         #[case] document_view_id: Option<DocumentViewId>,
         #[case] expected_next_args: (SeqNumU64, Backlink, Skiplink),
     ) {
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         // Populate the db with the number of entries defined in the test params.
         let config = PopulateDatabaseConfig {
             no_of_entries,
             no_of_logs,
             no_of_authors: 1,
             ..PopulateDatabaseConfig::default()
         };
-
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
         populate_test_db(&mut db, &config).await;
 
         // The author who published the entries.
@@ -823,10 +813,8 @@ mod tests {
         #[with(7, 1, 1)]
         config: PopulateDatabaseConfig,
     ) {
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
 
         // Get with no DocumentViewId given.
@@ -889,10 +877,8 @@ mod tests {
         #[with(2, 1, 1)]
         config: PopulateDatabaseConfig,
     ) {
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
 
         let document_id = db.test_data.documents.first().unwrap();
@@ -955,10 +941,8 @@ mod tests {
         #[with(1, 2, 1)]
         config: PopulateDatabaseConfig,
     ) {
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
 
         let entry = Entry::new(&log_id, Some(&operation), None, None, &SeqNum::default()).unwrap();
@@ -987,11 +971,10 @@ mod tests {
         #[with(2, 1, 1, true)]
         config: PopulateDatabaseConfig,
     ) {
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
+
         let document_id = db.test_data.documents.first().unwrap();
         let document_view_id: DocumentViewId = document_id.as_str().parse().unwrap();
         let author_performing_update = Author::try_from(key_pair.public_key().to_owned()).unwrap();
@@ -1036,11 +1019,10 @@ mod tests {
         #[with(3, 1, 1, true)]
         config: PopulateDatabaseConfig,
     ) {
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
+
         let document_id = db.test_data.documents.first().unwrap();
         let document_view_id: DocumentViewId = document_id.as_str().parse().unwrap();
         let author = Author::try_from(key_pair.public_key().to_owned()).unwrap();
@@ -1052,7 +1034,7 @@ mod tests {
 
     #[rstest]
     fn publish_many_entries(key_pair: KeyPair, #[from(test_db)] runner: TestDatabaseRunner) {
-        runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
+        runner.with_db_teardown(|db: TestDatabase| async move {
             let num_of_entries = 13;
             let mut document_id: Option<DocumentId> = None;
             let author = Author::try_from(key_pair.public_key().to_owned()).unwrap();
@@ -1107,11 +1089,8 @@ mod tests {
         #[with(2, 1, 1, false)]
         config: PopulateDatabaseConfig,
     ) {
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
-
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
 
         let author = Author::try_from(key_pair.public_key().to_owned()).unwrap();
@@ -1157,11 +1136,8 @@ mod tests {
         #[with(2, 1, 1, false)]
         config: PopulateDatabaseConfig,
     ) {
-        let mut db = TestDatabase {
-            store: MemoryStore::default(),
-            test_data: TestData::default(),
-        };
-
+        let store = MemoryStore::default();
+        let mut db = TestDatabase::new(store.clone());
         populate_test_db(&mut db, &config).await;
 
         let author = Author::try_from(key_pair.public_key().to_owned()).unwrap();
diff --git a/aquadoggo/src/errors.rs b/aquadoggo/src/errors.rs
index 41832b218..90b37ef48 100644
--- a/aquadoggo/src/errors.rs
+++ b/aquadoggo/src/errors.rs
@@ -1,4 +1,25 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later
 
+use p2panda_rs::schema::{SchemaId, SchemaIdError};
+use thiserror::Error;
+
 /// A result type used in aquadoggo modules.
 pub type Result<T> = anyhow::Result<T>;
+
+/// Errors returned by schema service.
+#[derive(Error, Debug)]
+pub enum SchemaProviderError {
+    /// Schema service can only handle application schemas it has definitions for.
+    #[allow(dead_code)]
+    #[error("not a known application schema: {0}")]
+    UnknownApplicationSchema(SchemaId),
+
+    /// This operation has a requirement on the schema parameter.
+    #[allow(dead_code)]
+    #[error("invalid schema: {0}, {1}")]
+    InvalidSchema(SchemaId, String),
+
+    /// Schema service can only handle valid schema ids.
+    #[error(transparent)]
+    InvalidSchemaId(#[from] SchemaIdError),
+}
diff --git a/aquadoggo/src/graphql/client/dynamic_query.rs b/aquadoggo/src/graphql/client/dynamic_query.rs
new file mode 100644
index 000000000..696108e45
--- /dev/null
+++ b/aquadoggo/src/graphql/client/dynamic_query.rs
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+//! Resolver for dynamic fields of the client API.
+use async_graphql::indexmap::IndexMap;
+use async_graphql::{
+    ContainerType, Context, Name, SelectionField, ServerError, ServerResult, Value,
+};
+use async_recursion::async_recursion;
+use async_trait::async_trait;
+use futures::future;
+use log::{debug, error, info};
+use p2panda_rs::document::{DocumentId, DocumentView, DocumentViewId};
+use p2panda_rs::operation::OperationValue;
+use p2panda_rs::schema::SchemaId;
+use p2panda_rs::Human;
+
+use crate::db::provider::SqlStorage;
+use crate::db::traits::DocumentStore;
+use crate::graphql::client::dynamic_types;
+use crate::graphql::client::dynamic_types::DocumentMeta;
+use crate::graphql::client::utils::validate_view_matches_schema;
+use crate::graphql::scalars::{DocumentIdScalar, DocumentViewIdScalar};
+use crate::schema::SchemaProvider;
+
+/// Resolves queries for documents based on p2panda schemas.
+///
+/// Implements [`ContainerType`] to be able to resolve arbitrary fields selected by a query on the
+/// root GraphQL schema.
+///
+/// This implementation always has to match what is defined by the corresponding `OutputType`
+/// implementation for `DynamicQuery`.
+#[derive(Debug, Default)]
+pub struct DynamicQuery;
+
+#[async_trait]
+impl ContainerType for DynamicQuery {
+    /// This resolver is called for all queries but we only want to resolve if the queried field
+    /// can actually be parsed in one of the two forms:
+    ///
+    /// - `<schema_id>` - query a single document
+    /// - `all_<schema_id>` - query a collection of documents
+    async fn resolve_field(&self, ctx: &Context<'_>) -> ServerResult<Option<Value>> {
+        let field_name = ctx.field().name();
+
+        // Optimistically parse as collection query if field name begins with `all_` (it might
+        // still be a single document query if the schema name itself starts with `all_`).
+        if field_name.starts_with("all_") {
+            if let Ok(schema_id) = field_name.split_at(4).1.parse::<SchemaId>() {
+                // Retrieve the schema to make sure that this is actually a schema and doesn't just
+                // look like one.
+                let schema_provider = ctx.data_unchecked::<SchemaProvider>();
+                if schema_provider.get(&schema_id).await.is_some() {
+                    return self.query_collection(&schema_id, ctx).await;
+                }
+            }
+        }
+
+        // Continue by trying to parse it as a schema and, if that's successful, checking whether
+        // this schema is available in the schema provider. If both are successful, continue by
+        // resolving this query as a query for a single document.
+        if let Ok(schema_id) = field_name.parse::<SchemaId>() {
+            let schema_provider = ctx.data_unchecked::<SchemaProvider>();
+            if schema_provider.get(&schema_id).await.is_some() {
+                return self.query_single(&schema_id, ctx).await;
+            }
+        }
+
+        // Return `None` to signal that other resolvers should be tried for this query.
+        Ok(None)
+    }
+}
+
+impl DynamicQuery {
+    /// Returns a single document as a GraphQL value.
+    async fn query_single(
+        &self,
+        schema_id: &SchemaId,
+        ctx: &Context<'_>,
+    ) -> ServerResult<Option<Value>> {
+        info!("Handling single query for {}", schema_id.display());
+
+        let document_id_arg = ctx.param_value::<Option<DocumentIdScalar>>(
+            dynamic_types::dynamic_query_output::DOCUMENT_ID_ARGUMENT,
+            None,
+        )?;
+        let view_id_arg = ctx.param_value::<Option<DocumentViewIdScalar>>(
+            dynamic_types::dynamic_query_output::VIEW_ID_ARGUMENT,
+            None,
+        )?;
+
+        // Answer queries where the `viewId` argument is given.
+        if let Some(view_id_scalar) = view_id_arg.clone().1 {
+            let view_id = DocumentViewId::from(&view_id_scalar);
+
+            // Return early and ignore the `id` argument because it doesn't provide any additional
+            // information that we don't get from the view id.
+            return Ok(Some(
+                self.get_by_document_view_id(
+                    view_id,
+                    ctx,
+                    ctx.field().selection_set().collect(),
+                    Some(schema_id),
+                )
+                .await?,
+            ));
+        }
+
+        // Answer queries where the `id` argument is given.
+        if let Some(document_id_scalar) = document_id_arg.1 {
+            return Ok(Some(
+                self.get_by_document_id(
+                    DocumentId::from(&document_id_scalar),
+                    ctx,
+                    ctx.field().selection_set().collect(),
+                    Some(schema_id),
+                )
+                .await?,
+            ));
+        }
+
+        Err(ServerError::new(
+            "Must provide either `id` or `viewId` argument".to_string(),
+            Some(ctx.item.pos),
+        ))
+    }
+
+    /// Returns all documents for the given schema as a GraphQL value.
+    async fn query_collection(
+        &self,
+        schema_id: &SchemaId,
+        ctx: &Context<'_>,
+    ) -> ServerResult<Option<Value>> {
+        info!("Handling collection query for {}", schema_id.display());
+
+        let store = ctx.data_unchecked::<SqlStorage>();
+
+        // Retrieve all documents for schema from storage.
+        let documents = store
+            .get_documents_by_schema(schema_id)
+            .await
+            .map_err(|err| ServerError::new(err.to_string(), None))?;
+
+        // Assemble views async
+        let documents_graphql_values = documents.into_iter().map(|view| async move {
+            let selected_fields = ctx.field().selection_set().collect();
+            self.document_response(view, ctx, selected_fields).await
+        });
+        Ok(Some(Value::List(
+            future::try_join_all(documents_graphql_values).await?,
+        )))
+    }
+
+    /// Fetches the latest view for the given document id from the store and returns it as a
+    /// GraphQL value.
+    ///
+    /// Recurses into relations when those are selected in `selected_fields`.
+    ///
+    /// If the `validate_schema` parameter has a value, returns an error if the resolved document
+    /// doesn't match this schema.
+    #[async_recursion]
+    async fn get_by_document_id(
+        &self,
+        document_id: DocumentId,
+        ctx: &Context<'_>,
+        selected_fields: Vec<SelectionField<'async_recursion>>,
+        validate_schema: Option<&'async_recursion SchemaId>,
+    ) -> ServerResult<Value> {
+        debug!("Fetching {} from store", document_id.display());
+
+        let store = ctx.data_unchecked::<SqlStorage>();
+        let view = store.get_document_by_id(&document_id).await.unwrap();
+        match view {
+            Some(view) => {
+                // Validate the document's schema if the `validate_schema` argument is set.
+                if let Some(expected_schema_id) = validate_schema {
+                    validate_view_matches_schema(
+                        view.id(),
+                        expected_schema_id,
+                        store,
+                        Some(ctx.item.pos),
+                    )
+                    .await?;
+                }
+
+                self.document_response(view, ctx, selected_fields).await
+            }
+            None => {
+                error!("No view found for document {}", document_id.as_str());
+                Ok(Value::Null)
+            }
+        }
+    }
+
+    /// Fetches the given document view id from the store and returns it as a GraphQL value.
+    ///
+    /// Recurses into relations when those are selected in `selected_fields`.
+    ///
+    /// If the `validate_schema` parameter has a value, returns an error if the resolved document
+    /// doesn't match this schema.
+    #[async_recursion]
+    async fn get_by_document_view_id(
+        &self,
+        document_view_id: DocumentViewId,
+        ctx: &Context<'_>,
+        selected_fields: Vec<SelectionField<'async_recursion>>,
+        validate_schema: Option<&'async_recursion SchemaId>,
+    ) -> ServerResult<Value> {
+        debug!("Fetching {} from store", document_view_id.display());
+
+        let store = ctx.data_unchecked::<SqlStorage>();
+        let view = store
+            .get_document_view_by_id(&document_view_id)
+            .await
+            .unwrap();
+        match view {
+            Some(view) => {
+                // Validate the document's schema if the `validate_schema` argument is set.
+                if let Some(expected_schema_id) = validate_schema {
+                    validate_view_matches_schema(
+                        view.id(),
+                        expected_schema_id,
+                        store,
+                        Some(ctx.item.pos),
+                    )
+                    .await?;
+                }
+                self.document_response(view, ctx, selected_fields).await
+            }
+            None => Ok(Value::Null),
+        }
+    }
+
+    /// Builds a GraphQL response value for a document.
+    ///
+    /// This uses unstable, undocumented features of `async_graphql`.
+    #[async_recursion]
+    async fn document_response(
+        &self,
+        view: DocumentView,
+        ctx: &Context<'_>,
+        selected_fields: Vec<SelectionField<'async_recursion>>,
+    ) -> ServerResult<Value> {
+        let mut document_fields = IndexMap::new();
+
+        for field in selected_fields {
+            // Assemble selected metadata values.
+            if field.name() == dynamic_types::document::META_FIELD {
+                document_fields.insert(
+                    Name::new(field.alias().unwrap_or_else(|| field.name())),
+                    DocumentMeta::resolve(field, None, Some(view.id())),
+                );
+            }
+
+            // Assemble selected document field values.
+            if field.name() == dynamic_types::document::FIELDS_FIELD {
+                let subselection = field.selection_set().collect();
+                document_fields.insert(
+                    Name::new(field.alias().unwrap_or_else(|| field.name())),
+                    self.document_fields_response(view.clone(), ctx, subselection)
+                        .await?,
+                );
+            }
+        }
+
+        Ok(Value::Object(document_fields))
+    }
+
+    /// Builds a GraphQL response value for a document's fields.
+    ///
+    /// This uses unstable, undocumented features of `async_graphql`.
+    #[async_recursion]
+    async fn document_fields_response(
+        &self,
+        view: DocumentView,
+        ctx: &Context<'_>,
+        selected_fields: Vec<SelectionField<'async_recursion>>,
+    ) -> ServerResult<Value> {
+        let store = ctx.data_unchecked::<SqlStorage>();
+        let schema_id = store
+            .get_schema_by_document_view(view.id())
+            .await
+            .map_err(|err| ServerError::new(err.to_string(), None))?
+            .unwrap();
+
+        let schema_provider = ctx.data_unchecked::<SchemaProvider>();
+        // Unwrap because this schema id comes from the store.
+        let schema = schema_provider.get(&schema_id).await.unwrap();
+
+        // Construct GraphQL value for every field of the given view that has been selected.
+        let mut view_fields = IndexMap::new();
+        for selected_field in selected_fields {
+            if !schema.fields().contains_key(selected_field.name()) {
+                return Err(ServerError::new(
+                    format!(
+                        "Field {} does not exist for schema {}",
+                        selected_field.name(),
+                        schema
+                    ),
+                    None,
+                ));
+            }
+            // Retrieve the current field's value from the document view. Unwrap because we have
+            // checked that this field exists on the schema.
+            let document_view_value = view.get(selected_field.name()).unwrap();
+
+            // Collect any further fields that have been selected on the current field.
+            let next_selection: Vec<SelectionField<'_>> =
+                selected_field.selection_set().collect();
+
+            let value = match document_view_value.value() {
+                // Recurse into single views.
+                OperationValue::Relation(rel) => {
+                    self.get_by_document_id(rel.document_id().clone(), ctx, next_selection, None)
+                        .await?
+                }
+                OperationValue::PinnedRelation(rel) => {
+                    self.get_by_document_view_id(rel.view_id().clone(), ctx, next_selection, None)
+                        .await?
+                }
+
+                // Recurse into view lists.
+                OperationValue::RelationList(rel) => {
+                    let queries = rel.clone().into_iter().map(|doc_id| {
+                        self.get_by_document_id(doc_id, ctx, next_selection.clone(), None)
+                    });
+                    Value::List(future::try_join_all(queries).await?)
+                }
+                OperationValue::PinnedRelationList(rel) => {
+                    let queries = rel.clone().into_iter().map(|view_id| {
+                        self.get_by_document_view_id(view_id, ctx, next_selection.clone(), None)
+                    });
+                    Value::List(future::try_join_all(queries).await?)
+                }
+
+                // Convert all simple fields to scalar values.
+                _ => gql_scalar(document_view_value.value()),
+            };
+            view_fields.insert(
+                Name::new(
+                    selected_field
+                        .alias()
+                        .unwrap_or_else(|| selected_field.name()),
+                ),
+                value,
+            );
+        }
+        Ok(Value::Object(view_fields))
+    }
+}
+
+/// Convert non-relation operation values into GraphQL values.
+///
+/// Panics when given a relation field value.
+fn gql_scalar(operation_value: &OperationValue) -> Value {
+    match operation_value {
+        OperationValue::Boolean(value) => value.to_owned().into(),
+        OperationValue::Integer(value) => value.to_owned().into(),
+        OperationValue::Float(value) => value.to_owned().into(),
+        OperationValue::Text(value) => value.to_owned().into(),
+        // only use for scalars
+        _ => panic!("can only return scalar values"),
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::convert::TryInto;
+
+    use async_graphql::{value, Response, Value};
+    use p2panda_rs::document::DocumentId;
+    use p2panda_rs::schema::FieldType;
+    use p2panda_rs::test_utils::fixtures::random_key_pair;
+    use rstest::rstest;
+    use serde_json::json;
+
+    use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
+    use crate::test_helpers::graphql_test_client;
+
+    #[rstest]
+    fn single_query(#[from(test_db)] runner: TestDatabaseRunner) {
+        // Test single query parameter variations.
+
+        runner.with_db_teardown(&|mut db: TestDatabase| async move {
+            let key_pair = random_key_pair();
+
+            // Add schema to node.
+            let schema = db
+                .add_schema("schema_name", vec![("bool", FieldType::Bool)], &key_pair)
+                .await;
+
+            // Publish document on node.
+            let view_id = db
+                .add_document(
+                    schema.id(),
+                    vec![("bool", true.into())].try_into().unwrap(),
+                    &key_pair,
+                )
+                .await;
+            let document_id =
+                DocumentId::from(view_id.graph_tips().first().unwrap().as_hash().to_owned());
+
+            // Configure and send test query.
+            let client = graphql_test_client(&db).await;
+            let query = format!(
+                r#"{{
+                byViewId: {type_name}(viewId: "{view_id}") {{
+                    fields {{ bool }}
+                }},
+                byDocumentId: {type_name}(id: "{document_id}") {{
+                    fields {{ bool }}
+                }}
+            }}"#,
+                type_name = schema.id().to_string(),
+                view_id = view_id.to_string(),
+                document_id = document_id.as_str()
+            );
+
+            let response = client
+                .post("/graphql")
+                .json(&json!({
+                    "query": query,
+                }))
+                .send()
+                .await;
+
+            let response: Response = response.json().await;
+
+            let expected_data = value!({
+                "byViewId": value!({ "fields": { "bool": true, } }),
+                "byDocumentId": value!({ "fields": { "bool": true, } }),
+            });
+            assert_eq!(response.data, expected_data, "{:#?}", response.errors);
+        });
+    }
+
+    #[rstest]
+    #[case::unknown_document_id(
+        "id: \"00208f7492d6eb01360a886dac93da88982029484d8c04a0bd2ac0607101b80a6634\"",
+        value!({
+            "view": Value::Null
+        }),
+        vec![]
+    )]
+    #[case::unknown_view_id(
+        "viewId: \"00208f7492d6eb01360a886dac93da88982029484d8c04a0bd2ac0607101b80a6634\"",
+        value!({
+            "view": Value::Null
+        }),
+        vec![]
+    )]
+    #[case::malformed_document_id(
+        "id: \"verboten\"",
+        Value::Null,
+        vec!["Failed to parse \"DocumentIdScalar\": invalid hex encoding in hash string".to_string()]
+    )]
+    #[case::malformed_view_id(
+        "viewId: \"verboten\"",
+        Value::Null,
+        vec!["Failed to parse \"DocumentViewIdScalar\": invalid hex encoding in hash string".to_string()]
+    )]
+    #[case::missing_parameters(
+        "id: null",
+        Value::Null,
+        vec!["Must provide either `id` or `viewId` argument".to_string()]
+    )]
+    fn single_query_error_handling(
+        #[from(test_db)] runner: TestDatabaseRunner,
+        #[case] params: String,
+        #[case] expected_value: Value,
+        #[case] expected_errors: Vec<String>,
+    ) {
+        // Test single query parameter variations.
+
+        runner.with_db_teardown(move |db: TestDatabase| async move {
+            // Configure and send test query.
+            let client = graphql_test_client(&db).await;
+            let query = format!(
+                r#"{{
+                view: schema_definition_v1({params}) {{
+                    fields {{ name }}
+                }}
+            }}"#,
+                params = params
+            );
+
+            let response = client
+                .post("/graphql")
+                .json(&json!({
+                    "query": query,
+                }))
+                .send()
+                .await;
+
+            let response: Response = response.json().await;
+
+            // Assert response data.
+            assert_eq!(response.data, expected_value, "{:#?}", response);
+
+            // Assert error messages.
+            let err_msgs: Vec<String> = response
+                .errors
+                .iter()
+                .map(|err| err.message.to_string())
+                .collect();
+            assert_eq!(err_msgs, expected_errors);
+        });
+    }
+
+    #[rstest]
+    fn collection_query(#[from(test_db)] runner: TestDatabaseRunner) {
+        // Test collection query parameter variations.
+
+        runner.with_db_teardown(&|mut db: TestDatabase| async move {
+            let key_pair = random_key_pair();
+
+            // Add schema to node.
+            let schema = db
+                .add_schema("schema_name", vec![("bool", FieldType::Bool)], &key_pair)
+                .await;
+
+            // Publish document on node.
+            db.add_document(
+                &schema.id(),
+                vec![("bool", true.into())].try_into().unwrap(),
+                &key_pair,
+            )
+            .await;
+
+            // Configure and send test query.
+            let client = graphql_test_client(&db).await;
+            let query = format!(
+                r#"{{
+                collection: all_{type_name} {{
+                    fields {{ bool }}
+                }},
+            }}"#,
+                type_name = schema.id(),
+            );
+
+            let response = client
+                .post("/graphql")
+                .json(&json!({
+                    "query": query,
+                }))
+                .send()
+                .await;
+
+            let response: Response = response.json().await;
+
+            let expected_data = value!({
+                "collection": value!([{ "fields": { "bool": true, } }]),
+            });
+            assert_eq!(response.data, expected_data, "{:#?}", response.errors);
+        });
+    }
+}
diff --git a/aquadoggo/src/graphql/client/dynamic_types/document.rs b/aquadoggo/src/graphql/client/dynamic_types/document.rs
new file mode 100644
index 000000000..e328c18b7
--- /dev/null
+++ b/aquadoggo/src/graphql/client/dynamic_types/document.rs
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+use async_graphql::indexmap::IndexMap;
+use p2panda_rs::schema::Schema;
+
+use crate::graphql::client::dynamic_types::utils::{metafield, metaobject};
+use crate::graphql::client::dynamic_types::{DocumentFields, DocumentMeta};
+
+/// Fieldname on document for accessing document metadata.
+pub const META_FIELD: &str = "meta";
+
+/// Fieldname on document for accessing document view fields.
+pub const FIELDS_FIELD: &str = "fields";
+
+/// Represents documents of a p2panda schema.
+pub struct Document(&'static Schema);
+
+impl Document {
+    /// Get a new instance for the given schema, which must be `static`.
+    pub fn new(schema: &'static Schema) -> Self {
+        Self(schema)
+    }
+
+    /// Access the inner schema.
+    pub fn schema(&self) -> &'static Schema {
+        self.0
+    }
+
+    /// Access the schema's name.
+    pub fn type_name(&self) -> String {
+        self.schema().id().to_string()
+    }
+
+    /// Generate an object type that represents documents of this schema in the GraphQL API.
+    ///
+    /// Be mindful when changing field names as these also have to be changed in the dynamic query
+    /// resolver to match.
+    pub fn register_type(&self, registry: &mut async_graphql::registry::Registry) {
+        // Register the type of this schema's `fields` type.
+        let fields_type = DocumentFields::new(self.schema());
+        fields_type.register_type(registry);
+
+        // Assemble field definitions for this schema itself.
+        let mut fields = IndexMap::new();
+
+        // Insert field `meta`.
+        fields.insert(
+            META_FIELD.to_string(),
+            metafield(META_FIELD, None, DocumentMeta::type_name()),
+        );
+
+        // Insert field `fields`.
+        fields.insert(
+            FIELDS_FIELD.to_string(),
+            metafield(FIELDS_FIELD, None, &fields_type.type_name()),
+        );
+
+        // Finally register the metatype for this schema.
+        let metatype = metaobject(&self.type_name(), Some(self.schema().description()), fields);
+        registry.types.insert(self.type_name(), metatype);
+    }
+}
diff --git a/aquadoggo/src/graphql/client/dynamic_types/document_fields.rs b/aquadoggo/src/graphql/client/dynamic_types/document_fields.rs
new file mode 100644
index 000000000..7d20c9ece
--- /dev/null
+++ b/aquadoggo/src/graphql/client/dynamic_types/document_fields.rs
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+use async_graphql::indexmap::IndexMap;
+use p2panda_rs::schema::Schema;
+
+use crate::graphql::client::dynamic_types::utils::{graphql_typename, metafield, metaobject};
+
+/// Represents fields for documents of a specific schema in the GraphQL client API.
+pub struct DocumentFields(&'static Schema);
+
+impl DocumentFields {
+    /// Get a new instance for the given schema, which must be `static`.
+    pub fn new(schema: &'static Schema) -> Self {
+        Self(schema)
+    }
+
+    /// Returns the type name, formatted like `<schema_id>Fields`.
+    pub fn type_name(&self) -> String {
+        format!("{}Fields", self.0.id())
+    }
+
+    /// Generate an object type and register it in a GraphQL schema registry.
+    pub fn register_type(&self, registry: &mut async_graphql::registry::Registry) {
+        let mut fields = IndexMap::new();
+
+        // Create a GraphQL field for every schema field.
+        self.0.fields().iter().for_each(|(field_name, field_type)| {
+            fields.insert(
+                field_name.to_string(),
+                metafield(field_name, None, &graphql_typename(field_type)),
+            );
+        });
+
+        // Create a meta object with the fields defined above and insert it into the registry.
+        registry.types.insert(
+            self.type_name(),
+            metaobject(
+                &self.type_name(),
+                Some("Data fields available on documents of this schema."),
+                fields,
+            ),
+        );
+    }
+}
diff --git a/aquadoggo/src/graphql/client/dynamic_types/document_meta.rs b/aquadoggo/src/graphql/client/dynamic_types/document_meta.rs
new file mode 100644
index 000000000..8d14e836d
--- /dev/null
+++ b/aquadoggo/src/graphql/client/dynamic_types/document_meta.rs
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+use async_graphql::indexmap::IndexMap;
+use async_graphql::{Name, OutputType, ScalarType, SelectionField, Value};
+use p2panda_rs::document::{DocumentId, DocumentViewId};
+
+use crate::graphql::client::dynamic_types::utils::{metafield, metaobject};
+use crate::graphql::scalars::{DocumentIdScalar, DocumentViewIdScalar};
+
+/// Name of the field for accessing the document's id.
+pub const DOCUMENT_ID_FIELD: &str = "documentId";
+
+/// Name of the field for accessing the document's view id.
+pub const VIEW_ID_FIELD: &str = "viewId";
+
+/// The GraphQL type for generic document metadata.
+pub struct DocumentMeta;
+
+impl DocumentMeta {
+    pub fn type_name() -> &'static str {
+        "DocumentMeta"
+    }
+
+    /// Generate an object type for generic metadata and register it in a GraphQL schema registry.
+    pub fn register_type(registry: &mut async_graphql::registry::Registry) {
+        let mut fields = IndexMap::new();
+
+        fields.insert(
+            DOCUMENT_ID_FIELD.to_string(),
+            metafield(
+                DOCUMENT_ID_FIELD,
+                Some("The document id of this response object."),
+                &*DocumentIdScalar::type_name(),
+            ),
+        );
+
+        // Manually register scalar type in registry because it's not used in the static api.
+        DocumentViewIdScalar::create_type_info(registry);
+
+        fields.insert(
+            VIEW_ID_FIELD.to_string(),
+            metafield(
+                VIEW_ID_FIELD,
+                Some("The specific document view id contained in this response object."),
+                &*DocumentViewIdScalar::type_name(),
+            ),
+        );
+
+        registry.types.insert(
+            Self::type_name().to_string(),
+            metaobject(
+                Self::type_name(),
+                Some("Metadata for documents of this schema."),
+                fields,
+            ),
+        );
+    }
+
+    /// Resolve GraphQL response value for metadata query field.
+    ///
+    /// All parameters that are available should be set.
+    // Override rule to avoid unnecessary nesting.
+    #[allow(clippy::unnecessary_unwrap)]
+    pub fn resolve(
+        root_field: SelectionField,
+        document_id: Option<&DocumentId>,
+        view_id: Option<&DocumentViewId>,
+    ) -> Value {
+        let mut meta_fields = IndexMap::new();
+
+        for meta_field in root_field.selection_set() {
+            if meta_field.name() == DOCUMENT_ID_FIELD && document_id.is_some() {
+                meta_fields.insert(
+                    Name::new(DOCUMENT_ID_FIELD),
+                    DocumentIdScalar::from(document_id.unwrap()).to_value(),
+                );
+            }
+
+            if meta_field.name() == VIEW_ID_FIELD && view_id.is_some() {
+                meta_fields.insert(
+                    Name::new(VIEW_ID_FIELD),
+                    Value::String(view_id.unwrap().to_string()),
+                );
+            }
+        }
+        Value::Object(meta_fields)
+    }
+}
diff --git a/aquadoggo/src/graphql/client/dynamic_types/dynamic_query_output.rs b/aquadoggo/src/graphql/client/dynamic_types/dynamic_query_output.rs
new file mode 100644
index 000000000..c34acacb6
--- /dev/null
+++ b/aquadoggo/src/graphql/client/dynamic_types/dynamic_query_output.rs
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+//! GraphQL types for all [schemas][`Schema`] available in the schema provider.
+//!
+//! `async_graphql` doesn't provide an API for registering types that don't correspond to
+//! any Rust type. This module uses undocumented, internal functionality of `async_graphql` to
+//! circumvent this restriction. By implementing `OutputType` for [`DynamicQuery`] we are given
+//! mutable access to the type registry and can insert types into it.
+use std::borrow::Cow;
+
+use async_graphql::indexmap::IndexMap;
+use async_graphql::parser::types::Field;
+use async_graphql::registry::{MetaField, MetaInputValue, MetaTypeId};
+use async_graphql::{ContextSelectionSet, OutputType, Positioned, ServerResult, Value};
+use p2panda_rs::schema::Schema;
+
+use crate::graphql::client::dynamic_types::utils::{metafield, metaobject};
+use crate::graphql::client::dynamic_types::{Document, DocumentMeta};
+use crate::graphql::client::DynamicQuery;
+use crate::graphql::scalars::{DocumentIdScalar, DocumentViewIdScalar};
+use crate::schema::load_static_schemas;
+
+/// Name of the field argument for requesting a document id.
+pub const DOCUMENT_ID_ARGUMENT: &str = "id";
+
+/// Name of the field argument for requesting a document view id.
+pub const VIEW_ID_ARGUMENT: &str = "viewId";
+
+#[async_trait::async_trait]
+impl OutputType for DynamicQuery {
+    /// Register all GraphQL types for schemas currently available in the schema provider.
+    fn create_type_info(registry: &mut async_graphql::registry::Registry) -> String {
+        // Load schema definitions
+        let schemas: &'static Vec<Schema> = load_static_schemas();
+
+        // This callback is given a mutable reference to the registry!
+        registry.create_output_type::<Self, _>(MetaTypeId::Object, |reg| {
+            // Insert queries for all registered schemas.
+            let mut fields = IndexMap::new();
+
+            // Generic type of document metadata.
+            DocumentMeta::register_type(reg);
+
+            // Insert GraphQL types for all registered schemas.
+            for schema in schemas {
+                // Register types for both this schema's `DocumentType` and its
+                // `DocumentFieldsType`.
+                let document_type = Document::new(schema);
+                document_type.register_type(reg);
+
+                // Insert a single and collection query field for every schema with which documents
+                // of that schema can be queried.
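+                // As an illustrative sketch (schema ids shortened), the two fields
+                // inserted below allow queries of this shape:
+                //
+                //   { chat_0020...(id: "...", viewId: "...") { meta { documentId } fields { ... } } }
+                //   { all_chat_0020... { fields { ... } } }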
+                fields.insert(
+                    document_type.type_name(),
+                    MetaField {
+                        name: document_type.type_name(),
+                        description: Some("Query a single document of this schema."),
+                        ty: document_type.type_name(),
+                        args: {
+                            let mut single_args = IndexMap::new();
+                            single_args.insert(
+                                DOCUMENT_ID_ARGUMENT.to_string(),
+                                MetaInputValue {
+                                    name: DOCUMENT_ID_ARGUMENT,
+                                    description: None,
+                                    ty: DocumentIdScalar::type_name().to_string(),
+                                    default_value: None,
+                                    visible: None,
+                                    is_secret: false,
+                                },
+                            );
+                            single_args.insert(
+                                VIEW_ID_ARGUMENT.to_string(),
+                                MetaInputValue {
+                                    name: VIEW_ID_ARGUMENT,
+                                    description: None,
+                                    ty: DocumentViewIdScalar::type_name().to_string(),
+                                    default_value: None,
+                                    visible: None,
+                                    is_secret: false,
+                                },
+                            );
+                            single_args
+                        },
+                        deprecation: Default::default(),
+                        cache_control: Default::default(),
+                        external: false,
+                        requires: None,
+                        provides: None,
+                        visible: None,
+                        compute_complexity: None,
+                        oneof: false,
+                    },
+                );
+
+                fields.insert(
+                    format!("all_{}", document_type.type_name()),
+                    metafield(
+                        &format!("all_{}", document_type.type_name()),
+                        Some("Query all documents of this schema."),
+                        &document_type.type_name(),
+                    ),
+                );
+            }
+
+            metaobject(
+                "dynamic_query_api",
+                Some("Container for dynamically generated document api"),
+                fields,
+            )
+        })
+    }
+
+    /// We don't expect this resolver to ever be called because dynamic schemas are not resolved
+    /// by `async_graphql` but by our own resolver implementation.
+    async fn resolve(
+        &self,
+        _ctx: &ContextSelectionSet<'_>,
+        _field: &Positioned<Field>,
+    ) -> ServerResult<Value> {
+        unreachable!("This resolver should have never been called. Please file a bug report!")
+    }
+
+    fn type_name() -> Cow<'static, str> {
+        Cow::Owned("dynamic_query_api".into())
+    }
+
+    fn qualified_type_name() -> String {
+        format!("{}!", <Self as OutputType>::type_name())
+    }
+
+    fn introspection_type_name(&self) -> Cow<'static, str> {
+        <Self as OutputType>::type_name()
+    }
+}
diff --git a/aquadoggo/src/graphql/client/dynamic_types/mod.rs b/aquadoggo/src/graphql/client/dynamic_types/mod.rs
new file mode 100644
index 000000000..c7152f123
--- /dev/null
+++ b/aquadoggo/src/graphql/client/dynamic_types/mod.rs
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+//! Dynamic type definitions for the client api.
+//!
+//! All dynamic type definitions are inserted from the `OutputType` implementation in the
+//! [`dynamic_query_output`] module.
+pub(crate) mod document;
+mod document_fields;
+pub(crate) mod document_meta;
+pub(crate) mod dynamic_query_output;
+#[cfg(test)]
+mod tests;
+mod utils;
+
+pub use document::Document;
+pub use document_fields::DocumentFields;
+pub use document_meta::DocumentMeta;
diff --git a/aquadoggo/src/graphql/client/dynamic_types/tests.rs b/aquadoggo/src/graphql/client/dynamic_types/tests.rs
new file mode 100644
index 000000000..7ed175420
--- /dev/null
+++ b/aquadoggo/src/graphql/client/dynamic_types/tests.rs
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+//! Test correct generation of output schema.
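+//!
+//! All tests introspect generated types with queries of the general shape
+//! `{ __type(name: "...") { kind, name, description, fields { name, type { name } } } }`.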
+use async_graphql::{value, Response, Value}; +use p2panda_rs::schema::{FieldType, SchemaId, SYSTEM_SCHEMAS}; +use p2panda_rs::test_utils::fixtures::random_key_pair; +use rstest::rstest; +use serde_json::json; + +use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; +use crate::test_helpers::graphql_test_client; + +#[rstest] +#[case(SYSTEM_SCHEMAS[0].id().to_string(), SYSTEM_SCHEMAS[0].description().to_string())] +#[case(SYSTEM_SCHEMAS[1].id().to_string(), SYSTEM_SCHEMAS[1].description().to_string())] +fn system_schema_container_type( + #[from(test_db)] runner: TestDatabaseRunner, + #[case] type_name: String, + #[case] type_description: String, +) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let client = graphql_test_client(&db).await; + let response = client + .post("/graphql") + .json(&json!({ + "query": format!( + r#"{{ + __type(name: "{}") {{ + kind, + name, + description, + fields {{ + name, + type {{ + name + }} + }} + }} + }}"#, + type_name + ), + })) + .send() + .await; + + let response: Response = response.json().await; + + let expected_data = value!({ + "__type": { + // Currently, all system schemas are object types. + "kind": "OBJECT", + "name": type_name, + "description": type_description, + "fields": [{ + "name": "meta", + "type": { + "name": "DocumentMeta" + } + }, + { + "name": "fields", + "type": { + "name": format!("{}Fields", type_name) + } + }] + } + }); + + assert_eq!(response.data, expected_data, "\n{:#?}\n", response.errors); + }); +} + +#[rstest] +fn application_schema_container_type(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |mut db: TestDatabase| async move { + let key_pair = random_key_pair(); + + // Add schema to node. + let schema = db + .add_schema( + "schema_name", + vec![("bool_field", FieldType::Bool)], + &key_pair, + ) + .await; + let type_name = schema.id().to_string(); + + let client = graphql_test_client(&db).await; + let response = client + .post("/graphql") + .json(&json!({ + "query": format!( + r#"{{ + schema: __type(name: "{}") {{ + kind, + name, + description, + fields {{ + name, + type {{ + name + }} + }} + }}, + }}"#, + type_name, + ), + })) + .send() + .await; + + let response: Response = response.json().await; + + let expected_data = value!({ + "schema": { + "kind": "OBJECT", + "name": type_name, + "description": schema.description(), + "fields": [{ + "name": "meta", + "type": { + "name": "DocumentMeta" + } + }, + { + "name": "fields", + "type": { + "name": format!("{}Fields", type_name) + } + }] + }, + }); + + assert_eq!(response.data, expected_data, "\n{:#?}\n", response.errors); + }); +} + +#[rstest] +fn application_schema_fields_type(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |mut db: TestDatabase| async move { + let key_pair = random_key_pair(); + + // Add schema to node. 
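+        // (`add_schema` is a `TestDatabase` helper; it makes the given schema with
+        // its fields available on the test node and returns the resulting `Schema`.)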
+ let schema = db + .add_schema( + "schema_name", + vec![ + // scalar field + ("bool_field", FieldType::Bool), + // object field + ( + "relation_field", + FieldType::Relation(SchemaId::SchemaDefinition(1)), + ), + // list field + ( + "list_field", + FieldType::RelationList(SchemaId::SchemaDefinition(1)), + ), + ], + &key_pair, + ) + .await; + let type_name = schema.id().to_string(); + + let client = graphql_test_client(&db).await; + let response = client + .post("/graphql") + .json(&json!({ + "query": format!( + r#"{{ + schemaFields: __type(name: "{}") {{ + description, + fields {{ + name, + type {{ + kind, + name + }} + }} + }} + }}"#, + format!("{}Fields", type_name), + ), + })) + .send() + .await; + + let response: Response = response.json().await; + + let expected_data = value!({ + "schemaFields": { + "description": "Data fields available on documents of this schema.", + "fields": [{ + "name": "bool_field", + "type": { + "kind": "SCALAR", + "name": "Boolean" + } + },{ + "name": "list_field", + "type": { + "kind": "LIST", + "name": Value::Null + } + },{ + "name": "relation_field", + "type": { + "kind": "OBJECT", + "name": "schema_definition_v1" + } + }] + } + }); + + assert_eq!(response.data, expected_data, "\n{:#?}\n", response.errors); + }); +} + +#[rstest] +fn metadata_type(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let client = graphql_test_client(&db).await; + let response = client + .post("/graphql") + .json(&json!({ + "query": r#"{ + __type(name: "DocumentMeta") { + kind, + name, + description, + fields { + name, + type { + name + } + } + } + }"#, + })) + .send() + .await; + + let response: Response = response.json().await; + + let expected_data = value!({ + "__type": { + // Currently, all system schemas are object types. + "kind": "OBJECT", + "name": "DocumentMeta", + "description": "Metadata for documents of this schema.", + "fields": [{ + "name": "documentId", + "type": { + "name": "DocumentIdScalar" + } + }, + { + "name": "viewId", + "type": { + "name": "DocumentViewIdScalar" + } + }] + } + }); + + assert_eq!(response.data, expected_data, "\n{:#?}\n", response.errors); + }); +} diff --git a/aquadoggo/src/graphql/client/dynamic_types/utils.rs b/aquadoggo/src/graphql/client/dynamic_types/utils.rs new file mode 100644 index 000000000..0c3010272 --- /dev/null +++ b/aquadoggo/src/graphql/client/dynamic_types/utils.rs @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use async_graphql::indexmap::IndexMap; +use async_graphql::registry::{MetaField, MetaType}; +use p2panda_rs::schema::FieldType; + +/// Get the GraphQL type name for a p2panda field type. +/// +/// GraphQL types for relations use the p2panda schema id as their name. +pub fn graphql_typename(operation_field_type: &FieldType) -> String { + match operation_field_type { + // Scalars + FieldType::Bool => "Boolean".to_string(), + FieldType::Int => "Int".to_string(), + FieldType::Float => "Float".to_string(), + FieldType::String => "String".to_string(), + + // Relations + FieldType::Relation(schema_id) => schema_id.to_string(), + FieldType::PinnedRelation(schema_id) => schema_id.to_string(), + FieldType::RelationList(schema_id) => format!("[{}]", schema_id), + FieldType::PinnedRelationList(schema_id) => format!("[{}]", schema_id), + } +} + +/// Make a simple [`MetaField`] with mostly default values. 
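+///
+/// Only `name`, `description` and `ty` are taken from the arguments; arguments,
+/// deprecation, caching and federation settings keep their default values.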
+pub fn metafield(name: &str, description: Option<&'static str>, type_name: &str) -> MetaField {
+    MetaField {
+        name: name.to_string(),
+        description,
+        ty: type_name.to_string(),
+        args: Default::default(),
+        deprecation: Default::default(),
+        cache_control: Default::default(),
+        external: false,
+        requires: None,
+        provides: None,
+        visible: None,
+        compute_complexity: None,
+        oneof: false,
+    }
+}
+
+/// Make a simple object [`MetaType`] with mostly default values.
+pub fn metaobject(
+    name: &str,
+    description: Option<&'static str>,
+    fields: IndexMap<String, MetaField>,
+) -> MetaType {
+    MetaType::Object {
+        name: name.to_string(),
+        description,
+        visible: Some(|_| true),
+        fields,
+        cache_control: Default::default(),
+        extends: false,
+        keys: None,
+        is_subscription: false,
+        // Dynamic query objects don't have an association to a Rust type.
+        rust_typename: "__fake__",
+    }
+}
diff --git a/aquadoggo/src/graphql/client/mod.rs b/aquadoggo/src/graphql/client/mod.rs
index 88fa3372c..5f43edd89 100644
--- a/aquadoggo/src/graphql/client/mod.rs
+++ b/aquadoggo/src/graphql/client/mod.rs
@@ -1,9 +1,18 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later
 
+//! API for p2panda clients to publish and query data on this node.
+mod dynamic_query;
+pub mod dynamic_types;
 mod mutation;
 mod query;
-mod response;
+mod static_query;
+mod static_types;
+#[cfg(test)]
+mod tests;
+mod utils;
 
+pub use dynamic_query::DynamicQuery;
 pub use mutation::ClientMutationRoot;
 pub use query::ClientRoot;
-pub use response::NextEntryArguments;
+pub use static_query::StaticQuery;
+pub use static_types::NextEntryArguments;
diff --git a/aquadoggo/src/graphql/client/mutation.rs b/aquadoggo/src/graphql/client/mutation.rs
index 3da254f47..341e7be5f 100644
--- a/aquadoggo/src/graphql/client/mutation.rs
+++ b/aquadoggo/src/graphql/client/mutation.rs
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later
 
+//! Mutation root.
 use async_graphql::{Context, Object, Result};
 use p2panda_rs::entry::{decode_entry, EntrySigned};
 use p2panda_rs::operation::{Operation, OperationEncoded, OperationId};
@@ -24,12 +25,12 @@ impl ClientMutationRoot {
         &self,
         ctx: &Context<'_>,
         #[graphql(name = "entry", desc = "Signed and encoded entry to publish")]
-        entry: scalars::EncodedEntry,
+        entry: scalars::EntrySignedScalar,
         #[graphql(
             name = "operation",
             desc = "p2panda operation representing the entry payload."
)] - operation: scalars::EncodedOperation, + operation: scalars::EncodedOperationScalar, ) -> Result { let store = ctx.data::()?; let tx = ctx.data::()?; @@ -112,10 +113,11 @@ mod tests { use tokio::sync::broadcast; use crate::bus::ServiceMessage; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::domain::next_args; + use crate::graphql::GraphQLSchemaManager; use crate::http::{build_server, HttpServiceContext}; + use crate::schema::SchemaProvider; use crate::test_helpers::TestClient; fn to_hex(value: Value) -> String { @@ -225,9 +227,12 @@ mod tests { #[rstest] fn publish_entry(#[from(test_db)] runner: TestDatabaseRunner, publish_entry_request: Request) { - runner.with_db_teardown(move |db: TestDatabase| async move { - let (tx, _) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); + let response = context.schema.execute(publish_entry_request).await; assert_eq!( @@ -249,9 +254,11 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, publish_entry_request: Request, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, mut rx) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); context.schema.execute(publish_entry_request).await; @@ -269,9 +276,11 @@ mod tests { #[rstest] fn publish_entry_error_handling(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _rx) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); let parameters = Variables::from_value(value!({ "entry": ENTRY_ENCODED.to_string(), @@ -293,9 +302,11 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, publish_entry_request: Request, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _rx) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); let client = TestClient::new(build_server(context)); let response = client @@ -325,12 +336,20 @@ mod tests { } #[rstest] - #[case::no_entry("", "", "Bytes to decode had length of 0")] - #[case::invalid_entry_bytes("AB01", "", "Could not decode author public key from bytes")] + #[case::no_entry( + "", + "", + "Failed to parse \"EntrySignedScalar\": Bytes to decode had length of 0" + )] + #[case::invalid_entry_bytes( + "AB01", + "", + "Failed to parse \"EntrySignedScalar\": Could not decode author public key from bytes" + )] #[case::invalid_entry_hex_encoding( "-/74='4,.=4-=235m-0 
34.6-3", &OPERATION_ENCODED, - "invalid hex encoding in entry" + "Failed to parse \"EntrySignedScalar\": invalid hex encoding in entry" )] #[case::no_operation( &ENTRY_ENCODED, @@ -345,7 +364,7 @@ mod tests { #[case::invalid_operation_hex_encoding( &ENTRY_ENCODED, "0-25.-%5930n3544[{{{ @@@", - "invalid hex encoding in operation" + "Failed to parse \"EncodedOperationScalar\": invalid hex encoding in operation" )] #[case::operation_does_not_match( &ENTRY_ENCODED, @@ -363,12 +382,12 @@ mod tests { #[case::valid_entry_with_extra_hex_char_at_end( &{ENTRY_ENCODED.to_string() + "A"}, &OPERATION_ENCODED, - "invalid hex encoding in entry" + "Failed to parse \"EntrySignedScalar\": invalid hex encoding in entry" )] #[case::valid_entry_with_extra_hex_char_at_start( &{"A".to_string() + &ENTRY_ENCODED}, &OPERATION_ENCODED, - "invalid hex encoding in entry" + "Failed to parse \"EntrySignedScalar\": invalid hex encoding in entry" )] #[case::should_not_have_skiplink( &entry_signed_encoded_unvalidated( @@ -380,7 +399,7 @@ mod tests { key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not decode payload hash DecodeError" + "Failed to parse \"EntrySignedScalar\": Could not decode payload hash DecodeError" )] #[case::should_not_have_backlink( &entry_signed_encoded_unvalidated( @@ -392,7 +411,7 @@ mod tests { key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not decode payload hash DecodeError" + "Failed to parse \"EntrySignedScalar\": Could not decode payload hash DecodeError" )] #[case::should_not_have_backlink_or_skiplink( &entry_signed_encoded_unvalidated( @@ -404,7 +423,7 @@ mod tests { key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not decode payload hash DecodeError" + "Failed to parse \"EntrySignedScalar\": Could not decode payload hash DecodeError" )] #[case::missing_backlink( &entry_signed_encoded_unvalidated( @@ -416,7 +435,7 @@ mod tests { key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not decode backlink yamf hash: DecodeError" + "Failed to parse \"EntrySignedScalar\": Could not decode backlink yamf hash: DecodeError" )] #[case::missing_skiplink( &entry_signed_encoded_unvalidated( @@ -428,7 +447,7 @@ mod tests { key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not decode backlink yamf hash: DecodeError" + "Failed to parse \"EntrySignedScalar\": Could not decode backlink yamf hash: DecodeError" )] #[case::should_not_include_skiplink( &entry_signed_encoded_unvalidated( @@ -440,7 +459,7 @@ mod tests { key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not decode payload hash DecodeError" + "Failed to parse \"EntrySignedScalar\": Could not decode payload hash DecodeError" )] #[case::payload_hash_and_size_missing( &entry_signed_encoded_unvalidated( @@ -452,7 +471,7 @@ mod tests { key_pair(PRIVATE_KEY) ), &OPERATION_ENCODED, - "Could not decode payload hash DecodeError" + "Failed to parse \"EntrySignedScalar\": Could not decode payload hash DecodeError" )] #[case::create_operation_with_previous_operations( &entry_signed_encoded_unvalidated( @@ -500,9 +519,11 @@ mod tests { let operation_encoded = operation_encoded.to_string(); let expected_error_message = expected_error_message.to_string(); - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _rx) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await; + let context = 
HttpServiceContext::new(manager); let client = TestClient::new(build_server(context)); let publish_entry_request = publish_entry_request(&entry_encoded, &operation_encoded); @@ -631,9 +652,11 @@ mod tests { let operation_encoded = operation_encoded.to_string(); let expected_error_message = expected_error_message.to_string(); - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let (tx, _rx) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); let client = TestClient::new(build_server(context)); let publish_entry_request = publish_entry_request(&entry_encoded, &operation_encoded); @@ -660,12 +683,14 @@ mod tests { #[rstest] fn publish_many_entries(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let key_pairs = vec![KeyPair::new(), KeyPair::new()]; let num_of_entries = 13; let (tx, _rx) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store.clone(), tx); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(db.store.clone(), tx, schema_provider).await; + let context = HttpServiceContext::new(manager); let client = TestClient::new(build_server(context)); for key_pair in &key_pairs { @@ -733,9 +758,12 @@ mod tests { #[with(1, 1, 1, false, SCHEMA_ID.parse().unwrap())] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|populated_db: TestDatabase| async move { + runner.with_db_teardown(|populated_db: TestDatabase| async move { let (tx, _rx) = broadcast::channel(16); - let context = HttpServiceContext::new(populated_db.store.clone(), tx); + let schema_provider = SchemaProvider::default(); + let manager = + GraphQLSchemaManager::new(populated_db.store.clone(), tx, schema_provider).await; + let context = HttpServiceContext::new(manager); let client = TestClient::new(build_server(context)); // Get the one entry from the store. diff --git a/aquadoggo/src/graphql/client/query.rs b/aquadoggo/src/graphql/client/query.rs index d8d7e3bc6..3dcbd3932 100644 --- a/aquadoggo/src/graphql/client/query.rs +++ b/aquadoggo/src/graphql/client/query.rs @@ -1,200 +1,16 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use async_graphql::{Context, Object, Result}; -use p2panda_rs::document::{DocumentId, DocumentViewId}; -use p2panda_rs::identity::Author; -use p2panda_rs::Validate; +//! Client API root. +use async_graphql::MergedObject; -use crate::db::provider::SqlStorage; -use crate::domain::next_args; -use crate::graphql::client::response::NextEntryArguments; -use crate::graphql::scalars; +use crate::graphql::client::{DynamicQuery, StaticQuery}; -/// GraphQL queries for the Client API. -#[derive(Default, Debug, Copy, Clone)] -pub struct ClientRoot; +/// Root query object for client api that contains a static and a dynamic part. +#[derive(MergedObject, Debug)] +pub struct ClientRoot(StaticQuery, DynamicQuery); -#[Object] impl ClientRoot { - /// Return required arguments for publishing the next entry. 
- async fn next_entry_args( - &self, - ctx: &Context<'_>, - #[graphql( - name = "publicKey", - desc = "Public key of author that will encode and sign the next entry \ - using the returned arguments" - )] - public_key: scalars::PublicKey, - #[graphql( - name = "documentId", - desc = "Document the entry's UPDATE or DELETE operation is referring to, \ - can be left empty when it is a CREATE operation" - )] - document_id: Option, - ) -> Result { - // @TODO: The api for `next_entry_args` needs to be updated to accept a `DocumentViewId` - - // Access the store from context. - let store = ctx.data::()?; - - // Convert and validate passed parameters. - let public_key: Author = public_key.into(); - let document_view_id: Option = document_id - .map(DocumentId::from) - .map(|id| id.as_str().parse().unwrap()); - - public_key.validate()?; - if let Some(ref document_view_id) = document_view_id { - document_view_id.validate()?; - } - - // Calculate next entry args. - next_args(store, &public_key, document_view_id.as_ref()).await - } -} - -#[cfg(test)] -mod tests { - use std::convert::TryFrom; - - use async_graphql::{value, Response}; - use p2panda_rs::identity::Author; - use rstest::rstest; - use serde_json::json; - use tokio::sync::broadcast; - - use crate::db::provider::SqlStorage; - use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; - use crate::http::build_server; - use crate::http::HttpServiceContext; - use crate::test_helpers::TestClient; - - #[rstest] - fn next_entry_args_valid_query(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(move |db: TestDatabase| async move { - let (tx, _) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); - let client = TestClient::new(build_server(context)); - - // Selected fields need to be alphabetically sorted because that's what the `json` - // macro that is used in the assert below produces. - let received_entry_args = client - .post("/graphql") - .json(&json!({ - "query": r#"{ - nextEntryArgs( - publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a" - ) { - logId, - seqNum, - backlink, - skiplink - } - }"#, - })) - .send() - .await - .json::() - .await; - - assert_eq!( - received_entry_args.data, - value!({ - "nextEntryArgs": { - "logId": "0", - "seqNum": "1", - "backlink": null, - "skiplink": null, - } - }) - ); - }) - } - - #[rstest] - fn next_entry_args_valid_query_with_document_id( - #[with(1, 1, 1)] - #[from(test_db)] - runner: TestDatabaseRunner, - ) { - runner.with_db_teardown(move |db: TestDatabase| async move { - let (tx, _) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); - let client = TestClient::new(build_server(context)); - - let document_id = db.test_data.documents.get(0).unwrap(); - let author = - Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap(); - - // Selected fields need to be alphabetically sorted because that's what the `json` - // macro that is used in the assert below produces. 
-            let received_entry_args = client
-                .post("/graphql")
-                .json(&json!({
-                    "query":
-                        format!(
-                            "{{
-                            nextEntryArgs(
-                                publicKey: \"{}\",
-                                documentId: \"{}\"
-                            ) {{
-                                logId,
-                                seqNum,
-                                backlink,
-                                skiplink
-                            }}
-                        }}",
-                            author.as_str(),
-                            document_id.as_str()
-                        )
-                }))
-                .send()
-                .await
-                .json::<Response>()
-                .await;
-
-            assert!(received_entry_args.is_ok());
-            assert_eq!(
-                received_entry_args.data,
-                value!({
-                    "nextEntryArgs": {
-                        "logId": "0",
-                        "seqNum": "2",
-                        "backlink": "0020c8e09edd863b308f9c60b8ba506f29da512d0c9b5a131287f402c57777af5678",
-                        "skiplink": null,
-                    }
-                })
-            );
-        })
-    }
-
-    #[rstest]
-    fn next_entry_args_error_response(#[from(test_db)] runner: TestDatabaseRunner) {
-        runner.with_db_teardown(move |db: TestDatabase<SqlStorage>| async move {
-            let (tx, _) = broadcast::channel(16);
-            let context = HttpServiceContext::new(db.store, tx);
-            let client = TestClient::new(build_server(context));
-
-            // Selected fields need to be alphabetically sorted because that's what the `json` macro
-            // that is used in the assert below produces.
-            let response = client
-                .post("/graphql")
-                .json(&json!({
-                    "query": r#"{
-                        nextEntryArgs(publicKey: "nope") {
-                            logId
-                        }
-                    }"#,
-                }))
-                .send()
-                .await;
-
-            let response: Response = response.json().await;
-            assert_eq!(
-                response.errors[0].message,
-                "invalid hex encoding in author string"
-            )
-        })
+    pub fn new() -> Self {
+        Self(StaticQuery::default(), DynamicQuery::default())
     }
 }
diff --git a/aquadoggo/src/graphql/client/static_query.rs b/aquadoggo/src/graphql/client/static_query.rs
new file mode 100644
index 000000000..fb088fc52
--- /dev/null
+++ b/aquadoggo/src/graphql/client/static_query.rs
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+//! Static fields of the client api.
+use async_graphql::{Context, Object, Result};
+use p2panda_rs::document::{DocumentId, DocumentViewId};
+use p2panda_rs::identity::Author;
+use p2panda_rs::Validate;
+
+use crate::db::provider::SqlStorage;
+use crate::domain::next_args;
+use crate::graphql::client::NextEntryArguments;
+use crate::graphql::scalars;
+
+/// GraphQL queries for the Client API.
+#[derive(Default, Debug, Copy, Clone)]
+pub struct StaticQuery;
+
+#[Object]
+impl StaticQuery {
+    /// Return required arguments for publishing the next entry.
+    async fn next_entry_args(
+        &self,
+        ctx: &Context<'_>,
+        #[graphql(
+            name = "publicKey",
+            desc = "Public key of author that will encode and sign the next entry \
+                    using the returned arguments"
+        )]
+        public_key: scalars::PublicKeyScalar,
+        #[graphql(
+            name = "documentId",
+            desc = "Document the entry's UPDATE or DELETE operation is referring to, \
+                    can be left empty when it is a CREATE operation"
+        )]
+        document_id: Option<scalars::DocumentIdScalar>,
+    ) -> Result<NextEntryArguments> {
+        // @TODO: The api for `next_entry_args` needs to be updated to accept a `DocumentViewId`
+
+        // Access the store from context.
+        let store = ctx.data::<SqlStorage>()?;
+
+        // Convert and validate passed parameters.
+        let public_key: Author = public_key.into();
+        let document_id = document_id.map(|val| DocumentId::from(&val));
+
+        public_key.validate()?;
+        if let Some(ref document_view_id) = document_id {
+            document_view_id.validate()?;
+        }
+
+        // Convert document_id into document_view_id and unwrap as we already validated above.
+        let document_view_id: Option<DocumentViewId> =
+            document_id.map(|id| id.as_str().parse().unwrap());
+
+        // Calculate next entry args.
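+        // (`next_args` computes the log id, sequence number, backlink and skiplink
+        // for this author's next entry, as returned in `NextEntryArguments`.)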
+        next_args(store, &public_key, document_view_id.as_ref()).await
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::convert::TryFrom;
+
+    use async_graphql::{value, Response};
+    use p2panda_rs::identity::Author;
+    use rstest::rstest;
+    use serde_json::json;
+    use tokio::sync::broadcast;
+
+    use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
+    use crate::graphql::GraphQLSchemaManager;
+    use crate::http::{build_server, HttpServiceContext};
+    use crate::schema::SchemaProvider;
+    use crate::test_helpers::TestClient;
+
+    #[rstest]
+    fn next_entry_args_valid_query(#[from(test_db)] runner: TestDatabaseRunner) {
+        runner.with_db_teardown(move |db: TestDatabase| async move {
+            let (tx, _) = broadcast::channel(16);
+            let schema_provider = SchemaProvider::default();
+            let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await;
+            let context = HttpServiceContext::new(manager);
+            let client = TestClient::new(build_server(context));
+
+            // Selected fields need to be alphabetically sorted because that's what the `json`
+            // macro that is used in the assert below produces.
+            let received_entry_args = client
+                .post("/graphql")
+                .json(&json!({
+                    "query": r#"{
+                        nextEntryArgs(
+                            publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a"
+                        ) {
+                            logId,
+                            seqNum,
+                            backlink,
+                            skiplink
+                        }
+                    }"#,
+                }))
+                .send()
+                .await
+                .json::<Response>()
+                .await;
+
+            assert_eq!(
+                received_entry_args.data,
+                value!({
+                    "nextEntryArgs": {
+                        "logId": "0",
+                        "seqNum": "1",
+                        "backlink": null,
+                        "skiplink": null,
+                    }
+                })
+            );
+        })
+    }
+
+    #[rstest]
+    fn next_entry_args_valid_query_with_document_id(
+        #[with(1, 1, 1)]
+        #[from(test_db)]
+        runner: TestDatabaseRunner,
+    ) {
+        runner.with_db_teardown(move |db: TestDatabase| async move {
+            let (tx, _) = broadcast::channel(16);
+            let schema_provider = SchemaProvider::default();
+            let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await;
+            let context = HttpServiceContext::new(manager);
+            let client = TestClient::new(build_server(context));
+
+            let document_id = db.test_data.documents.get(0).unwrap();
+            let author =
+                Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
+
+            // Selected fields need to be alphabetically sorted because that's what the `json`
+            // macro that is used in the assert below produces.
+            let received_entry_args = client
+                .post("/graphql")
+                .json(&json!({
+                    "query":
+                        format!(
+                            "{{
+                            nextEntryArgs(
+                                publicKey: \"{}\",
+                                documentId: \"{}\"
+                            ) {{
+                                logId,
+                                seqNum,
+                                backlink,
+                                skiplink
+                            }}
+                        }}",
+                            author.as_str(),
+                            document_id.as_str()
+                        )
+                }))
+                .send()
+                .await
+                .json::<Response>()
+                .await;
+
+            assert!(received_entry_args.is_ok());
+            assert_eq!(
+                received_entry_args.data,
+                value!({
+                    "nextEntryArgs": {
+                        "logId": "0",
+                        "seqNum": "2",
+                        "backlink": "0020c8e09edd863b308f9c60b8ba506f29da512d0c9b5a131287f402c57777af5678",
+                        "skiplink": null,
+                    }
+                })
+            );
+        })
+    }
+
+    #[rstest]
+    fn next_entry_args_error_response(#[from(test_db)] runner: TestDatabaseRunner) {
+        runner.with_db_teardown(move |db: TestDatabase| async move {
+            let (tx, _) = broadcast::channel(16);
+            let schema_provider = SchemaProvider::default();
+            let manager = GraphQLSchemaManager::new(db.store, tx, schema_provider).await;
+            let context = HttpServiceContext::new(manager);
+            let client = TestClient::new(build_server(context));
+
+            let response = client
+                .post("/graphql")
+                .json(&json!({
+                    "query": r#"{
+                        nextEntryArgs(publicKey: "nope") {
+                            logId
+                        }
+                    }"#,
+                }))
+                .send()
+                .await;
+
+            let response: Response = response.json().await;
+            assert_eq!(
+                response.errors[0].message,
+                "Failed to parse \"PublicKeyScalar\": invalid hex encoding in author string"
+            )
+        })
+    }
+}
diff --git a/aquadoggo/src/graphql/client/static_types/mod.rs b/aquadoggo/src/graphql/client/static_types/mod.rs
new file mode 100644
index 000000000..3c389ba3f
--- /dev/null
+++ b/aquadoggo/src/graphql/client/static_types/mod.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+mod next_entry_arguments;
+
+pub use next_entry_arguments::NextEntryArguments;
diff --git a/aquadoggo/src/graphql/client/response.rs b/aquadoggo/src/graphql/client/static_types/next_entry_arguments.rs
similarity index 88%
rename from aquadoggo/src/graphql/client/response.rs
rename to aquadoggo/src/graphql/client/static_types/next_entry_arguments.rs
index 90f278fe5..e2f7d79c1 100644
--- a/aquadoggo/src/graphql/client/response.rs
+++ b/aquadoggo/src/graphql/client/static_types/next_entry_arguments.rs
@@ -10,11 +10,11 @@ use crate::graphql::scalars;
 pub struct NextEntryArguments {
     /// Log id of the entry.
     #[graphql(name = "logId")]
-    pub log_id: scalars::LogId,
+    pub log_id: scalars::LogIdScalar,
 
     /// Sequence number of the entry.
     #[graphql(name = "seqNum")]
-    pub seq_num: scalars::SeqNum,
+    pub seq_num: scalars::SeqNumScalar,
 
     /// Hash of the entry backlink.
     pub backlink: Option<scalars::EntryHash>,
diff --git a/aquadoggo/src/graphql/client/tests.rs b/aquadoggo/src/graphql/client/tests.rs
new file mode 100644
index 000000000..116262dc2
--- /dev/null
+++ b/aquadoggo/src/graphql/client/tests.rs
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+//! Integration tests for dynamic graphql schema generation and query resolution.
+use std::convert::TryInto;
+
+use async_graphql::{value, Response};
+use p2panda_rs::document::DocumentId;
+use p2panda_rs::operation::OperationFields;
+use p2panda_rs::schema::FieldType;
+use p2panda_rs::test_utils::fixtures::random_key_pair;
+use rstest::rstest;
+use serde_json::json;
+
+use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
+use crate::test_helpers::graphql_test_client;
+
+#[rstest]
+fn scalar_fields(#[from(test_db)] runner: TestDatabaseRunner) {
+    // Test querying application documents with scalar fields (no relations) by document id and by
+    // view id.
+ + runner.with_db_teardown(&|mut db: TestDatabase| async move { + let key_pair = random_key_pair(); + + // Add schema to node. + let schema = db + .add_schema( + "schema_name", + vec![ + ("bool", FieldType::Bool), + ("float", FieldType::Float), + ("int", FieldType::Int), + ("text", FieldType::String), + ], + &key_pair, + ) + .await; + + // Publish document on node. + let doc_fields = vec![ + ("bool", true.into()), + ("float", (1.0).into()), + ("int", 1.into()), + ("text", "yes".into()), + ] + .try_into() + .unwrap(); + let view_id = db.add_document(schema.id(), doc_fields, &key_pair).await; + + // Configure and send test query. + let client = graphql_test_client(&db).await; + let query = format!( + r#"{{ + scalarDoc: {type_name}(viewId: "{view_id}") {{ + fields {{ + bool, + float, + int, + text + }} + }}, + }}"#, + type_name = schema.id(), + view_id = view_id + ); + + let response = client + .post("/graphql") + .json(&json!({ + "query": query, + })) + .send() + .await; + + let response: Response = response.json().await; + + let expected_data = value!({ + "scalarDoc": { + "fields": { + "bool": true, + "float": 1.0, + "int": 1, + "text": "yes", + } + }, + }); + assert_eq!(response.data, expected_data); + }); +} + +#[rstest] +fn relation_fields(#[from(test_db)] runner: TestDatabaseRunner) { + // Test querying application documents across a parent-child relation using different kinds of + // relation fields. + + runner.with_db_teardown(&|mut db: TestDatabase| async move { + let key_pair = random_key_pair(); + + // Add schemas to node. + let child_schema = db + .add_schema("child", vec![("it_works", FieldType::Bool)], &key_pair) + .await; + + let parent_schema = db + .add_schema( + "parent", + vec![ + ( + "by_relation", + FieldType::Relation(child_schema.id().clone()), + ), + ( + "by_pinned_relation", + FieldType::PinnedRelation(child_schema.id().clone()), + ), + ( + "by_relation_list", + FieldType::RelationList(child_schema.id().clone()), + ), + ( + "by_pinned_relation_list", + FieldType::PinnedRelationList(child_schema.id().clone()), + ), + ], + &key_pair, + ) + .await; + + // Publish child document on node. + let child_view_id = db + .add_document( + child_schema.id(), + vec![("it_works", true.into())].try_into().unwrap(), + &key_pair, + ) + .await; + // There is only one operation so view id = doc id. + let child_doc_id: DocumentId = child_view_id.to_string().parse().unwrap(); + + // Publish parent document on node. + let parent_fields: OperationFields = vec![ + ("by_relation", child_doc_id.clone().into()), + ("by_pinned_relation", child_view_id.clone().into()), + ("by_relation_list", vec![child_doc_id].into()), + ("by_pinned_relation_list", vec![child_view_id].into()), + ] + .try_into() + .unwrap(); + + let parent_view_id = db + .add_document(parent_schema.id(), parent_fields, &key_pair) + .await; + + // Configure and send test query. 
+        let client = graphql_test_client(&db).await;
+        let query = format!(
+            r#"{{
+                result: {}(viewId: "{}") {{
+                    fields {{
+                        by_relation {{ fields {{ it_works }} }},
+                        by_pinned_relation {{ fields {{ it_works }} }},
+                        by_relation_list {{ fields {{ it_works }} }},
+                        by_pinned_relation_list {{ fields {{ it_works }} }},
+                    }}
+                }}
+            }}"#,
+            parent_schema.id(),
+            parent_view_id,
+        );
+
+        let response = client
+            .post("/graphql")
+            .json(&json!({
+                "query": query,
+            }))
+            .send()
+            .await;
+
+        let response: Response = response.json().await;
+
+        let expected_data = value!({
+            "result": {
+                "fields": {
+                    "by_relation": {
+                        "fields": {
+                            "it_works": true
+                        }
+                    },
+                    "by_pinned_relation": {
+                        "fields": {
+                            "it_works": true
+                        }
+                    },
+                    "by_relation_list": [{
+                        "fields": {
+                            "it_works": true
+                        }
+                    }],
+                    "by_pinned_relation_list": [{
+                        "fields": {
+                            "it_works": true
+                        }
+                    }]
+                }
+            }
+        });
+
+        assert_eq!(response.data, expected_data);
+    });
+}
diff --git a/aquadoggo/src/graphql/client/utils.rs b/aquadoggo/src/graphql/client/utils.rs
new file mode 100644
index 000000000..8022adc5f
--- /dev/null
+++ b/aquadoggo/src/graphql/client/utils.rs
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+use async_graphql::{Pos, ServerError, ServerResult};
+use p2panda_rs::document::DocumentViewId;
+use p2panda_rs::schema::SchemaId;
+
+use crate::db.provider::SqlStorage;
+
+/// Validate that the given view matches the given schema.
+///
+/// Returns an error if the view id does not exist in the store.
+pub async fn validate_view_matches_schema(
+    document_view_id: &DocumentViewId,
+    schema_id: &SchemaId,
+    store: &SqlStorage,
+    pos: Option<Pos>,
+) -> ServerResult<()> {
+    let document_schema_id = store
+        .get_schema_by_document_view(document_view_id)
+        .await
+        .map_err(|err| ServerError::new(err.to_string(), None))?
+        .ok_or_else(|| ServerError::new("View not found".to_string(), None))?;
+
+    match &document_schema_id == schema_id {
+        true => Ok(()),
+        false => Err(ServerError::new(
+            format!(
+                "Found <{}> but it does not belong to expected <{}>",
+                document_view_id, schema_id
+            ),
+            pos,
+        )),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::convert::TryInto;
+
+    use p2panda_rs::schema::{FieldType, SchemaId};
+    use p2panda_rs::test_utils::fixtures::random_key_pair;
+    use rstest::rstest;
+
+    use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
+    use crate::graphql::client::utils::validate_view_matches_schema;
+
+    #[rstest]
+    fn test_validate_view_matches_schema(#[from(test_db)] runner: TestDatabaseRunner) {
+        runner.with_db_teardown(&|mut db: TestDatabase| async move {
+            let key_pair = random_key_pair();
+
+            let view_id = db
+                .add_document(
+                    &SchemaId::SchemaFieldDefinition(1),
+                    vec![
+                        ("name", "test_field".into()),
+                        ("type", FieldType::String.into()),
+                    ]
+                    .try_into()
+                    .unwrap(),
+                    &key_pair,
+                )
+                .await;
+
+            assert!(validate_view_matches_schema(
+                &view_id,
+                &SchemaId::SchemaFieldDefinition(1),
+                &db.store,
+                None
+            )
+            .await
+            .is_ok());
+
+            assert!(validate_view_matches_schema(
+                &view_id,
+                &SchemaId::SchemaDefinition(1),
+                &db.store,
+                None
+            )
+            .await
+            .is_err());
+        });
+    }
+}
diff --git a/aquadoggo/src/graphql/mod.rs b/aquadoggo/src/graphql/mod.rs
index 73f89b55b..1fb980f57 100644
--- a/aquadoggo/src/graphql/mod.rs
+++ b/aquadoggo/src/graphql/mod.rs
@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later
 
+//! Provides GraphQL APIs for both replication and interfacing with p2panda clients.
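+//!
+//! The client API combines a static query root (`StaticQuery`) with per-schema
+//! queries generated at runtime (`DynamicQuery`), while the replication API
+//! serves entry synchronisation between nodes.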
pub mod client; pub mod pagination; pub mod replication; pub mod scalars; mod schema; -pub use schema::{build_root_schema, MutationRoot, QueryRoot, RootSchema}; +pub use schema::{build_root_schema, GraphQLSchemaManager, QueryRoot, RootSchema}; diff --git a/aquadoggo/src/graphql/pagination.rs b/aquadoggo/src/graphql/pagination.rs index f3572ef92..1723933b8 100644 --- a/aquadoggo/src/graphql/pagination.rs +++ b/aquadoggo/src/graphql/pagination.rs @@ -1,5 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +//! Generic types for pagination through the GraphQL API. use serde::{Deserialize, Serialize}; /// Generic pagination response of GraphQL connection API. diff --git a/aquadoggo/src/graphql/replication/client.rs b/aquadoggo/src/graphql/replication/client.rs index 6efb2d490..bdf786924 100644 --- a/aquadoggo/src/graphql/replication/client.rs +++ b/aquadoggo/src/graphql/replication/client.rs @@ -17,7 +17,7 @@ use crate::graphql::scalars; #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct Response { - entries_newer_than_seq_num: Paginated, + entries_newer_than_seq_num: Paginated, } /// Attempts to get entries newer than the given sequence number for a public key and log id. diff --git a/aquadoggo/src/graphql/replication/mod.rs b/aquadoggo/src/graphql/replication/mod.rs index 10a51aa26..b11e7717f 100644 --- a/aquadoggo/src/graphql/replication/mod.rs +++ b/aquadoggo/src/graphql/replication/mod.rs @@ -1,5 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +//! API for replicating data with other p2panda nodes pub mod client; mod query; mod response; diff --git a/aquadoggo/src/graphql/replication/query.rs b/aquadoggo/src/graphql/replication/query.rs index ada1aa2f2..d42fead80 100644 --- a/aquadoggo/src/graphql/replication/query.rs +++ b/aquadoggo/src/graphql/replication/query.rs @@ -18,7 +18,7 @@ const DEFAULT_PAGINATION_SIZE: usize = 10; /// Response type for paginated queries. type ConnectionResult = - Connection; + Connection; /// GraphQL queries for the Replication API. 
#[derive(Default, Debug, Copy, Clone)] @@ -48,10 +48,11 @@ impl ReplicationRoot { async fn entry_by_log_id_and_seq_num<'a>( &self, ctx: &Context<'a>, - #[graphql(name = "logId", desc = "Log id of entry")] log_id: scalars::LogId, - #[graphql(name = "seqNum", desc = "Sequence number of entry")] seq_num: scalars::SeqNum, + #[graphql(name = "logId", desc = "Log id of entry")] log_id: scalars::LogIdScalar, + #[graphql(name = "seqNum", desc = "Sequence number of entry")] + seq_num: scalars::SeqNumScalar, #[graphql(name = "publicKey", desc = "Public key of the entry author")] - public_key: scalars::PublicKey, + public_key: scalars::PublicKeyScalar, ) -> Result { let store = ctx.data::()?; @@ -73,14 +74,14 @@ impl ReplicationRoot { async fn entries_newer_than_seq_num<'a>( &self, ctx: &Context<'a>, - #[graphql(name = "logId", desc = "Log id of entries")] log_id: scalars::LogId, + #[graphql(name = "logId", desc = "Log id of entries")] log_id: scalars::LogIdScalar, #[graphql(name = "publicKey", desc = "Public key of the author")] - public_key: scalars::PublicKey, + public_key: scalars::PublicKeyScalar, #[graphql( name = "seqNum", desc = "Query entries starting from this sequence number" )] - seq_num: Option, + seq_num: Option, first: Option, after: Option, ) -> Result { @@ -91,7 +92,7 @@ impl ReplicationRoot { None, first, None, - |after: Option, _, first, _| async move { + |after: Option, _, first, _| async move { // Add `seq_num` to the `after` cursor to get starting sequence number let seq_num = seq_num.map(|seq| seq.as_u64()).unwrap_or_else(|| 0); let start: u64 = seq_num + after.map(|a| a.as_u64()).unwrap_or_else(|| 0); @@ -140,7 +141,7 @@ impl ReplicationRoot { } /// Use sequence numbers as cursor to paginate entry queries. -impl CursorType for scalars::SeqNum { +impl CursorType for scalars::SeqNumScalar { type Error = Error; fn decode_cursor(str: &str) -> Result { @@ -162,7 +163,6 @@ mod tests { use p2panda_rs::test_utils::fixtures::random_hash; use rstest::rstest; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{ populate_test_db, test_db, with_db_manager_teardown, PopulateDatabaseConfig, TestDatabase, TestDatabaseManager, TestDatabaseRunner, @@ -176,7 +176,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store) @@ -217,7 +217,7 @@ mod tests { #[from(test_db)] runner: TestDatabaseRunner, #[from(random_hash)] random_hash: Hash, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store) @@ -240,7 +240,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store) @@ -288,7 +288,7 @@ mod tests { #[rstest] fn entry_by_log_id_and_seq_num_not_found(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = 
ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store) diff --git a/aquadoggo/src/graphql/replication/response.rs b/aquadoggo/src/graphql/replication/response.rs index 54979db13..16d1c6955 100644 --- a/aquadoggo/src/graphql/replication/response.rs +++ b/aquadoggo/src/graphql/replication/response.rs @@ -17,10 +17,10 @@ use crate::graphql::scalars; #[graphql(complex)] pub struct EncodedEntryAndOperation { /// Signed and encoded bamboo entry. - pub entry: scalars::EncodedEntry, + pub entry: scalars::EntrySignedScalar, /// p2panda operation, CBOR bytes encoded as hexadecimal string. - pub operation: Option, + pub operation: Option, } #[ComplexObject] @@ -29,7 +29,7 @@ impl EncodedEntryAndOperation { async fn certificate_pool<'a>( &self, ctx: &Context<'a>, - ) -> async_graphql::Result> { + ) -> async_graphql::Result> { let store = ctx.data::()?; // Decode entry @@ -81,7 +81,6 @@ mod tests { use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore}; use rstest::rstest; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::graphql::replication::ReplicationRoot; @@ -91,7 +90,7 @@ mod tests { #[with(13, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let replication_root = ReplicationRoot::default(); let schema = Schema::build(replication_root, EmptyMutation, EmptySubscription) .data(db.store.clone()) diff --git a/aquadoggo/src/graphql/scalars/document_id.rs b/aquadoggo/src/graphql/scalars/document_id.rs deleted file mode 100644 index 58544c652..000000000 --- a/aquadoggo/src/graphql/scalars/document_id.rs +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-or-later - -use std::fmt::Display; - -use async_graphql::scalar; -use serde::{Deserialize, Serialize}; - -/// Id of a p2panda document. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct DocumentId(p2panda_rs::document::DocumentId); - -impl From for DocumentId { - fn from(document_id: p2panda_rs::document::DocumentId) -> Self { - Self(document_id) - } -} - -impl From for p2panda_rs::document::DocumentId { - fn from(document_id: DocumentId) -> p2panda_rs::document::DocumentId { - document_id.0 - } -} - -impl Display for DocumentId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -scalar!(DocumentId); diff --git a/aquadoggo/src/graphql/scalars/document_id_scalar.rs b/aquadoggo/src/graphql/scalars/document_id_scalar.rs new file mode 100644 index 000000000..127740bf4 --- /dev/null +++ b/aquadoggo/src/graphql/scalars/document_id_scalar.rs @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use std::fmt::Display; + +use async_graphql::{InputValueError, InputValueResult, Scalar, ScalarType, Value}; +use p2panda_rs::document::DocumentId; +use serde::{Deserialize, Serialize}; + +/// Id of a p2panda document. 
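+///
+/// Wraps `p2panda_rs::document::DocumentId` so document ids can be accepted and
+/// returned as GraphQL string values.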
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub struct DocumentIdScalar(DocumentId);
+
+#[Scalar]
+impl ScalarType for DocumentIdScalar {
+    fn parse(value: Value) -> InputValueResult<Self> {
+        match &value {
+            Value::String(str_value) => {
+                let document_id = str_value.as_str().parse::<DocumentId>()?;
+                Ok(DocumentIdScalar(document_id))
+            }
+            _ => Err(InputValueError::expected_type(value)),
+        }
+    }
+
+    fn to_value(&self) -> Value {
+        Value::String(self.0.as_str().to_string())
+    }
+}
+
+impl From<&DocumentId> for DocumentIdScalar {
+    fn from(document_id: &DocumentId) -> Self {
+        Self(document_id.clone())
+    }
+}
+
+impl From<&DocumentIdScalar> for DocumentId {
+    fn from(document_id: &DocumentIdScalar) -> DocumentId {
+        document_id.0.clone()
+    }
+}
+
+impl Display for DocumentIdScalar {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0.as_str())
+    }
+}
diff --git a/aquadoggo/src/graphql/scalars/document_view_id_scalar.rs b/aquadoggo/src/graphql/scalars/document_view_id_scalar.rs
new file mode 100644
index 000000000..3de49911b
--- /dev/null
+++ b/aquadoggo/src/graphql/scalars/document_view_id_scalar.rs
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+use std::fmt::Display;
+
+use async_graphql::{InputValueError, InputValueResult, Scalar, ScalarType, Value};
+use p2panda_rs::document::DocumentViewId;
+use serde::{Deserialize, Serialize};
+
+/// Document view id as a GraphQL scalar.
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub struct DocumentViewIdScalar(DocumentViewId);
+
+#[Scalar]
+impl ScalarType for DocumentViewIdScalar {
+    fn parse(value: Value) -> InputValueResult<Self> {
+        match &value {
+            Value::String(str_value) => {
+                let view_id = str_value.parse::<DocumentViewId>()?;
+                Ok(DocumentViewIdScalar(view_id))
+            }
+            _ => Err(InputValueError::expected_type(value)),
+        }
+    }
+
+    fn to_value(&self) -> Value {
+        Value::String(self.0.to_string())
+    }
+}
+
+impl From<&DocumentViewId> for DocumentViewIdScalar {
+    fn from(value: &DocumentViewId) -> Self {
+        DocumentViewIdScalar(value.clone())
+    }
+}
+
+impl From<&DocumentViewIdScalar> for DocumentViewId {
+    fn from(value: &DocumentViewIdScalar) -> Self {
+        // Unwrap because `DocumentViewIdScalar` is always safely initialised.
+        DocumentViewId::new(value.0.graph_tips()).unwrap()
+    }
+}
+
+impl Display for DocumentViewIdScalar {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
diff --git a/aquadoggo/src/graphql/scalars/encoded_entry.rs b/aquadoggo/src/graphql/scalars/encoded_entry.rs
deleted file mode 100644
index 2647be347..000000000
--- a/aquadoggo/src/graphql/scalars/encoded_entry.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-or-later
-
-use async_graphql::{scalar, Value};
-use p2panda_rs::entry::EntrySigned;
-use serde::{Deserialize, Serialize};
-
-/// Signed bamboo entry, encoded as a hexadecimal string.
-#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)]
-pub struct EncodedEntry(EntrySigned);
-
-impl From<EntrySigned> for EncodedEntry {
-    fn from(entry: EntrySigned) -> Self {
-        Self(entry)
-    }
-}
-
-impl From<EncodedEntry> for EntrySigned {
-    fn from(entry: EncodedEntry) -> EntrySigned {
-        entry.0
-    }
-}
-
-impl From<EncodedEntry> for Value {
-    fn from(entry: EncodedEntry) -> Self {
-        async_graphql::ScalarType::to_value(&entry)
-    }
-}
-
-scalar!(EncodedEntry);
diff --git a/aquadoggo/src/graphql/scalars/encoded_operation.rs b/aquadoggo/src/graphql/scalars/encoded_operation.rs
deleted file mode 100644
index 299c40cfb..000000000
--- a/aquadoggo/src/graphql/scalars/encoded_operation.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: AGPL-3.0-or-later
-
-use async_graphql::scalar;
-use p2panda_rs::operation::OperationEncoded;
-use serde::{Deserialize, Serialize};
-
-/// Entry payload and p2panda operation, CBOR bytes encoded as a hexadecimal string.
-#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)]
-pub struct EncodedOperation(OperationEncoded);
-
-impl From<OperationEncoded> for EncodedOperation {
-    fn from(operation: OperationEncoded) -> Self {
-        Self(operation)
-    }
-}
-
-impl From<EncodedOperation> for OperationEncoded {
-    fn from(operation: EncodedOperation) -> OperationEncoded {
-        operation.0
-    }
-}
-
-scalar!(EncodedOperation);
diff --git a/aquadoggo/src/graphql/scalars/encoded_operation_scalar.rs b/aquadoggo/src/graphql/scalars/encoded_operation_scalar.rs
new file mode 100644
index 000000000..86cfa44ab
--- /dev/null
+++ b/aquadoggo/src/graphql/scalars/encoded_operation_scalar.rs
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+use async_graphql::{InputValueError, InputValueResult, Scalar, ScalarType, Value};
+use p2panda_rs::operation::OperationEncoded;
+use serde::{Deserialize, Serialize};
+
+/// Entry payload and p2panda operation, CBOR bytes encoded as a hexadecimal string.
+#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)]
+pub struct EncodedOperationScalar(OperationEncoded);
+
+#[Scalar]
+impl ScalarType for EncodedOperationScalar {
+    fn parse(value: Value) -> InputValueResult<Self> {
+        match &value {
+            Value::String(str_value) => {
+                let panda_value = OperationEncoded::new(str_value)?;
+                Ok(EncodedOperationScalar(panda_value))
+            }
+            _ => Err(InputValueError::expected_type(value)),
+        }
+    }
+
+    fn to_value(&self) -> Value {
+        Value::String(self.0.as_str().to_string())
+    }
+}
+
+impl From<OperationEncoded> for EncodedOperationScalar {
+    fn from(operation: OperationEncoded) -> Self {
+        Self(operation)
+    }
+}
+
+impl From<EncodedOperationScalar> for OperationEncoded {
+    fn from(operation: EncodedOperationScalar) -> OperationEncoded {
+        operation.0
+    }
+}
diff --git a/aquadoggo/src/graphql/scalars/entry_hash.rs b/aquadoggo/src/graphql/scalars/entry_hash_scalar.rs
similarity index 100%
rename from aquadoggo/src/graphql/scalars/entry_hash.rs
rename to aquadoggo/src/graphql/scalars/entry_hash_scalar.rs
diff --git a/aquadoggo/src/graphql/scalars/entry_signed_scalar.rs b/aquadoggo/src/graphql/scalars/entry_signed_scalar.rs
new file mode 100644
index 000000000..41093aa8c
--- /dev/null
+++ b/aquadoggo/src/graphql/scalars/entry_signed_scalar.rs
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: AGPL-3.0-or-later
+
+use async_graphql::{InputValueError, Scalar, ScalarType, Value};
+use p2panda_rs::entry::EntrySigned;
+use serde::{Deserialize, Serialize};
+
+/// Signed bamboo entry, encoded as a hexadecimal string.
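+///
+/// Parsing goes through `EntrySigned::new`, so invalid hex or structurally broken
+/// entries are rejected at the GraphQL layer with a `Failed to parse
+/// "EntrySignedScalar"` input error.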
+#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)] +pub struct EntrySignedScalar(EntrySigned); + +#[Scalar] +impl ScalarType for EntrySignedScalar { + fn parse(value: Value) -> async_graphql::InputValueResult { + match &value { + Value::String(str_value) => { + let panda_value = EntrySigned::new(str_value)?; + Ok(EntrySignedScalar(panda_value)) + } + _ => Err(InputValueError::expected_type(value)), + } + } + + fn to_value(&self) -> Value { + Value::String(self.0.as_str().to_string()) + } +} + +impl From for EntrySignedScalar { + fn from(entry: EntrySigned) -> Self { + Self(entry) + } +} + +impl From for EntrySigned { + fn from(entry: EntrySignedScalar) -> EntrySigned { + entry.0 + } +} + +impl From for Value { + fn from(entry: EntrySignedScalar) -> Self { + async_graphql::ScalarType::to_value(&entry) + } +} diff --git a/aquadoggo/src/graphql/scalars/log_id.rs b/aquadoggo/src/graphql/scalars/log_id_scalar.rs similarity index 72% rename from aquadoggo/src/graphql/scalars/log_id.rs rename to aquadoggo/src/graphql/scalars/log_id_scalar.rs index d5cf27df9..e6ccd86f5 100644 --- a/aquadoggo/src/graphql/scalars/log_id.rs +++ b/aquadoggo/src/graphql/scalars/log_id_scalar.rs @@ -3,25 +3,26 @@ use std::fmt::Display; use async_graphql::scalar; +use p2panda_rs::entry::LogId; use serde::{Deserialize, Serialize}; /// Log id of a bamboo entry. #[derive(Clone, Copy, Eq, PartialEq, Debug)] -pub struct LogId(p2panda_rs::entry::LogId); +pub struct LogIdScalar(LogId); -impl From for LogId { - fn from(log_id: p2panda_rs::entry::LogId) -> Self { +impl From for LogIdScalar { + fn from(log_id: LogId) -> Self { Self(log_id) } } -impl From for p2panda_rs::entry::LogId { - fn from(log_id: LogId) -> p2panda_rs::entry::LogId { +impl From for LogId { + fn from(log_id: LogIdScalar) -> LogId { log_id.0 } } -impl Serialize for LogId { +impl Serialize for LogIdScalar { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -32,44 +33,45 @@ impl Serialize for LogId { } } -impl<'de> Deserialize<'de> for LogId { +impl<'de> Deserialize<'de> for LogIdScalar { fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, { let str: String = Deserialize::deserialize(deserializer)?; - let log_id: p2panda_rs::entry::LogId = str + let log_id: LogId = str .parse() .map_err(|_| serde::de::Error::custom("Could not parse log_id string as u64"))?; - Ok(LogId(log_id)) + Ok(LogIdScalar(log_id)) } } -impl Display for LogId { +impl Display for LogIdScalar { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0.as_u64()) } } -scalar!(LogId); +scalar!(LogIdScalar); #[cfg(test)] mod tests { + use p2panda_rs::entry::LogId; use serde::{Deserialize, Serialize}; - use super::LogId; + use super::LogIdScalar; #[test] fn serde_log_id_as_string() { #[derive(Serialize, Deserialize, PartialEq, Debug)] struct Value { - log_id: LogId, + log_id: LogIdScalar, } let val = Value { - log_id: p2panda_rs::entry::LogId::default().into(), + log_id: LogId::default().into(), }; let serialised = serde_json::to_string(&val).unwrap(); diff --git a/aquadoggo/src/graphql/scalars/mod.rs b/aquadoggo/src/graphql/scalars/mod.rs index 2924fc5bc..abc9a9148 100644 --- a/aquadoggo/src/graphql/scalars/mod.rs +++ b/aquadoggo/src/graphql/scalars/mod.rs @@ -1,17 +1,26 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -mod document_id; -mod encoded_entry; -mod encoded_operation; -mod entry_hash; -mod log_id; -mod public_key; -mod seq_num; +//! GraphQL scalar types wrapping core p2panda types. +//! 
+//! All scalar types are safely converted into the corresponding p2panda type when provided as +//! arguments or response values in `async_graphql`. +//! +//! We use a naming convention of appending the item's GraphQL type (e.g. `Scalar`) when a p2panda +//! item of the exact same name is being wrapped. +mod document_id_scalar; +mod document_view_id_scalar; +mod encoded_operation_scalar; +mod entry_hash_scalar; +mod entry_signed_scalar; +mod log_id_scalar; +mod public_key_scalar; +mod seq_num_scalar; -pub use document_id::DocumentId; -pub use encoded_entry::EncodedEntry; -pub use encoded_operation::EncodedOperation; -pub use entry_hash::EntryHash; -pub use log_id::LogId; -pub use public_key::PublicKey; -pub use seq_num::SeqNum; +pub use document_id_scalar::DocumentIdScalar; +pub use document_view_id_scalar::DocumentViewIdScalar; +pub use encoded_operation_scalar::EncodedOperationScalar; +pub use entry_hash_scalar::EntryHash; +pub use entry_signed_scalar::EntrySignedScalar; +pub use log_id_scalar::LogIdScalar; +pub use public_key_scalar::PublicKeyScalar; +pub use seq_num_scalar::SeqNumScalar; diff --git a/aquadoggo/src/graphql/scalars/public_key.rs b/aquadoggo/src/graphql/scalars/public_key.rs deleted file mode 100644 index 23b447994..000000000 --- a/aquadoggo/src/graphql/scalars/public_key.rs +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-or-later - -use std::fmt::Display; - -use async_graphql::scalar; -use p2panda_rs::identity::Author; -use serde::{Deserialize, Serialize}; - -/// Public key of the entry author. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct PublicKey(Author); - -impl From for PublicKey { - fn from(author: Author) -> Self { - Self(author) - } -} - -impl From for Author { - fn from(public_key: PublicKey) -> Author { - public_key.0 - } -} - -impl Display for PublicKey { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -scalar!(PublicKey); diff --git a/aquadoggo/src/graphql/scalars/public_key_scalar.rs b/aquadoggo/src/graphql/scalars/public_key_scalar.rs new file mode 100644 index 000000000..ac9624334 --- /dev/null +++ b/aquadoggo/src/graphql/scalars/public_key_scalar.rs @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +use std::fmt::Display; + +use async_graphql::{InputValueError, Scalar, ScalarType, Value}; +use p2panda_rs::identity::Author; +use serde::{Deserialize, Serialize}; + +/// Public key that signed the entry. 
+#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub struct PublicKeyScalar(Author); + +#[Scalar] +impl ScalarType for PublicKeyScalar { + fn parse(value: Value) -> async_graphql::InputValueResult { + match &value { + Value::String(str_value) => { + let panda_value: Author = str_value.parse()?; + Ok(PublicKeyScalar(panda_value)) + } + _ => Err(InputValueError::expected_type(value)), + } + } + + fn to_value(&self) -> Value { + Value::String(self.0.as_str().to_string()) + } +} + +impl From for PublicKeyScalar { + fn from(author: Author) -> Self { + Self(author) + } +} + +impl From for Author { + fn from(public_key: PublicKeyScalar) -> Author { + public_key.0 + } +} + +impl Display for PublicKeyScalar { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/aquadoggo/src/graphql/scalars/seq_num.rs b/aquadoggo/src/graphql/scalars/seq_num_scalar.rs similarity index 75% rename from aquadoggo/src/graphql/scalars/seq_num.rs rename to aquadoggo/src/graphql/scalars/seq_num_scalar.rs index a945b23ea..cbb5435c3 100644 --- a/aquadoggo/src/graphql/scalars/seq_num.rs +++ b/aquadoggo/src/graphql/scalars/seq_num_scalar.rs @@ -6,14 +6,14 @@ use std::str::FromStr; use anyhow::Result; use async_graphql::scalar; -use p2panda_rs::entry::SeqNumError; +use p2panda_rs::entry::{SeqNum, SeqNumError}; use serde::{Deserialize, Serialize}; /// Sequence number of an entry. #[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct SeqNum(p2panda_rs::entry::SeqNum); +pub struct SeqNumScalar(SeqNum); -impl SeqNum { +impl SeqNumScalar { /// Return sequence number as u64. pub fn as_u64(&self) -> u64 { self.0.as_u64() @@ -27,39 +27,39 @@ impl SeqNum { } /// Convert from p2panda types to GraphQL scalars and back. -impl From for SeqNum { - fn from(seq_num: p2panda_rs::entry::SeqNum) -> Self { +impl From for SeqNumScalar { + fn from(seq_num: SeqNum) -> Self { Self(seq_num) } } -impl From for p2panda_rs::entry::SeqNum { - fn from(seq_num: SeqNum) -> p2panda_rs::entry::SeqNum { +impl From for SeqNum { + fn from(seq_num: SeqNumScalar) -> SeqNum { seq_num.0 } } /// Convert from strings to sequence number. -impl FromStr for SeqNum { +impl FromStr for SeqNumScalar { type Err = SeqNumError; fn from_str(str: &str) -> Result { let num = u64::from_str(str).map_err(|_| SeqNumError::InvalidU64String)?; - Ok(Self(p2panda_rs::entry::SeqNum::new(num)?)) + Ok(Self(SeqNum::new(num)?)) } } -impl TryFrom for SeqNum { +impl TryFrom for SeqNumScalar { type Error = SeqNumError; fn try_from(str: String) -> Result { - SeqNum::from_str(&str) + SeqNumScalar::from_str(&str) } } /// Represent u64 sequence number as string to be able to encode large numbers in GraphQL JSON /// response. 
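The string representation mentioned above matters because JSON numbers are commonly decoded as IEEE 754 doubles, which silently lose precision beyond 2^53 - 1. The same idea in plain serde, independent of the scalar types in this diff (a sketch: `Payload` and the `u64_string` module are made-up names, assuming `serde` with the derive feature and `serde_json` as dependencies):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Payload {
    // Transported as a string so JavaScript clients never mangle large values.
    #[serde(with = "u64_string")]
    seq_num: u64,
}

mod u64_string {
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S: Serializer>(value: &u64, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&value.to_string())
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<u64, D::Error> {
        let str_value = String::deserialize(deserializer)?;
        str_value.parse().map_err(serde::de::Error::custom)
    }
}

fn main() {
    let val = Payload { seq_num: u64::MAX };
    // 18446744073709551615 would not survive a round-trip as a JSON number.
    assert_eq!(
        serde_json::to_string(&val).unwrap(),
        r#"{"seq_num":"18446744073709551615"}"#
    );
}
```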
-impl Serialize for SeqNum { +impl Serialize for SeqNumScalar { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -68,14 +68,14 @@ impl Serialize for SeqNum { } } -impl<'de> Deserialize<'de> for SeqNum { +impl<'de> Deserialize<'de> for SeqNumScalar { fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, { let str: String = Deserialize::deserialize(deserializer)?; - let seq_num: SeqNum = str + let seq_num: SeqNumScalar = str .try_into() .map_err(|_| serde::de::Error::custom("Could not parse seq_num string as u64"))?; @@ -83,29 +83,30 @@ impl<'de> Deserialize<'de> for SeqNum { } } -impl Display for SeqNum { +impl Display for SeqNumScalar { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0.as_u64()) } } -scalar!(SeqNum); +scalar!(SeqNumScalar); #[cfg(test)] mod tests { + use p2panda_rs::entry::SeqNum; use serde::{Deserialize, Serialize}; - use super::SeqNum; + use super::SeqNumScalar; #[test] fn serde_seq_num_as_string() { #[derive(Serialize, Deserialize, PartialEq, Debug)] struct Value { - seq_num: SeqNum, + seq_num: SeqNumScalar, } let val = Value { - seq_num: p2panda_rs::entry::SeqNum::new(1).unwrap().into(), + seq_num: SeqNum::new(1).unwrap().into(), }; let serialised = serde_json::to_string(&val).unwrap(); diff --git a/aquadoggo/src/graphql/schema.rs b/aquadoggo/src/graphql/schema.rs index f867ed397..8dbe8a694 100644 --- a/aquadoggo/src/graphql/schema.rs +++ b/aquadoggo/src/graphql/schema.rs @@ -1,34 +1,292 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use async_graphql::{EmptySubscription, MergedObject, Schema}; +//! Build and manage a GraphQL schema including dynamic parts of the schema. +use std::sync::Arc; + +use async_graphql::{EmptySubscription, MergedObject, Request, Response, Schema}; +use log::{debug, info}; +use tokio::sync::Mutex; use crate::bus::ServiceSender; use crate::db::provider::SqlStorage; use crate::graphql::client::{ClientMutationRoot, ClientRoot}; use crate::graphql::replication::ReplicationRoot; +use crate::schema::{save_static_schemas, SchemaProvider}; -/// All of the graphql query sub modules merged into one top level root. +/// All of the GraphQL query sub modules merged into one top level root. #[derive(MergedObject, Debug)] pub struct QueryRoot(pub ReplicationRoot, pub ClientRoot); -/// All of the graphql mutation sub modules merged into one top level root. +/// All of the GraphQL mutation sub modules merged into one top level root. #[derive(MergedObject, Debug, Copy, Clone, Default)] pub struct MutationRoot(pub ClientMutationRoot); /// GraphQL schema for p2panda node. pub type RootSchema = Schema; -/// Build the root graphql schema that can handle graphql requests. -pub fn build_root_schema(store: SqlStorage, tx: ServiceSender) -> RootSchema { +/// Returns GraphQL API schema for p2panda node. +/// +/// Builds the root schema that can handle all GraphQL requests from clients (Client API) or other +/// nodes (Node API). 
+pub fn build_root_schema( + store: SqlStorage, + tx: ServiceSender, + schema_provider: SchemaProvider, +) -> RootSchema { + // Configure query root let replication_root = ReplicationRoot::default(); - let client_query_root = ClientRoot::default(); + let client_query_root = ClientRoot::new(); let query_root = QueryRoot(replication_root, client_query_root); + // Configure mutation root let client_mutation_root = ClientMutationRoot::default(); let mutation_root = MutationRoot(client_mutation_root); + // Build GraphQL schema Schema::build(query_root, mutation_root, EmptySubscription) .data(store) + .data(schema_provider) .data(tx) .finish() } + +/// Returns GraphQL API schema for p2panda node with a little trick to make dynamic schemas work. +/// +/// The `async_graphql` crate we're using in this project only provides methods to generate +/// GraphQL schemas statically. Ideally we would like to query our database for currently known +/// p2panda schemas and accordingly update the GraphQL schema whenever necessary, but we don't have +/// static and sync access to the database when building `async_graphql` types. +/// +/// With this little workaround we are still able to make it work! We load the p2panda schemas from +/// the database and write them into a temporary in-memory store. When `async_graphql` builds the +/// GraphQL schema we can load from this store statically to build the schemas on the fly. +async fn build_schema_with_workaround(shared: GraphQLSharedData) -> RootSchema { + // Store all application schemas from database into static in-memory storage + let all_schemas = shared.schema_provider.all().await; + save_static_schemas(&all_schemas); + + // Build the actual GraphQL root schema; this will internally read from the static in-memory + // store and accordingly build the schema + build_root_schema(shared.store, shared.tx, shared.schema_provider) +} + +/// List of created GraphQL root schemas. +type GraphQLSchemas = Arc>>; + +/// Shared types between GraphQL schemas. +#[derive(Clone, Debug)] +pub struct GraphQLSharedData { + /// Database interface. + store: SqlStorage, + + /// Communication bus interface to send messages to other services. + tx: ServiceSender, + + /// Schema provider giving us access to currently known schemas. + schema_provider: SchemaProvider, +} + +/// Builds new GraphQL schemas dynamically and executes the latest GraphQL schema for incoming +/// queries. +/// +/// This manager allows us to introduce new GraphQL schemas during runtime as it internally handles +/// a list of schemas (behind a mutex) and automatically picks the "latest" as soon as a query +/// needs to be executed. +/// +/// With this we can easily add "new" schemas to the list in the background while current queries +/// still get processed using the "old" schema. +// +// @TODO: This manager does not "clean up" outdated schemas yet; they will just be appended to +// an ever-growing list. +// +// WARNING: As soon as we start implementing GraphQL schema clean-up, we need to make sure to also +// free the used memory for all leaked schema data we've created. Otherwise this will lead to a +// memory leak! See the `static_schema_provider` module for more information (and useful tools) on +// this whole topic. +#[derive(Clone)] +pub struct GraphQLSchemaManager { + /// List of all built GraphQL root schemas. + schemas: GraphQLSchemas, + + /// Commonly shared types for GraphQL schemas. + shared: GraphQLSharedData, +} + +impl GraphQLSchemaManager { + /// Returns a new instance of `GraphQLSchemaManager`. 
+ pub async fn new( + store: SqlStorage, + tx: ServiceSender, + schema_provider: SchemaProvider, + ) -> Self { + let schemas = Arc::new(Mutex::new(Vec::new())); + let shared = GraphQLSharedData { + store, + tx, + schema_provider, + }; + + // Create manager instance and spawn internal watch task + let manager = Self { schemas, shared }; + manager.spawn_schema_added_task().await; + + manager + } + + /// Subscribes to `SchemaProvider` for newly added schemas. + /// + /// This spawns a task which listens to new p2panda schemas to accordingly build a GraphQL + /// schema which will be added to the list. + async fn spawn_schema_added_task(&self) { + let shared = self.shared.clone(); + let schemas = self.schemas.clone(); + + info!("Subscribing GraphQL manager to schema provider"); + let mut on_schema_added = shared.schema_provider.on_schema_added(); + + // Create a new GraphQL schema based on the current state of known p2panda application schemas + async fn rebuild(shared: GraphQLSharedData, schemas: GraphQLSchemas) { + let schema = build_schema_with_workaround(shared).await; + schemas.lock().await.push(schema); + } + + // Always build a schema right at the beginning as we don't have one yet + rebuild(shared.clone(), schemas.clone()).await; + debug!("Finished building initial GraphQL schema"); + + // Spawn a task which reacts to newly registered p2panda schemas + tokio::task::spawn(async move { + loop { + match on_schema_added.recv().await { + Ok(schema_id) => { + info!("Changed schema {}, rebuilding GraphQL API", schema_id); + rebuild(shared.clone(), schemas.clone()).await; + } + Err(err) => { + panic!("Failed receiving schema updates: {}", err) + } + } + } + }); + } + + /// Executes an incoming GraphQL query. + /// + /// This method makes sure the GraphQL query will be executed by the latest schema the + /// manager knows about. + pub async fn execute(&self, request: impl Into) -> Response { + self.schemas + .lock() + .await + .last() + .expect("No schema given yet") + .execute(request) + .await + } +} + +impl std::fmt::Debug for GraphQLSchemaManager { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // `schemas` does not implement `Debug` but we can at least print the other fields + f.debug_struct("GraphQLSchemaManager") + .field("shared", &self.shared) + .finish() + } +} + +#[cfg(test)] +mod test { + use async_graphql::{value, Response}; + use p2panda_rs::schema::FieldType; + use p2panda_rs::test_utils::constants::PRIVATE_KEY; + use p2panda_rs::test_utils::fixtures::key_pair; + use rstest::rstest; + use serde_json::{json, Value}; + + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + use crate::test_helpers::graphql_test_client; + + #[rstest] + fn schema_updates(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |mut db: TestDatabase| async move { + // Create test client in the beginning so it is initialised with just the system + // schemas. Then we create a new application schema to test that the GraphQL schema + // is updated and we can query the changed schema. + let client = graphql_test_client(&db).await; + + // This test uses a fixed private key to allow us to anticipate the schema typename. + let key_pair = key_pair(PRIVATE_KEY); + let type_name = + "schema_name_002050d1a071ef2061eb3ddb1f1420b22a52687ed134a9ba9f094b237104b4e7058c"; + + // Check that the schema does not exist yet. 
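The `spawn_schema_added_task` method above is what makes the schema list grow: it subscribes to the provider's broadcast channel and pushes a freshly built schema on every announcement. Stripped of all aquadoggo types, the subscribe-and-rebuild loop looks roughly like this (a sketch with `String` ids and a `println!` standing in for the actual rebuild):

```rust
use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (tx, _) = broadcast::channel::<String>(64);
    let mut on_schema_added = tx.subscribe();

    // Background task reacting to every announced schema id.
    let handle = tokio::spawn(async move {
        while let Ok(schema_id) = on_schema_added.recv().await {
            // The real manager calls `build_schema_with_workaround` here and
            // pushes the result onto the shared, mutex-guarded schema list.
            println!("Changed schema {}, rebuilding GraphQL API", schema_id);
        }
    });

    tx.send("schema_name_0020...".to_string()).unwrap();
    drop(tx); // Closing the channel ends the receive loop.
    handle.await.unwrap();
}
```

Queries keep being served from the previous schema until the new one is pushed, since `execute` always picks the `last()` entry under the lock.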
+ let response = client + .post("/graphql") + .json(&json!({ + "query": format!( + r#"{{ + schema: __type(name: "{}") {{ + name, + }}, + }}"#, + type_name, + ), + })) + .send() + .await; + let response: Response = response.json().await; + + assert_eq!( + response.data, + value!({ + "schema": Value::Null, + }), + "\n{:#?}\n", + response.errors + ); + + // Add schema to node. + let schema = db + .add_schema( + "schema_name", + vec![("bool_field", FieldType::Bool)], + &key_pair, + ) + .await; + + assert_eq!( + schema.id().to_string(), + type_name, + "Please update `type_name` const above to fix this test." + ); + + // Query gql schema. + let response = client + .post("/graphql") + .json(&json!({ + "query": format!( + r#"{{ + schema: __type(name: "{}") {{ + name, + }}, + }}"#, + type_name, + ), + })) + .send() + .await; + let response: Response = response.json().await; + + assert_eq!( + response.data, + value!({ + "schema": { + "name": type_name + }, + }), + "\n{:#?}\n", + response.errors + ); + }); + } +} diff --git a/aquadoggo/src/http/context.rs b/aquadoggo/src/http/context.rs index e0475a2d3..38c40a2e5 100644 --- a/aquadoggo/src/http/context.rs +++ b/aquadoggo/src/http/context.rs @@ -1,20 +1,16 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use crate::bus::ServiceSender; -use crate::db::provider::SqlStorage; -use crate::graphql::{build_root_schema, RootSchema}; +use crate::graphql::GraphQLSchemaManager; #[derive(Clone)] pub struct HttpServiceContext { - /// Root GraphQL schema. - pub schema: RootSchema, + /// Dynamic GraphQL schema manager. + pub schema: GraphQLSchemaManager, } impl HttpServiceContext { /// Create a new HttpServiceContext. - pub fn new(store: SqlStorage, tx: ServiceSender) -> Self { - Self { - schema: build_root_schema(store, tx), - } + pub fn new(schema: GraphQLSchemaManager) -> Self { + Self { schema } } } diff --git a/aquadoggo/src/http/service.rs b/aquadoggo/src/http/service.rs index b69fb16a9..b72feca95 100644 --- a/aquadoggo/src/http/service.rs +++ b/aquadoggo/src/http/service.rs @@ -12,6 +12,7 @@ use tower_http::cors::{Any, CorsLayer}; use crate::bus::ServiceSender; use crate::context::Context; +use crate::graphql::GraphQLSchemaManager; use crate::http::api::{handle_graphql_playground, handle_graphql_query}; use crate::http::context::HttpServiceContext; use crate::manager::{ServiceReadySender, Shutdown}; @@ -48,8 +49,12 @@ pub async fn http_service( let http_port = context.config.http_port; let http_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), http_port); + // Prepare GraphQL manager executing incoming GraphQL queries via HTTP + let graphql_schema_manager = + GraphQLSchemaManager::new(context.store.clone(), tx, context.schema_provider.clone()).await; + // Introduce a new context for all HTTP routes - let http_context = HttpServiceContext::new(context.store.clone(), tx); + let http_context = HttpServiceContext::new(graphql_schema_manager); axum::Server::try_bind(&http_address)? 
.serve(build_server(http_context).into_make_service()) @@ -72,18 +77,22 @@ mod tests { use serde_json::json; use tokio::sync::broadcast; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + use crate::graphql::GraphQLSchemaManager; use crate::http::context::HttpServiceContext; + use crate::schema::SchemaProvider; use crate::test_helpers::TestClient; use super::build_server; #[rstest] fn graphql_endpoint(#[from(test_db)] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let (tx, _) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); + let schema_provider = SchemaProvider::default(); + let graphql_schema_manager = + GraphQLSchemaManager::new(db.store, tx, schema_provider).await; + let context = HttpServiceContext::new(graphql_schema_manager); let client = TestClient::new(build_server(context)); let response = client diff --git a/aquadoggo/src/lib.rs b/aquadoggo/src/lib.rs index ef8a3845a..46f031162 100644 --- a/aquadoggo/src/lib.rs +++ b/aquadoggo/src/lib.rs @@ -34,6 +34,7 @@ mod test_helpers; pub use crate::config::Configuration; pub use crate::replication::ReplicationConfiguration; pub use node::Node; +pub use schema::SchemaProvider; /// Init env_logger before the test suite runs to handle logging outputs. /// diff --git a/aquadoggo/src/materializer/mod.rs b/aquadoggo/src/materializer/mod.rs index 40a646cfc..e9c5c45e1 100644 --- a/aquadoggo/src/materializer/mod.rs +++ b/aquadoggo/src/materializer/mod.rs @@ -2,7 +2,7 @@ mod input; mod service; -mod tasks; +pub(crate) mod tasks; mod worker; pub use input::TaskInput; diff --git a/aquadoggo/src/materializer/service.rs b/aquadoggo/src/materializer/service.rs index ec1764fb9..b73e06ef3 100644 --- a/aquadoggo/src/materializer/service.rs +++ b/aquadoggo/src/materializer/service.rs @@ -154,7 +154,6 @@ mod tests { use tokio::task; use crate::context::Context; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{send_to_store, test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; use crate::materializer::{Task, TaskInput}; @@ -170,7 +169,7 @@ mod tests { runner: TestDatabaseRunner, ) { // Prepare database which inserts data for one document - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Identify document and operation which was inserted for testing let document_id = db.test_data.documents.first().unwrap(); let verified_operation = db @@ -251,7 +250,7 @@ mod tests { runner: TestDatabaseRunner, ) { // Prepare database which inserts data for one document - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Identify document and operation which was inserted for testing let document_id = db.test_data.documents.first().unwrap(); @@ -321,7 +320,7 @@ mod tests { runner: TestDatabaseRunner, ) { // Prepare database which inserts data for one document - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Identify key_pair, document and operation which was inserted for testing let key_pair = db.test_data.key_pairs.first().unwrap(); let document_id = db.test_data.documents.first().unwrap(); @@ -368,7 +367,7 @@ mod tests { .unwrap(); // Wait a little bit for work being done .. 
- tokio::time::sleep(Duration::from_millis(50)).await; + tokio::time::sleep(Duration::from_millis(100)).await; // Then straight away publish an UPDATE on this document and send it over the bus too. let (entry_encoded, _) = send_to_store( @@ -393,7 +392,7 @@ mod tests { .unwrap(); // Wait a little bit for work being done .. - tokio::time::sleep(Duration::from_millis(50)).await; + tokio::time::sleep(Duration::from_millis(100)).await; // Make sure the service did not crash and is still running assert_eq!(handle.is_finished(), false); @@ -461,7 +460,7 @@ mod tests { key_pair: KeyPair, ) { // Prepare empty database - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { // Prepare arguments for service let context = Context::new( db.store.clone(), @@ -474,7 +473,7 @@ mod tests { tokio::time::sleep(Duration::from_millis(100)).await; } }); - let (tx, _) = broadcast::channel(1024); + let (tx, _rx) = broadcast::channel(1024); // Start materializer service let tx_clone = tx.clone(); diff --git a/aquadoggo/src/materializer/tasks/dependency.rs b/aquadoggo/src/materializer/tasks/dependency.rs index 25a65cbf8..e7bdd045a 100644 --- a/aquadoggo/src/materializer/tasks/dependency.rs +++ b/aquadoggo/src/materializer/tasks/dependency.rs @@ -11,6 +11,8 @@ use crate::materializer::TaskInput; /// A dependency task prepares _reduce_ tasks for all pinned relations of a given document view. /// +/// The `input` argument must contain only a view id. +/// /// This task is dispatched after a reduce task completes. It identifies any pinned relations /// present in a given document view as we need to guarantee the required document views are /// materialised and stored in the database. We may have the required operations on the node @@ -148,6 +150,7 @@ async fn construct_relation_task( document_view_id: DocumentViewId, ) -> Result>, TaskError> { debug!("Get view for pinned relation with id: {}", document_view_id); + match context .store .get_document_view_by_id(&document_view_id) @@ -187,7 +190,6 @@ mod tests { use crate::config::Configuration; use crate::context::Context; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{ insert_entry_operation_and_view, send_to_store, test_db, TestDatabase, TestDatabaseRunner, }; @@ -307,7 +309,7 @@ mod tests { #[case] runner: TestDatabaseRunner, #[case] expected_next_tasks: usize, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -347,7 +349,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -411,7 +413,7 @@ mod tests { #[case] document_view_id: Option, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store, Configuration::default(), @@ -457,7 +459,7 @@ mod tests { ) )] fn fails_on_deleted_documents(#[case] runner: TestDatabaseRunner) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -494,7 +496,7 @@ mod tests { ])] runner: TestDatabaseRunner, ) { - 
runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -579,7 +581,7 @@ mod tests { ])] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -590,8 +592,6 @@ mod tests { // the store. let schema_field_document_id = db.test_data.documents.first().unwrap(); - println!("THIS {}", schema_field_document_id.as_str()); - // Materialise the schema field definition. let input = TaskInput::new(Some(schema_field_document_id.to_owned()), None); reduce_task(context.clone(), input.clone()).await.unwrap(); diff --git a/aquadoggo/src/materializer/tasks/reduce.rs b/aquadoggo/src/materializer/tasks/reduce.rs index 435141a26..b74db22ed 100644 --- a/aquadoggo/src/materializer/tasks/reduce.rs +++ b/aquadoggo/src/materializer/tasks/reduce.rs @@ -3,13 +3,25 @@ use log::{debug, info}; use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewId}; use p2panda_rs::operation::VerifiedOperation; -use p2panda_rs::storage_provider::traits::OperationStore; +use p2panda_rs::storage_provider::traits::{OperationStore, StorageProvider}; use crate::context::Context; use crate::db::traits::DocumentStore; use crate::materializer::worker::{Task, TaskError, TaskResult}; use crate::materializer::TaskInput; +/// Build a materialised view for a document by reducing the document's operation graph and storing the result to disk. +/// +/// ## Task input +/// +/// If the task input contains a document view id, only this view is stored and any document id +/// also existing on the task input is ignored. +/// +/// If the task input contains a document id, the latest view for that document is built and stored, +/// and the document itself is updated in the store as well. +/// +/// ## Integration with other tasks +/// /// A reduce task is dispatched for every entry and operation pair which arrives at a node. 
/// /// They may also be dispatched from a dependency task when a pinned relation is present on an @@ -66,8 +78,8 @@ pub async fn reduce_task(context: Context, input: TaskInput) -> TaskResult( + context: &Context, input: &TaskInput, ) -> Result, TaskError> { match (&input.document_id, &input.document_view_id) { @@ -168,7 +180,7 @@ async fn reduce_document( return Ok(None); } - info!("Stored {} view {}", document, document.view_id()); + info!("Stored {} latest view {}", document, document.view_id()); // Return the new document_view id to be used in the resulting dependency task Ok(Some(document.view_id().to_owned())) @@ -195,7 +207,6 @@ mod tests { use crate::config::Configuration; use crate::context::Context; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{send_to_store, test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; use crate::materializer::tasks::reduce_task; @@ -216,7 +227,7 @@ mod tests { )] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store, Configuration::default(), @@ -245,7 +256,7 @@ mod tests { #[with(1, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_id = db.test_data.documents.first().unwrap(); let key_pair = db.test_data.key_pairs.first().unwrap(); @@ -299,7 +310,7 @@ mod tests { #[with( 2, 1, 1, false, SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let document_operations = db .store .get_operations_by_document_id(&db.test_data.documents[0]) @@ -360,7 +371,7 @@ mod tests { #[with(3, 1, 20, true)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -407,7 +418,7 @@ mod tests { #[case] runner: TestDatabaseRunner, #[case] is_next_task: bool, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), @@ -429,7 +440,7 @@ mod tests { #[case] document_view_id: Option, #[from(test_db)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store, Configuration::default(), @@ -450,7 +461,7 @@ mod tests { #[from(random_document_view_id)] document_view_id: DocumentViewId, ) { // Prepare empty database. 
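The input rules documented on `reduce_task` above boil down to a single precedence decision: a pinned view id wins over a document id whenever both are present. A simplified sketch of that resolution (with `String` stand-ins for the id types; `Target` and `resolve` are illustrative names, not the actual `TaskInput` API):

```rust
/// What a reduce task ends up operating on.
#[derive(Debug, PartialEq)]
enum Target {
    /// Materialise the latest view of the whole document.
    Document(String),
    /// Materialise exactly one pinned view.
    PinnedView(String),
}

fn resolve(document_id: Option<String>, view_id: Option<String>) -> Option<Target> {
    match (document_id, view_id) {
        // A document view id always wins; any document id is ignored.
        (_, Some(view_id)) => Some(Target::PinnedView(view_id)),
        (Some(document_id), None) => Some(Target::Document(document_id)),
        // Neither id given: nothing to reduce.
        (None, None) => None,
    }
}

fn main() {
    let both = resolve(Some("doc_id".into()), Some("view_id".into()));
    assert_eq!(both, Some(Target::PinnedView("view_id".into())));
}
```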
- runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), diff --git a/aquadoggo/src/materializer/tasks/schema.rs b/aquadoggo/src/materializer/tasks/schema.rs index 14b199f34..f8cd74afb 100644 --- a/aquadoggo/src/materializer/tasks/schema.rs +++ b/aquadoggo/src/materializer/tasks/schema.rs @@ -126,7 +126,6 @@ mod tests { use rstest::rstest; use crate::context::Context; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{send_to_store, test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; use crate::materializer::tasks::reduce_task; @@ -139,7 +138,7 @@ mod tests { /// Insert a test schema definition and schema field definition and run reduce tasks for both. async fn create_schema_documents( context: &Context, - db: &TestDatabase, + db: &TestDatabase, ) -> (DocumentViewId, DocumentViewId) { // Create field definition let create_field_definition = Operation::new_create( @@ -202,7 +201,7 @@ mod tests { .to_owned(); debug!("Created schema definition {}", definition_view_id); - return (definition_view_id, field_view_id); + (definition_view_id, field_view_id) } #[rstest] @@ -211,7 +210,7 @@ mod tests { #[with(1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(|db: TestDatabase| async move { + runner.with_db_teardown(|db: TestDatabase| async move { let context = Context::new( db.store.clone(), Configuration::default(), diff --git a/aquadoggo/src/materializer/worker.rs b/aquadoggo/src/materializer/worker.rs index f2fe461d0..5ea338a0b 100644 --- a/aquadoggo/src/materializer/worker.rs +++ b/aquadoggo/src/materializer/worker.rs @@ -495,10 +495,8 @@ where } } - // Trigger status update when successful - if result.is_ok() { - on_complete(item.input()); - } + // Trigger removing the task from the task store + on_complete(item.input()); // Check the result match result { diff --git a/aquadoggo/src/schema/mod.rs b/aquadoggo/src/schema/mod.rs index 4b7ccd75d..6c235963f 100644 --- a/aquadoggo/src/schema/mod.rs +++ b/aquadoggo/src/schema/mod.rs @@ -1,5 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later mod schema_provider; +mod static_schema_provider; pub use schema_provider::SchemaProvider; +pub use static_schema_provider::{load_static_schemas, save_static_schemas}; diff --git a/aquadoggo/src/schema/schema_provider.rs b/aquadoggo/src/schema/schema_provider.rs index 8742d86c2..5c41e9ddb 100644 --- a/aquadoggo/src/schema/schema_provider.rs +++ b/aquadoggo/src/schema/schema_provider.rs @@ -3,18 +3,23 @@ use std::collections::HashMap; use std::sync::Arc; -use log::info; +use log::{debug, info, warn}; use p2panda_rs::schema::{Schema, SchemaId, SYSTEM_SCHEMAS}; +use tokio::sync::broadcast::{channel, Receiver, Sender}; use tokio::sync::Mutex; /// Provides fast thread-safe access to system and application schemas. /// /// Application schemas can be added and updated. #[derive(Clone, Debug)] -pub struct SchemaProvider(Arc>>); +pub struct SchemaProvider { + /// In-memory store of registered schemas. + schemas: Arc>>, + + /// Sender for broadcast channel informing subscribers about updated schemas. + tx: Sender, +} -// Dead code allowed until this is used for https://github.com/p2panda/aquadoggo/pull/141 -#[allow(dead_code)] impl SchemaProvider { /// Returns a `SchemaProvider` containing the given application schemas and all system schemas. 
pub fn new(application_schemas: Vec) -> Self { @@ -27,17 +32,37 @@ impl SchemaProvider { for schema in schemas { index.insert(schema.id().to_owned(), schema.to_owned()); } - Self(Arc::new(Mutex::new(index))) + + let (tx, _) = channel(64); + + debug!( + "Initialised schema provider:\n- {}", + index + .values() + .map(|schema| schema.to_string()) + .collect::>() + .join("\n- ") + ); + + Self { + schemas: Arc::new(Mutex::new(index)), + tx, + } + } + + /// Returns receiver for broadcast channel. + pub fn on_schema_added(&self) -> Receiver { + self.tx.subscribe() } /// Retrieve a schema that may be a system or application schema by its schema id. pub async fn get(&self, schema_id: &SchemaId) -> Option { - self.0.lock().await.get(schema_id).cloned() + self.schemas.lock().await.get(schema_id).cloned() } /// Returns all system and application schemas. pub async fn all(&self) -> Vec { - self.0.lock().await.values().cloned().collect() + self.schemas.lock().await.values().cloned().collect() } /// Inserts or updates the given schema in this provider. @@ -45,8 +70,17 @@ impl SchemaProvider { /// Returns `true` if a schema was updated and `false` if it was inserted. pub async fn update(&self, schema: Schema) -> bool { info!("Updating {}", schema); - let mut schemas = self.0.lock().await; - schemas.insert(schema.id().clone(), schema).is_some() + let mut schemas = self.schemas.lock().await; + let is_update = schemas + .insert(schema.id().clone(), schema.clone()) + .is_some(); + + // Inform subscribers about new schema + if self.tx.send(schema.id().to_owned()).is_err() { + warn!("No subscriber has been informed about inserted / updated schema"); + } + + is_update + } } diff --git a/aquadoggo/src/schema/static_schema_provider.rs b/aquadoggo/src/schema/static_schema_provider.rs new file mode 100644 index 000000000..9438b5fa5 --- /dev/null +++ b/aquadoggo/src/schema/static_schema_provider.rs @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: AGPL-3.0-or-later + +//! Global provider for schemas which serves as a workaround to get dynamic schemas into the +//! `async-graphql` crate. Dynamic meaning that the GraphQL schema changes during runtime. +//! +//! The methods and structs provided here help us to fulfil the API of `async-graphql`, which gives +//! us a path to introduce new GraphQL queries during runtime, albeit with some +//! limitations we need to work around. +//! +//! Note that the `async-graphql` crate does not officially support dynamic schemas, +//! meaning it is designed to build one static GraphQL schema once in the beginning and then keep +//! it until the end of the program. See: +//! +//! Together with the `GraphQLSchemaManager` and these methods here we can still introduce new +//! GraphQL schemas during the program's runtime, which is why we call them "dynamic schemas". +//! +//! The limitations of `async-graphql` we need to get around are: +//! +//! 1. `OutputType::create_type_info` is an associated function which does not provide any access +//! to `self`, so we are limited in what data we can provide from the "outside" (like p2panda +//! schemas) to build new GraphQL types. +//! +//! The solution here is to introduce a global storage we can both access from inside the +//! `create_type_info` function to read from it and other places to write to it. +//! +//! 2. Fields in `MetaType::Object` like `description` require a 'static lifetime. +//! +//! The solution here is to make use of `Box::leak` which removes the ownership over the inner +//! 
value, making it a value with 'static lifetime. Together with the global storage we can +//! bring data into these `async-graphql` functions AND give them a 'static lifetime. +//! +//! This is a fairly hacky workaround and can cause memory leaks when not handled properly. This +//! is why this module provides a `StaticLeak` struct which should help us to clean up after +//! ourselves. +use std::sync::Mutex; + +use once_cell::sync::Lazy; +use p2panda_rs::schema::Schema; + +/// Global schema provider containing all application and system schemas which will be used to +/// build the next GraphQL schema. +/// +/// This is similar to `SchemaProvider` though serving a slightly different purpose, as it is a +/// workaround for `async-graphql` (see module-level description for details); the contained data +/// should be identical though. +static SCHEMA_PROVIDER: Lazy>> = Lazy::new(|| Mutex::new(Vec::new())); + +/// Replaces the current content of the global schema provider with new data. +pub fn save_static_schemas(data: &[Schema]) { + let mut schemas = SCHEMA_PROVIDER + .lock() + .expect("Could not acquire mutex lock for static schema provider"); + + schemas.clear(); + schemas.append(&mut data.to_vec()); +} + +/// Reads the current schemas of the global schema provider and returns the result in a 'static +/// lifetime. +/// +/// Warning: The returned data needs to be manually freed when not being used anymore; otherwise it +/// will cause a memory leak. +/// +/// @TODO: For now we're fine as we do not clean up any dynamically generated schemas (see +/// `GraphQLSchemaManager`). This means that the returned schema arrays here will in any case live as +/// long as the whole program. As soon as we start removing "old" dynamically generated schemas +/// from the `GraphQLSchemaManager` we will also have to manually deallocate that memory from +/// here. See `StaticLeak` struct below which might help us with this. +pub fn load_static_schemas() -> &'static Vec { + let data = SCHEMA_PROVIDER + .lock() + .expect("Could not acquire mutex lock for static schema provider") + .to_vec(); + + Box::leak(Box::new(data)) +} diff --git a/aquadoggo/src/test_helpers.rs b/aquadoggo/src/test_helpers.rs index 7ee5a2bec..99bbae1a4 100644 --- a/aquadoggo/src/test_helpers.rs +++ b/aquadoggo/src/test_helpers.rs @@ -14,11 +14,15 @@ use once_cell::sync::Lazy; use serde::Deserialize; use sqlx::migrate::MigrateDatabase; use sqlx::Any; +use tokio::sync::broadcast; use tokio::task::{self, JoinHandle}; use tower::make::Shared; use tower_service::Service; +use crate::db::stores::test_utils::TestDatabase; use crate::db::{connection_pool, create_database, run_pending_migrations, Pool}; +use crate::graphql::GraphQLSchemaManager; +use crate::http::{build_server, HttpServiceContext}; /// Configuration used in test helper methods. #[derive(Deserialize, Debug)] @@ -47,7 +51,7 @@ impl Default for TestConfiguration { pub static TEST_CONFIG: Lazy = Lazy::new(|| TestConfiguration::new()); -pub(crate) struct TestClient { +pub struct TestClient { client: reqwest::Client, addr: SocketAddr, } @@ -95,6 +99,15 @@ impl TestClient { } } +/// Configures a test client that can be used for GraphQL testing. 
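To make the `Box::leak` trick from the `static_schema_provider` module above concrete before moving on to the test helpers: leaking a heap allocation trades memory for a `'static` borrow, which is what satisfies `async-graphql`'s lifetime requirements. A tiny illustration (over `Vec<String>` instead of `Vec<Schema>`):

```rust
// Leaking the box gives back a reference that lives for the rest of the
// program; the allocation is never dropped unless it is reclaimed manually.
fn leak_as_static(data: Vec<String>) -> &'static Vec<String> {
    Box::leak(Box::new(data))
}

fn main() {
    let schemas = vec!["schema_a".to_string(), "schema_b".to_string()];
    let static_ref: &'static Vec<String> = leak_as_static(schemas);
    println!("{:?}", static_ref);
}
```

Every call leaks a fresh allocation, which is why the @TODO above insists that removing old schemas later must come with manually freeing this memory again.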
+pub async fn graphql_test_client(db: &TestDatabase) -> TestClient { + let (tx, _) = broadcast::channel(16); + let manager = + GraphQLSchemaManager::new(db.store.clone(), tx, db.context.schema_provider.clone()).await; + let http_context = HttpServiceContext::new(manager); + TestClient::new(build_server(http_context)) +} + pub(crate) struct RequestBuilder { builder: reqwest::RequestBuilder, } diff --git a/aquadoggo/src/validation.rs b/aquadoggo/src/validation.rs index c764d4859..00e769d0e 100644 --- a/aquadoggo/src/validation.rs +++ b/aquadoggo/src/validation.rs @@ -178,7 +178,6 @@ mod tests { use p2panda_rs::test_utils::fixtures::{key_pair, random_document_id}; use rstest::rstest; - use crate::db::provider::SqlStorage; use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use super::{ @@ -258,7 +257,7 @@ mod tests { #[with(2, 2, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { // Unwrap the passed document id or select the first valid one from the database. let document_id = document_id.unwrap_or_else(|| db.test_data.documents.first().unwrap().to_owned()); @@ -291,7 +290,7 @@ mod tests { #[with(7, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); get_expected_skiplink(&db.store, &author, &log_id, &seq_num) @@ -320,7 +319,7 @@ mod tests { #[with(10, 1, 1)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); let skiplink_entry = @@ -339,7 +338,7 @@ mod tests { #[with(3, 1, 1, true)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let document_id = db.test_data.documents.first().unwrap(); ensure_document_not_deleted(&db.store, document_id) .await @@ -353,7 +352,7 @@ mod tests { #[with(3, 1, 1, false)] runner: TestDatabaseRunner, ) { - runner.with_db_teardown(move |db: TestDatabase| async move { + runner.with_db_teardown(move |db: TestDatabase| async move { let document_id = db.test_data.documents.first().unwrap(); assert!(ensure_document_not_deleted(&db.store, document_id) .await