From 5a1a845ab1b6af8dcdb5ac60b94f193dbb614ad4 Mon Sep 17 00:00:00 2001
From: Sam Andreae
Date: Thu, 23 Jun 2022 11:46:48 +0100
Subject: [PATCH] End-to-end publishEntry tests (#167)

* Use fixture for publish_entry_request
* Post gql publishEntry mutation
* Make the test work
* Add test for errors
* WIP: Failing publish_entry requests
* More failure cases
* Test for publishing many entries
* Add todo
* Update CHANGELOG
* Remove tests which panic before starting
* Change p2panda branch
* Update tests to catch new return errors
* Tests for not spec compliant entries
* fmt
* Back to p2panda-rs main
* p2panda-rs updates
* Link to related issue
---
 CHANGELOG.md                             |   5 +-
 Cargo.lock                               |   3 +-
 aquadoggo/src/db/provider.rs             |   3 +-
 aquadoggo/src/db/stores/schema.rs        |   4 +-
 aquadoggo/src/db/stores/test_utils.rs    |   4 +-
 aquadoggo/src/db/utils.rs                |   4 +-
 aquadoggo/src/graphql/client/mutation.rs | 446 +++++++++++++++++++++--
 7 files changed, 437 insertions(+), 32 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a584836f..eac48fc94 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Reduce and dependency tasks [#144](https://github.com/p2panda/aquadoggo/pull/144)
 - GraphQL endpoints for replication [#100](https://github.com/p2panda/aquadoggo/pull/100)
 - Inform materialization service about new operations [#161](https://github.com/p2panda/aquadoggo/pull/161)
+- e2e publish entry tests [#167](https://github.com/p2panda/aquadoggo/pull/167)
 - Reschedule pending tasks on startup [#168](https://github.com/p2panda/aquadoggo/pull/168)
 
 ### Changed
@@ -46,7 +47,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [0.2.0]
 
-*Please note: `aquadoggo-rs` crate is not published yet, due to unpublished dependencies.*
+_Please note: `aquadoggo-rs` crate is not published yet, due to unpublished dependencies._
 
 ### Changed
 
@@ -79,6 +80,6 @@ Released on 2021-10-25: :package: [`crate`](https://crates.io/crates/aquadoggo/0
 - Use p2panda-rs 0.2.1 with fixed linter setting [#41](https://github.com/p2panda/aquadoggo/41)
 - Use `tide` for HTTP server and `jsonrpc-v2` for JSON RPC [#29](https://github.com/p2panda/aquadoggo/29)
 
-[Unreleased]: https://github.com/p2panda/aquadoggo/compare/v0.2.0...HEAD
+[unreleased]: https://github.com/p2panda/aquadoggo/compare/v0.2.0...HEAD
 [0.2.0]: https://github.com/p2panda/aquadoggo/releases/tag/v0.2.0
 [0.1.0]: https://github.com/p2panda/aquadoggo/releases/tag/v0.1.0

diff --git a/Cargo.lock b/Cargo.lock
index aa8b2f8b6..c82fdc3d1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2391,7 +2391,7 @@ dependencies = [
 [[package]]
 name = "p2panda-rs"
 version = "0.3.0"
-source = "git+https://github.com/p2panda/p2panda?branch=main#2a73825700d914034d5da9b352a28f1acd7c710d"
+source = "git+https://github.com/p2panda/p2panda?branch=main#da6cb0d41047261e23d043a45f204ba37cbbfad2"
 dependencies = [
  "arrayvec 0.5.2",
  "async-trait",
@@ -2420,6 +2420,7 @@ dependencies = [
  "serde_repr",
  "thiserror",
  "tls_codec",
+ "varu64",
  "wasm-bindgen",
  "yasmf-hash",
 ]

diff --git a/aquadoggo/src/db/provider.rs b/aquadoggo/src/db/provider.rs
index 5d5c7f81c..5adfc8734 100644
--- a/aquadoggo/src/db/provider.rs
+++ b/aquadoggo/src/db/provider.rs
@@ -3,6 +3,7 @@
 use async_trait::async_trait;
 use p2panda_rs::document::DocumentId;
 use p2panda_rs::hash::Hash;
+use p2panda_rs::operation::VerifiedOperation;
 use p2panda_rs::storage_provider::traits::StorageProvider;
 use sqlx::query_scalar;
 
@@ -29,7 +30,7 @@ impl SqlStorage {
 /// A `StorageProvider` implementation based on `sqlx` that supports SQLite and PostgreSQL
 /// databases.
 #[async_trait]
-impl StorageProvider<StorageEntry, StorageLog> for SqlStorage {
+impl StorageProvider<StorageEntry, StorageLog, VerifiedOperation> for SqlStorage {
     type EntryArgsResponse = EntryArgsResponse;
     type EntryArgsRequest = EntryArgsRequest;
     type PublishEntryResponse = PublishEntryResponse;

diff --git a/aquadoggo/src/db/stores/schema.rs b/aquadoggo/src/db/stores/schema.rs
index eb6b0216a..46d1df332 100644
--- a/aquadoggo/src/db/stores/schema.rs
+++ b/aquadoggo/src/db/stores/schema.rs
@@ -50,7 +50,7 @@ impl SchemaStore for SqlStorage {
             schema_fields.push(scheme_field_view);
         }
 
-        let schema = Schema::new(schema_view, schema_fields)?;
+        let schema = Schema::from_views(schema_view, schema_fields)?;
 
         Ok(Some(schema))
     }
@@ -87,7 +87,7 @@ impl SchemaStore for SqlStorage {
                 .map(|field| field.to_owned())
                 .collect();
 
-            all_schema.push(Schema::new(schema_view, schema_fields)?);
+            all_schema.push(Schema::from_views(schema_view, schema_fields)?);
         }
 
         Ok(all_schema)

diff --git a/aquadoggo/src/db/stores/test_utils.rs b/aquadoggo/src/db/stores/test_utils.rs
index 9baf60cd4..1008e1029 100644
--- a/aquadoggo/src/db/stores/test_utils.rs
+++ b/aquadoggo/src/db/stores/test_utils.rs
@@ -7,8 +7,8 @@ use p2panda_rs::entry::{sign_and_encode, Entry};
 use p2panda_rs::hash::Hash;
 use p2panda_rs::identity::{Author, KeyPair};
 use p2panda_rs::operation::{
-    AsOperation, Operation, OperationEncoded, OperationId, OperationValue, PinnedRelation,
-    PinnedRelationList, Relation, RelationList, VerifiedOperation,
+    AsOperation, AsVerifiedOperation, Operation, OperationEncoded, OperationId, OperationValue,
+    PinnedRelation, PinnedRelationList, Relation, RelationList, VerifiedOperation,
 };
 use p2panda_rs::schema::SchemaId;
 use p2panda_rs::storage_provider::traits::{

diff --git a/aquadoggo/src/db/utils.rs b/aquadoggo/src/db/utils.rs
index 8619f5cb9..89973483c 100644
--- a/aquadoggo/src/db/utils.rs
+++ b/aquadoggo/src/db/utils.rs
@@ -5,8 +5,8 @@ use std::collections::BTreeMap;
 use p2panda_rs::document::{DocumentId, DocumentViewFields, DocumentViewId, DocumentViewValue};
 use p2panda_rs::identity::Author;
 use p2panda_rs::operation::{
-    Operation, OperationFields, OperationId, OperationValue, PinnedRelation, PinnedRelationList,
-    Relation, RelationList, VerifiedOperation,
+    AsVerifiedOperation, Operation, OperationFields, OperationId, OperationValue, PinnedRelation,
+    PinnedRelationList, Relation, RelationList, VerifiedOperation,
 };
 use p2panda_rs::schema::SchemaId;
 

diff --git a/aquadoggo/src/graphql/client/mutation.rs b/aquadoggo/src/graphql/client/mutation.rs
index 6c6626a64..18badb8c7 100644
--- a/aquadoggo/src/graphql/client/mutation.rs
+++ b/aquadoggo/src/graphql/client/mutation.rs
@@ -84,14 +84,29 @@ impl ClientMutationRoot {
 #[cfg(test)]
 mod tests {
+    use std::convert::TryFrom;
+
     use async_graphql::{from_value, value, Request, Value, Variables};
+    use bamboo_rs_core_ed25519_yasmf::entry::is_lipmaa_required;
     use p2panda_rs::entry::{EntrySigned, LogId, SeqNum};
+    use p2panda_rs::hash::Hash;
+    use p2panda_rs::identity::Author;
+    use p2panda_rs::operation::{Operation, OperationEncoded, OperationValue};
+    use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore};
+    use p2panda_rs::test_utils::constants::{DEFAULT_HASH, DEFAULT_PRIVATE_KEY, TEST_SCHEMA_ID};
+    use p2panda_rs::test_utils::fixtures::{
+        entry_signed_encoded_unvalidated, key_pair, operation, operation_encoded, operation_fields,
+        random_hash,
+    };
+    use rstest::{fixture, rstest};
+    use serde_json::json;
     use tokio::sync::broadcast;
 
     use crate::bus::ServiceMessage;
+    use crate::db::stores::test_utils::{test_db, TestSqlStore};
     use crate::graphql::client::PublishEntryResponse;
-    use crate::http::HttpServiceContext;
-    use crate::test_helpers::initialize_store;
+    use crate::http::{build_server, HttpServiceContext};
+    use crate::test_helpers::{initialize_store, TestClient};
 
     const ENTRY_ENCODED: &str = "00bedabb435758855968b3e2de2aa1f653adfbb392fcf9cb2295a68b2eca3c\
                                  fb030101a200204b771d59d76e820cbae493682003e99b795e4e7c86a8d6b4\
             }
         }"#;
 
-    #[tokio::test]
-    async fn publish_entry() {
-        let (tx, _rx) = broadcast::channel(16);
-        let store = initialize_store().await;
-        let context = HttpServiceContext::new(store, tx);
+    const UPDATE_OPERATION_NO_PREVIOUS_OPS: &str = "A466616374696F6E6675706461746566736368656D617849636861745F30303230633635353637616533376566656132393365333461396337643133663866326266323364626463336235633762396162343632393331313163343866633738626776657273696F6E01666669656C6473A1676D657373616765A26474797065637374726576616C7565764F68682C206D79206669727374206D65737361676521";
+
+    const CREATE_OPERATION_WITH_PREVIOUS_OPS: &str = "A566616374696F6E6663726561746566736368656D617849636861745F30303230633635353637616533376566656132393365333461396337643133663866326266323364626463336235633762396162343632393331313163343866633738626776657273696F6E017370726576696F75735F6F7065726174696F6E738178443030323036356637346636666438316562316261653139656230643864636531343566616136613536643762343037366437666261343338353431303630396232626165666669656C6473A1676D657373616765A26474797065637374726576616C75657357686963682049206E6F77207570646174652E";
+
+    const DELETE_OPERATION_NO_PREVIOUS_OPS: &str = "A366616374696F6E6664656C65746566736368656D617849636861745F30303230633635353637616533376566656132393365333461396337643133663866326266323364626463336235633762396162343632393331313163343866633738626776657273696F6E01";
+
+    #[fixture]
+    fn publish_entry_request(
+        #[default(ENTRY_ENCODED)] entry_encoded: &str,
+        #[default(OPERATION_ENCODED)] operation_encoded: &str,
+    ) -> Request {
         // Prepare GraphQL mutation publishing an entry
         let parameters = Variables::from_value(value!({
-            "entryEncoded": ENTRY_ENCODED,
-            "operationEncoded": OPERATION_ENCODED
+            "entryEncoded": entry_encoded,
+            "operationEncoded": operation_encoded,
         }));
 
-        // Process mutation with given schema
-        let request = Request::new(PUBLISH_ENTRY_QUERY).variables(parameters);
-        let response = context.schema.execute(request).await;
+        Request::new(PUBLISH_ENTRY_QUERY).variables(parameters)
+    }
+
+    #[rstest]
+    #[tokio::test]
+    async fn publish_entry(publish_entry_request: Request) {
+        let (tx, _rx) = broadcast::channel(16);
+        let store = initialize_store().await;
+        let context = HttpServiceContext::new(store, tx);
+
+        let response = context.schema.execute(publish_entry_request).await;
         let received: PublishEntryResponse = match response.data {
             Value::Object(result_outer) => {
                 from_value(result_outer.get("publishEntry").unwrap().to_owned()).unwrap()
             }
@@ -152,21 +180,14 @@ mod tests {
         assert_eq!(expected, received);
     }
 
+    #[rstest]
     #[tokio::test]
-    async fn sends_message_on_communication_bus() {
+    async fn sends_message_on_communication_bus(publish_entry_request: Request) {
        let (tx, mut rx) = broadcast::channel(16);
        let store = initialize_store().await;
        let context = HttpServiceContext::new(store, tx);
 
-        // Prepare GraphQL mutation publishing an entry
-        let parameters = Variables::from_value(value!({
-            "entryEncoded": ENTRY_ENCODED,
"operationEncoded": OPERATION_ENCODED - })); - - // Process mutation with given schema - let request = Request::new(PUBLISH_ENTRY_QUERY).variables(parameters); - context.schema.execute(request).await; + context.schema.execute(publish_entry_request).await; // Find out hash of test entry to determine operation id let entry_encoded = EntrySigned::new(ENTRY_ENCODED).unwrap(); @@ -198,4 +219,385 @@ mod tests { response.errors[0].to_string() ); } + + #[rstest] + #[tokio::test] + async fn post_gql_mutation(publish_entry_request: Request) { + let (tx, _rx) = broadcast::channel(16); + let store = initialize_store().await; + let context = HttpServiceContext::new(store, tx); + let client = TestClient::new(build_server(context)); + + let response = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables + } + )) + .send() + .await; + + assert_eq!( + response.json::().await, + json!({ + "data": { + "publishEntry": { + "logId":"1", + "seqNum":"2", + "backlink":"00201c221b573b1e0c67c5e2c624a93419774cdf46b3d62414c44a698df1237b1c16", + "skiplink":null + } + } + }) + ); + } + + #[rstest] + #[case::no_entry("", "", "Bytes to decode had length of 0")] + #[case::invalid_entry_bytes("AB01", "", "Could not decode author public key from bytes")] + #[case::invalid_entry_hex_encoding( + "-/74='4,.=4-=235m-0 34.6-3", + OPERATION_ENCODED, + "invalid hex encoding in entry" + )] + #[case::no_operation( + ENTRY_ENCODED, + "", + "operation needs to match payload hash of encoded entry" + )] + #[case::invalid_operation_bytes( + ENTRY_ENCODED, + "AB01", + "operation needs to match payload hash of encoded entry" + )] + #[case::invalid_operation_hex_encoding( + ENTRY_ENCODED, + "0-25.-%5930n3544[{{{ @@@", + "invalid hex encoding in operation" + )] + #[case::operation_does_not_match( + ENTRY_ENCODED, + &{operation_encoded(Some(operation_fields(vec![("silly", OperationValue::Text("Sausage".to_string()))])), None, None).as_str().to_owned()}, + "operation needs to match payload hash of encoded entry" + )] + #[case::valid_entry_with_extra_hex_char_at_end( + &{ENTRY_ENCODED.to_string() + "A"}, + OPERATION_ENCODED, + "invalid hex encoding in entry" + )] + #[case::valid_entry_with_extra_hex_char_at_start( + &{"A".to_string() + ENTRY_ENCODED}, + OPERATION_ENCODED, + "invalid hex encoding in entry" + )] + #[case::should_not_have_skiplink( + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + Some(random_hash()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode payload hash DecodeError" + )] + #[case::should_not_have_backlink( + &entry_signed_encoded_unvalidated( + 1, + 1, + Some(random_hash()), + None, + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode payload hash DecodeError" + )] + #[case::should_not_have_backlink_or_skiplink( + &entry_signed_encoded_unvalidated( + 1, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + Some(DEFAULT_HASH.parse().unwrap()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())) +, + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode payload hash DecodeError" + )] + #[case::missing_backlink( + &entry_signed_encoded_unvalidated( + 2, + 1, + None, + None, + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + 
+        "Could not decode backlink yamf hash: DecodeError"
+    )]
+    #[case::missing_skiplink(
+        &entry_signed_encoded_unvalidated(
+            8,
+            1,
+            Some(random_hash()),
+            None,
+            Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())),
+            key_pair(DEFAULT_PRIVATE_KEY)
+        ),
+        OPERATION_ENCODED,
+        "Could not decode backlink yamf hash: DecodeError"
+    )]
+    #[case::should_not_include_skiplink(
+        &entry_signed_encoded_unvalidated(
+            14,
+            1,
+            Some(DEFAULT_HASH.parse().unwrap()),
+            Some(DEFAULT_HASH.parse().unwrap()),
+            Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())),
+            key_pair(DEFAULT_PRIVATE_KEY)
+        ),
+        OPERATION_ENCODED,
+        "Could not decode payload hash DecodeError"
+    )]
+    #[case::payload_hash_and_size_missing(
+        &entry_signed_encoded_unvalidated(
+            14,
+            1,
+            Some(random_hash()),
+            Some(DEFAULT_HASH.parse().unwrap()),
+            None,
+            key_pair(DEFAULT_PRIVATE_KEY)
+        ),
+        OPERATION_ENCODED,
+        "Could not decode payload hash DecodeError"
+    )]
+    #[case::backlink_and_skiplink_not_in_db(
+        &entry_signed_encoded_unvalidated(8, 1, Some(DEFAULT_HASH.parse().unwrap()), Some(Hash::new_from_bytes(vec![2, 3, 4]).unwrap()), Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)),
+        OPERATION_ENCODED,
+        "Could not find expected backlink in database for entry with id: "
+    )]
+    #[case::backlink_not_in_db(
+        &entry_signed_encoded_unvalidated(2, 1, Some(DEFAULT_HASH.parse().unwrap()), None, Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)),
+        OPERATION_ENCODED,
+        "Could not find expected backlink in database for entry with id: "
+    )]
+    #[case::previous_operations_not_in_db(
+        &entry_signed_encoded_unvalidated(1, 1, None, None, Some(operation(Some(operation_fields(vec![("silly", OperationValue::Text("Sausage".to_string()))])), Some(DEFAULT_HASH.parse().unwrap()), None)), key_pair(DEFAULT_PRIVATE_KEY)),
+        &{operation_encoded(Some(operation_fields(vec![("silly", OperationValue::Text("Sausage".to_string()))])), Some(DEFAULT_HASH.parse().unwrap()), None).as_str().to_owned()},
+        "Could not find document for entry in database with id: "
+    )]
+    #[case::create_operation_with_previous_operations(
+        &entry_signed_encoded_unvalidated(1, 1, None, None, Some(Operation::from(&OperationEncoded::new(CREATE_OPERATION_WITH_PREVIOUS_OPS).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)),
+        CREATE_OPERATION_WITH_PREVIOUS_OPS,
+        "previous_operations field should be empty"
+    )]
+    #[case::update_operation_no_previous_operations(
+        &entry_signed_encoded_unvalidated(1, 1, None, None, Some(Operation::from(&OperationEncoded::new(UPDATE_OPERATION_NO_PREVIOUS_OPS).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)),
+        UPDATE_OPERATION_NO_PREVIOUS_OPS,
+        "previous_operations field can not be empty"
+    )]
+    #[case::delete_operation_no_previous_operations(
+        &entry_signed_encoded_unvalidated(1, 1, None, None, Some(Operation::from(&OperationEncoded::new(DELETE_OPERATION_NO_PREVIOUS_OPS).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)),
+        DELETE_OPERATION_NO_PREVIOUS_OPS,
+        "previous_operations field can not be empty"
+    )]
+    #[tokio::test]
+    async fn invalid_requests_fail(
+        #[case] entry_encoded: &str,
+        #[case] operation_encoded: &str,
+        #[case] expected_error_message: &str,
+        #[future]
+        #[from(test_db)]
+        db: TestSqlStore,
+    ) {
+        let db = db.await;
+
+        let (tx, _rx) = broadcast::channel(16);
+        let context = HttpServiceContext::new(db.store, tx);
+        let client = TestClient::new(build_server(context));
+
+        let publish_entry_request = publish_entry_request(entry_encoded, operation_encoded);
+
+        let response = client
+            .post("/graphql")
+            .json(&json!({
+                "query": publish_entry_request.query,
+                "variables": publish_entry_request.variables
+            }
+            ))
+            .send()
+            .await;
+
+        let response = response.json::<serde_json::Value>().await;
+        for error in response.get("errors").unwrap().as_array().unwrap() {
+            assert_eq!(
+                error.get("message").unwrap().as_str().unwrap(),
+                expected_error_message
+            )
+        }
+    }
+
+    #[rstest]
+    #[tokio::test]
+    async fn publish_many_entries(
+        #[from(test_db)]
+        #[future]
+        #[with(100, 1, true, TEST_SCHEMA_ID.parse().unwrap())]
+        db: TestSqlStore,
+    ) {
+        // Test db populated with 100 entries.
+        let populated_db = db.await;
+
+        // Get the author.
+        let author = Author::try_from(
+            populated_db
+                .key_pairs
+                .first()
+                .unwrap()
+                .public_key()
+                .to_owned(),
+        )
+        .unwrap();
+
+        // Set up the server and client with a new empty store.
+        let (tx, _rx) = broadcast::channel(16);
+        let store = initialize_store().await;
+        let context = HttpServiceContext::new(store, tx);
+        let client = TestClient::new(build_server(context));
+
+        // Get the entries from the prepopulated store.
+        let mut entries = populated_db
+            .store
+            .get_entries_by_schema(&TEST_SCHEMA_ID.parse().unwrap())
+            .await
+            .unwrap();
+
+        // Sort them by seq_num.
+        entries.sort_by_key(|entry| entry.seq_num().as_u64());
+
+        for entry in entries {
+            // Prepare a publish entry request for each entry.
+            let publish_entry_request = publish_entry_request(
+                entry.entry_signed().as_str(),
+                entry.operation_encoded().unwrap().as_str(),
+            );
+
+            // Publish the entry and parse the response.
+            let response = client
+                .post("/graphql")
+                .json(&json!({
+                    "query": publish_entry_request.query,
+                    "variables": publish_entry_request.variables
+                }
+                ))
+                .send()
+                .await;
+
+            let response = response.json::<serde_json::Value>().await;
+            let publish_entry_response = response.get("data").unwrap().get("publishEntry").unwrap();
+
+            // Calculate the skiplink we expect in the response.
+            let next_seq_num = entry.seq_num().next().unwrap();
+            let skiplink_seq_num = next_seq_num.skiplink_seq_num();
+            let skiplink_entry = match skiplink_seq_num {
+                Some(seq_num) if is_lipmaa_required(next_seq_num.as_u64()) => populated_db
+                    .store
+                    .get_entry_at_seq_num(&author, &entry.log_id(), &seq_num)
+                    .await
+                    .unwrap()
+                    .map(|entry| entry.hash().as_str().to_owned()),
+                _ => None,
+            };
+
+            // Assert the returned log_id, seq_num, backlink and skiplink match our expectations.
+            assert_eq!(
+                publish_entry_response
+                    .get("logId")
+                    .unwrap()
+                    .as_str()
+                    .unwrap(),
+                "1"
+            );
+            assert_eq!(
+                publish_entry_response
+                    .get("seqNum")
+                    .unwrap()
+                    .as_str()
+                    .unwrap(),
+                next_seq_num.as_u64().to_string()
+            );
+            assert_eq!(
+                publish_entry_response
+                    .get("skiplink")
+                    .unwrap()
+                    .as_str()
+                    .map(|hash| hash.to_string()),
+                skiplink_entry
+            );
+            assert_eq!(
+                publish_entry_response
+                    .get("backlink")
+                    .unwrap()
+                    .as_str()
+                    .unwrap(),
+                entry.hash().as_str()
+            );
+        }
+    }
+
+    #[rstest]
+    #[tokio::test]
+    async fn duplicate_publishing_of_entries(
+        #[from(test_db)]
+        #[future]
+        #[with(1, 1, false, TEST_SCHEMA_ID.parse().unwrap())]
+        db: TestSqlStore,
+    ) {
+        let populated_db = db.await;
+
+        let (tx, _rx) = broadcast::channel(16);
+        let context = HttpServiceContext::new(populated_db.store.clone(), tx);
+        let client = TestClient::new(build_server(context));
+
+        // Get the entries from the prepopulated store.
+        let mut entries = populated_db
+            .store
+            .get_entries_by_schema(&TEST_SCHEMA_ID.parse().unwrap())
+            .await
+            .unwrap();
+
+        // Sort them by seq_num.
+        entries.sort_by_key(|entry| entry.seq_num().as_u64());
+
+        let duplicate_entry = entries.first().unwrap();
+
+        // Prepare a publish entry request for the duplicate entry.
+        let publish_entry_request = publish_entry_request(
+            duplicate_entry.entry_signed().as_str(),
+            duplicate_entry.operation_encoded().unwrap().as_str(),
+        );
+
+        // Publish the entry and parse the response.
+        let response = client
+            .post("/graphql")
+            .json(&json!({
+                "query": publish_entry_request.query,
+                "variables": publish_entry_request.variables
+            }
+            ))
+            .send()
+            .await;
+
+        let response = response.json::<serde_json::Value>().await;
+
+        // TODO: I think we'd like a nicer error message here: https://github.com/p2panda/aquadoggo/issues/159
+        for error in response.get("errors").unwrap().as_array().unwrap() {
+            assert_eq!(error.get("message").unwrap().as_str().unwrap(), "Error occured during `LogStorage` request in storage provider: error returned from database: UNIQUE constraint failed: logs.author, logs.log_id")
+        }
+    }
 }
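
Note (not part of the patch): the round trip these tests exercise can also be reproduced against a running node with any GraphQL client. The sketch below is illustrative only. It assumes a local aquadoggo node serving GraphQL under `/graphql` (the path used by `build_server` in the tests) on port 2020 (an assumption), and it uses the `reqwest` (with its "json" feature), `serde_json` and `tokio` crates, none of which this patch adds. The placeholder strings stand in for any hex-encoded signed entry and its matching encoded operation, such as the `ENTRY_ENCODED` and `OPERATION_ENCODED` test constants above.

    // Illustrative sketch: publish a signed entry and its operation by posting
    // the same mutation body the `TestClient` sends in the tests above.
    use serde_json::json;

    #[tokio::main]
    async fn main() -> Result<(), reqwest::Error> {
        // Hex-encoded signed entry and matching operation payload, e.g. the
        // ENTRY_ENCODED and OPERATION_ENCODED constants from the tests.
        let entry_encoded = "<hex-encoded signed entry>";
        let operation_encoded = "<hex-encoded operation>";

        // Same shape as PUBLISH_ENTRY_QUERY: the mutation returns the arguments
        // (logId, seqNum, backlink, skiplink) for the next entry in the log.
        let query = r#"mutation PublishEntry($entryEncoded: String!, $operationEncoded: String!) {
            publishEntry(entryEncoded: $entryEncoded, operationEncoded: $operationEncoded) {
                logId
                seqNum
                backlink
                skiplink
            }
        }"#;

        let response = reqwest::Client::new()
            .post("http://localhost:2020/graphql")
            .json(&json!({
                "query": query,
                "variables": {
                    "entryEncoded": entry_encoded,
                    "operationEncoded": operation_encoded,
                }
            }))
            .send()
            .await?;

        // On success, `data.publishEntry` carries the next-entry arguments;
        // on failure an `errors` array is returned, as asserted in the tests.
        println!("{}", response.text().await?);
        Ok(())
    }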