Dynamic GraphQL document API (#141)
cafca committed Aug 7, 2022
1 parent 4e514ca commit 84c1b9b
Showing 67 changed files with 2,844 additions and 610 deletions.
12 changes: 12 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions aquadoggo/Cargo.toml
@@ -18,6 +18,7 @@ edition = "2018"
anyhow = "^1.0.58"
async-graphql = "^3.0.38"
async-graphql-axum = "^3.0.38"
async-recursion = "^1.0.0"
async-trait = "^0.1.56"
axum = "^0.5.10"
bamboo-rs-core-ed25519-yasmf = "^0.1.1"
@@ -30,6 +31,7 @@ futures = "^0.3.21"
gql_client = "^1.0.6"
lipmaa-link = "^0.2.2"
log = "^0.4.17"
once_cell = "^1.12.0"
openssl-probe = "^0.1.5"
p2panda-rs = { git = "https://github.com/p2panda/p2panda", rev = "5d6508d5a9b4b766621c3bd14879cc568fbac02d" }
serde = { version = "^1.0.137", features = ["derive"] }
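The two new dependencies hint at how the dynamic document API is put together: `async-recursion` lets an async function await a call to itself (useful when following nested document relations), and `once_cell` provides lazily initialised statics. A minimal, hypothetical sketch of the `async-recursion` pattern — the function and its nesting logic are illustrative only, not taken from this commit:

```rust
use async_recursion::async_recursion;
use futures::executor::block_on;

/// Illustrative only: walk `remaining` levels of nesting and count them.
/// `#[async_recursion]` boxes the returned future so an `async fn` is
/// allowed to await a call to itself.
#[async_recursion]
async fn nesting_depth(remaining: u32) -> u32 {
    if remaining == 0 {
        return 0;
    }
    1 + nesting_depth(remaining - 1).await
}

fn main() {
    // `futures` is already a dependency of aquadoggo (see the diff above).
    assert_eq!(block_on(nesting_depth(3)), 3);
}
```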
22 changes: 12 additions & 10 deletions aquadoggo/src/context.rs
@@ -3,25 +3,27 @@
use std::ops::Deref;
use std::sync::Arc;

use p2panda_rs::storage_provider::traits::StorageProvider;

use crate::config::Configuration;
use crate::db::provider::SqlStorage;
use crate::schema::SchemaProvider;

/// Inner data shared across all services.
#[derive(Debug)]
pub struct Data {
pub struct Data<S: StorageProvider> {
/// Node configuration.
pub config: Configuration,

/// Storage provider with database connection pool.
pub store: SqlStorage,
pub store: S,

/// Schema provider gives access to system and application schemas.
pub schema_provider: SchemaProvider,
}

impl Data {
pub fn new(store: SqlStorage, config: Configuration, schema_provider: SchemaProvider) -> Self {
impl<S: StorageProvider> Data<S> {
pub fn new(store: S, config: Configuration, schema_provider: SchemaProvider) -> Self {
Self {
config,
store,
@@ -32,23 +34,23 @@ impl Data

/// Data shared across all services.
#[derive(Debug)]
pub struct Context(pub Arc<Data>);
pub struct Context<S: StorageProvider = SqlStorage>(pub Arc<Data<S>>);

impl Context {
impl<S: StorageProvider> Context<S> {
/// Returns a new instance of `Context`.
pub fn new(store: SqlStorage, config: Configuration, schema_provider: SchemaProvider) -> Self {
pub fn new(store: S, config: Configuration, schema_provider: SchemaProvider) -> Self {
Self(Arc::new(Data::new(store, config, schema_provider)))
}
}

impl Clone for Context {
impl<S: StorageProvider> Clone for Context<S> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}

impl Deref for Context {
type Target = Data;
impl<S: StorageProvider> Deref for Context<S> {
type Target = Data<S>;

fn deref(&self) -> &Self::Target {
self.0.as_ref()
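The change above makes `Context` generic over any `StorageProvider`, with `SqlStorage` as the default, so existing call sites keep compiling while tests can plug in another store. A minimal sketch of how such a default type parameter behaves, using a stand-in trait rather than the real `p2panda_rs` one:

```rust
use std::sync::Arc;

// Stand-in trait, just to show the pattern; the real `StorageProvider`
// trait comes from `p2panda_rs`.
trait StorageProvider {}

struct SqlStorage;
impl StorageProvider for SqlStorage {}

// The default means a plain `Context` is shorthand for `Context<SqlStorage>`.
struct Context<S: StorageProvider = SqlStorage>(Arc<S>);

fn production_context(store: SqlStorage) -> Context {
    Context(Arc::new(store))
}

fn main() {
    let _ctx: Context = production_context(SqlStorage);
}
```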
2 changes: 1 addition & 1 deletion aquadoggo/src/db/models/entry.rs
@@ -8,7 +8,7 @@ use sqlx::FromRow;
///
/// We store the u64 integer values of `log_id` and `seq_num` as strings since SQLite doesn't
/// support storing unsigned 64 bit integers.
#[derive(FromRow, Debug, Serialize, Clone, PartialEq)]
#[derive(FromRow, Debug, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct EntryRow {
/// Public key of the author.
2 changes: 1 addition & 1 deletion aquadoggo/src/db/models/task.rs
@@ -6,7 +6,7 @@ use sqlx::FromRow;
/// Representation of a row from the `tasks` table as stored in the database.
///
/// This table holds all "pending" tasks of the materialization service worker.
#[derive(FromRow, Debug, Serialize, Clone, PartialEq)]
#[derive(FromRow, Debug, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct TaskRow {
/// Name of the task worker.
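Adding `Eq` next to `PartialEq` on `EntryRow` and `TaskRow` declares that equality on these rows is total — a pattern clippy's `derive_partial_eq_without_eq` lint nudges towards whenever every field is itself `Eq`, which is probably the motivation here. The pattern in isolation, on a hypothetical struct rather than the real row types:

```rust
// `Eq` adds no methods; it is a marker on top of `PartialEq` promising that
// equality is total and reflexive (no NaN-like values among the fields).
#[derive(Debug, PartialEq, Eq)]
struct Row {
    id: String,
    value: u64,
}

fn main() {
    let a = Row { id: "a".into(), value: 1 };
    assert_eq!(a, Row { id: "a".into(), value: 1 });
}
```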
8 changes: 3 additions & 5 deletions aquadoggo/src/db/provider.rs
@@ -121,10 +121,8 @@ mod tests {
use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
use crate::db::traits::DocumentStore;

use super::SqlStorage;

/// Inserts a `DocumentView` into the db and returns its view id.
async fn insert_document_view(db: &TestDatabase<SqlStorage>) -> DocumentViewId {
async fn insert_document_view(db: &TestDatabase) -> DocumentViewId {
let author = Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
let entry = db
.store
@@ -156,7 +154,7 @@ mod tests {
#[with(1, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let document_view_id = insert_document_view(&db).await;
let result = db
.store
@@ -175,7 +173,7 @@ mod tests {
#[with(1, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let result = db
.store
.get_schema_by_document_view(&random_document_view_id)
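The test signatures above lean on rstest's fixture attributes: `#[from(test_db)]` injects the `test_db` fixture under a different argument name, and `#[with(1, 1, 1)]` overrides the fixture's default arguments (what the three numbers mean is defined by the fixture itself). A small self-contained sketch of those two attributes, using an illustrative fixture unrelated to the aquadoggo test utilities:

```rust
use rstest::{fixture, rstest};

#[fixture]
fn database(#[default(1)] entries: usize, #[default(1)] authors: usize) -> String {
    format!("{} entries by {} authors", entries, authors)
}

// `#[from(database)]` injects the fixture under the argument name `db`;
// `#[with(10, 2)]` overrides the fixture's default arguments for this test.
#[rstest]
fn populated(#[from(database)] #[with(10, 2)] db: String) {
    assert_eq!(db, "10 entries by 2 authors");
}
```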
21 changes: 10 additions & 11 deletions aquadoggo/src/db/stores/document.rs
@@ -333,7 +333,6 @@ mod tests {
};
use rstest::rstest;

use crate::db::provider::SqlStorage;
use crate::db::stores::document::{DocumentStore, DocumentView};
use crate::db::stores::entry::StorageEntry;
use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
@@ -374,7 +373,7 @@ mod tests {
#[with(1, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let author =
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();

@@ -436,7 +435,7 @@ mod tests {
#[with(1, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let view_does_not_exist = db
.store
.get_document_view_by_id(&random_document_view_id)
@@ -453,7 +452,7 @@ mod tests {
#[with(10, 1, 1, false, SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let author =
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
let schema_id = SchemaId::from_str(SCHEMA_ID).unwrap();
@@ -511,7 +510,7 @@ mod tests {
#[from(test_db)] runner: TestDatabaseRunner,
operation: Operation,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let document_view = DocumentView::new(
&document_view_id,
&DocumentViewFields::new_from_operation_fields(
@@ -535,7 +534,7 @@ mod tests {
#[with(1, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let document_id = db.test_data.documents[0].clone();

let document_operations = db
@@ -582,7 +581,7 @@ mod tests {
#[with(1, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let document_id = db.test_data.documents[0].clone();

let document_operations = db
@@ -629,7 +628,7 @@ mod tests {
#[with(10, 1, 1, true)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let document_id = db.test_data.documents[0].clone();

let document_operations = db
@@ -656,7 +655,7 @@ mod tests {
#[with(10, 1, 1, true)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let document_id = db.test_data.documents[0].clone();

let document_operations = db
@@ -687,7 +686,7 @@ mod tests {
#[with(10, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let document_id = db.test_data.documents[0].clone();

let document_operations = db
@@ -722,7 +721,7 @@ mod tests {
#[with(10, 2, 1, false, SCHEMA_ID.parse().unwrap())]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let schema_id = SchemaId::from_str(SCHEMA_ID).unwrap();

for document_id in &db.test_data.documents {
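Every test in this file runs through `runner.with_db_teardown(|db: TestDatabase| async move { ... })`. A rough sketch of what such a runner typically looks like — simplified stand-ins, not the actual `TestDatabaseRunner` from `db::stores::test_utils`:

```rust
use std::future::Future;

use futures::executor::block_on;

// Stand-ins for the real test types.
struct TestDatabase;
struct TestDatabaseRunner;

impl TestDatabaseRunner {
    /// Builds a database, runs the async test body to completion against it,
    /// and performs teardown once the body has finished.
    fn with_db_teardown<F, Fut>(&self, test: F)
    where
        F: FnOnce(TestDatabase) -> Fut,
        Fut: Future<Output = ()>,
    {
        let db = TestDatabase; // set-up would create tables / seed data here
        block_on(test(db));
        // teardown (closing the pool, dropping tables) would run here
    }
}

fn main() {
    TestDatabaseRunner.with_db_teardown(|_db: TestDatabase| async move {
        // test body goes here
    });
}
```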
19 changes: 9 additions & 10 deletions aquadoggo/src/db/stores/entry.rs
@@ -22,7 +22,7 @@ use crate::db::provider::SqlStorage;
///
/// This struct implements the `AsStorageEntry` trait which is required when constructing the
/// `EntryStore`.
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct StorageEntry {
entry_signed: EntrySigned,
operation_encoded: OperationEncoded,
@@ -430,13 +430,12 @@ mod tests {
use p2panda_rs::test_utils::fixtures::{entry, key_pair};
use rstest::rstest;

use crate::db::provider::SqlStorage;
use crate::db::stores::entry::StorageEntry;
use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};

#[rstest]
fn insert_entry(key_pair: KeyPair, entry: Entry, #[from(test_db)] runner: TestDatabaseRunner) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap();
let operation_encoded = OperationEncoded::try_from(entry.operation().unwrap()).unwrap();
let doggo_entry = StorageEntry::new(&entry_encoded, &operation_encoded).unwrap();
@@ -452,7 +451,7 @@ mod tests {
#[with(10, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let author =
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
let log_id = LogId::default();
@@ -481,7 +480,7 @@ mod tests {
#[with(20, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap();
let log_id = LogId::default();

@@ -510,7 +509,7 @@ mod tests {
#[with(20, 1, 2, false, SCHEMA_ID.parse().unwrap())]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let schema_not_in_the_db = SchemaId::new_application(
"venue",
&Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(),
@@ -540,7 +539,7 @@ mod tests {
#[with(10, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let author =
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();

@@ -590,7 +589,7 @@ mod tests {
#[with(20, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let author =
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();

@@ -629,7 +628,7 @@ mod tests {
#[with(30, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let author =
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();

@@ -666,7 +665,7 @@ mod tests {
#[with(20, 1, 1)]
runner: TestDatabaseRunner,
) {
runner.with_db_teardown(|db: TestDatabase<SqlStorage>| async move {
runner.with_db_teardown(|db: TestDatabase| async move {
let author =
Author::try_from(db.test_data.key_pairs[0].public_key().to_owned()).unwrap();
