From ab8d18618d27c1d5f1edd22fb310cbe64f3e636d Mon Sep 17 00:00:00 2001 From: Andreas Dzialocha Date: Mon, 27 Jun 2022 16:22:57 +0200 Subject: [PATCH] Run tests against PostgreSQL, fix compatibility (#170) * Make database url configurable for tests * Add github action for postgres, probably this is the beginning of a longer try-and-error phase * Add --command * Add MySQL, try using services * Enable logging * Try different logging * Remove that fancy fluff * Fix comment * Do not reset * Who did that typo? * Reset databases again * Correct database env var for postgres container * What was that * Reduce parallelization * Do not use BLOB * Make sure to disconnect when migration failed * Set mysql database again * Correctly run tests serially * WIP * Fix column type * Clean up * Add missing trait again * Bring back failing UNIQUE constraint * Make previous_operations in OperationRow an Option * Fix COALESCE type from int to text * Make previous_operation an Option in OperationFieldsJoinedRow * Update parse_operation_rows to account for None value previous_operations * Don't check for exact db error strings * Missing await * Typo * WIP massive test refactor making sure they get unwinded * Enable all features for the testing runtime * Fix ownership for tests with many cases and borrowed values * Don't use rstest shoul_panic macro * Make list_index INTEGER(255) * Revert list_index type change * Make columns Option in OperationFieldRow * Don't check for specific error message strings * Cast strings to numerics in get_paginated_log_entries() * Use ON CONFLICT to avoid task duplicates * Use COALESCE to compare nullable strings * Use test runner in all mutation tests * Revert removal of empty store in publish_entry test * Refactor test which used two dbs * Use test runner in query tests * Use test runner for http service test * Try different env var names for mariadb container * Try changing port configuration for mariadb container * Try using container label as hostname * Roll back port changes * Expose port properly * Remove MySQL support, fix async runtime of sqlx * Add entry to CHANGELOG.md * Give null values the same representation as index * Make fields in other rows also optional * Last tests using old database utilities * Clean up * Remove unnecessary comments, just unwrap test result * Make linters happy * Improve formatting of long case tests * Edit comments * Add some more comments Co-authored-by: Sam Andreae Co-authored-by: Vincent Ahrend --- .github/workflows/tests.yml | 57 +- CHANGELOG.md | 1 + Cargo.lock | 496 ++------------- README.md | 2 +- aquadoggo/Cargo.toml | 9 +- aquadoggo/README.md | 2 +- .../20220509090252_create-operations.sql | 2 +- .../20220617115933_create-tasks.sql | 4 +- aquadoggo/src/config.rs | 2 +- aquadoggo/src/db/mod.rs | 2 - aquadoggo/src/db/models/operation.rs | 34 +- aquadoggo/src/db/stores/document.rs | 472 +++++++------- aquadoggo/src/db/stores/entry.rs | 426 ++++++------- aquadoggo/src/db/stores/log.rs | 357 +++++------ aquadoggo/src/db/stores/operation.rs | 189 +++--- aquadoggo/src/db/stores/schema.rs | 235 ++++--- aquadoggo/src/db/stores/task.rs | 99 ++- aquadoggo/src/db/stores/test_utils.rs | 183 +++++- aquadoggo/src/db/utils.rs | 319 +++++----- aquadoggo/src/graphql/client/mutation.rs | 595 +++++++++--------- aquadoggo/src/graphql/client/query.rs | 126 ++-- aquadoggo/src/http/service.rs | 55 +- aquadoggo/src/materializer/service.rs | 238 ++++--- .../src/materializer/tasks/dependency.rs | 373 ++++++----- aquadoggo/src/materializer/tasks/reduce.rs | 267 
++++---- aquadoggo/src/test_helpers.rs | 71 +-- aquadoggo_cli/README.md | 2 +- 27 files changed, 2295 insertions(+), 2323 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 38806ce4e..8da77bd35 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -43,10 +43,58 @@ jobs: with: command: test args: --manifest-path ${{ env.cargo_manifest }} - # Ensure debug output is also tested env: + # Ensure debug output is also tested RUST_LOG: debug + rust-test-postgres: + runs-on: ubuntu-latest + + services: + postgres: + image: postgres:latest + env: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: aquadoggo-development + ports: + # Maps TCP port 5432 on service container to the host + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Setup Rust toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Restore from cargo cache + uses: actions/cache@v3 + with: + path: ${{ env.cache_path }} + key: ${{ runner.os }}-test-${{ hashFiles('**/Cargo.lock') }} + + - name: Run tests + uses: actions-rs/cargo@v1 + with: + command: test + # Make sure the tests run consecutively to avoid accessing the same + # database by multiple test threads + args: >- + --manifest-path ${{ env.cargo_manifest }} + -- --test-threads 1 + env: + DATABASE_URL: postgresql://postgres:postgres@localhost:5432/aquadoggo-development + rust-check: runs-on: ubuntu-latest @@ -151,9 +199,12 @@ jobs: uses: actions-rs/tarpaulin@v0.1 with: # Force cleaning via `--force-clean` flag to prevent buggy code coverage - args: --manifest-path ${{ env.cargo_manifest }} --locked --force-clean - # Ensure debug output is also tested + args: >- + --manifest-path ${{ env.cargo_manifest }} + --locked + --force-clean env: + # Ensure debug output is also tested RUST_LOG: debug - name: Upload to codecov.io diff --git a/CHANGELOG.md b/CHANGELOG.md index eac48fc94..f011c0d61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fix high CPU usage of idle workers [#136](https://github.com/p2panda/aquadoggo/pull/136) - Improve CI, track test coverage [#139](https://github.com/p2panda/aquadoggo/pull/139) +- Fix compatibility with PostgreSQL, change sqlx runtime to `tokio` [#170](https://github.com/p2panda/aquadoggo/pull/170) ## [0.2.0] diff --git a/Cargo.lock b/Cargo.lock index c82fdc3d1..dcf6d930c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,6 +121,7 @@ dependencies = [ "lru", "mockall", "mockall_double", + "once_cell", "openssl-probe", "p2panda-rs", "rand 0.8.5", @@ -179,46 +180,6 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71938f30533e4d95a6d17aa530939da3842c2ab6f4f84b9dae68447e4129f74a" -[[package]] -name = "async-channel" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" -dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "async-executor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", 
- "futures-lite", - "once_cell", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8b508d585e01084059b60f06ade4cb7415cd2e4084b71dd1cb44e7d3fb9880" -dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-lock", - "blocking", - "futures-lite", - "once_cell", -] - [[package]] name = "async-graphql" version = "3.0.38" @@ -308,90 +269,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "async-io" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5e18f61464ae81cde0a23e713ae8fd299580c54d697a35820cfd0625b8b0e07" -dependencies = [ - "concurrent-queue", - "futures-lite", - "libc", - "log", - "once_cell", - "parking", - "polling", - "slab", - "socket2", - "waker-fn", - "winapi", -] - -[[package]] -name = "async-lock" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-process" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2c06e30a24e8c78a3987d07f0930edf76ef35e027e7bdb063fccafdad1f60c" -dependencies = [ - "async-io", - "blocking", - "cfg-if", - "event-listener", - "futures-lite", - "libc", - "once_cell", - "signal-hook", - "winapi", -] - -[[package]] -name = "async-rustls" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c86f33abd5a4f3e2d6d9251a9e0c6a7e52eb1113caf893dae8429bf4a53f378" -dependencies = [ - "futures-lite", - "rustls", - "webpki", -] - -[[package]] -name = "async-std" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52580991739c5cdb36cde8b2a516371c0a3b70dda36d916cc08b82372916808c" -dependencies = [ - "async-channel", - "async-global-executor", - "async-io", - "async-lock", - "async-process", - "crossbeam-utils", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "num_cpus", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - [[package]] name = "async-stream" version = "0.3.3" @@ -413,12 +290,6 @@ dependencies = [ "syn", ] -[[package]] -name = "async-task" -version = "4.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30696a84d817107fc028e049980e09d5e140e8da8f1caeb17e8e950658a3cea9" - [[package]] name = "async-trait" version = "0.1.56" @@ -439,12 +310,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "atomic-waker" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" - [[package]] name = "atty" version = "0.2.14" @@ -456,15 +321,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -643,20 +499,6 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "blocking" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ccb65d468978a086b69884437ded69a90faab3bbe6e67f242173ea728acccc" -dependencies = 
[ - "async-channel", - "async-task", - "atomic-waker", - "fastrand", - "futures-lite", - "once_cell", -] - [[package]] name = "bumpalo" version = "3.10.0" @@ -684,12 +526,6 @@ dependencies = [ "serde", ] -[[package]] -name = "cache-padded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" - [[package]] name = "cc" version = "1.0.73" @@ -809,15 +645,6 @@ dependencies = [ "unreachable", ] -[[package]] -name = "concurrent-queue" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" -dependencies = [ - "cache-padded", -] - [[package]] name = "console_error_panic_hook" version = "0.1.7" @@ -828,12 +655,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "const-oid" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" - [[package]] name = "const-oid" version = "0.7.1" @@ -897,7 +718,7 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" dependencies = [ - "autocfg 1.1.0", + "autocfg", "cfg-if", "crossbeam-utils", "lazy_static", @@ -925,17 +746,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "crypto-bigint" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83bd3bb4314701c568e340cd8cf78c975aa0ca79e03d3f6d1677d5b0c9c0c03" -dependencies = [ - "generic-array 0.14.5", - "rand_core 0.6.3", - "subtle", -] - [[package]] name = "crypto-bigint" version = "0.3.2" @@ -1058,23 +868,13 @@ dependencies = [ "tokio", ] -[[package]] -name = "der" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b71cca7d95d7681a4b3b9cdf63c8dbc3730d0584c2c74e31416d64a90493f4" -dependencies = [ - "const-oid 0.6.2", - "crypto-bigint 0.2.11", -] - [[package]] name = "der" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" dependencies = [ - "const-oid 0.7.1", + "const-oid", ] [[package]] @@ -1165,7 +965,7 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" dependencies = [ - "der 0.5.1", + "der", "elliptic-curve", "rfc6979", "signature", @@ -1210,8 +1010,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" dependencies = [ "base16ct", - "crypto-bigint 0.3.2", - "der 0.5.1", + "crypto-bigint", + "der", "ff", "generic-array 0.14.5", "group", @@ -1270,12 +1070,6 @@ dependencies = [ "rustversion", ] -[[package]] -name = "event-listener" -version = "2.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" - [[package]] name = "failure" version = "0.1.8" @@ -1440,21 +1234,6 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" -[[package]] -name = "futures-lite" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - [[package]] name = "futures-macro" version = "0.3.21" @@ -1568,18 +1347,6 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" -[[package]] -name = "gloo-timers" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "graphql-introspection-query" version = "0.2.0" @@ -1908,7 +1675,7 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6012d540c5baa3589337a98ce73408de9b5a25ec9fc2c6fd6be8f0d39e0ca5a" dependencies = [ - "autocfg 1.1.0", + "autocfg", "hashbrown", "serde", ] @@ -1968,23 +1735,11 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -dependencies = [ - "spin 0.5.2", -] [[package]] name = "libc" @@ -1992,12 +1747,6 @@ version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" -[[package]] -name = "libm" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" - [[package]] name = "libsqlite3-sys" version = "0.23.2" @@ -2027,7 +1776,7 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ - "autocfg 1.1.0", + "autocfg", "scopeguard", ] @@ -2038,7 +1787,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", - "value-bag", ] [[package]] @@ -2091,7 +1839,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -2212,64 +1960,13 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-bigint-dig" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4547ee5541c18742396ae2c895d0717d0f886d8823b8399cdaf7b07d63ad0480" -dependencies = [ 
- "autocfg 0.1.8", - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand 0.8.5", - "smallvec", - "zeroize", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg 1.1.0", - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ - "autocfg 1.1.0", - "libm", + "autocfg", ] [[package]] @@ -2325,7 +2022,7 @@ dependencies = [ "thiserror", "tls_codec", "typetag", - "uuid 1.1.2", + "uuid", ] [[package]] @@ -2435,12 +2132,6 @@ dependencies = [ "sec1", ] -[[package]] -name = "parking" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" - [[package]] name = "parking_lot" version = "0.11.2" @@ -2495,15 +2186,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" -[[package]] -name = "pem-rfc7468" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84e93a3b1cc0510b03020f33f21e62acdde3dcaef432edc95bea377fbd4c2cd4" -dependencies = [ - "base64ct", -] - [[package]] name = "percent-encoding" version = "2.1.0" @@ -2585,38 +2267,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs1" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "116bee8279d783c0cf370efa1a94632f2108e5ef0bb32df31f051647810a4e2c" -dependencies = [ - "der 0.4.5", - "pem-rfc7468", - "zeroize", -] - -[[package]] -name = "pkcs8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3ef9b64d26bad0536099c816c6734379e45bbd5f14798def6809e5cc350447" -dependencies = [ - "der 0.4.5", - "pem-rfc7468", - "pkcs1", - "spki 0.4.1", - "zeroize", -] - [[package]] name = "pkcs8" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" dependencies = [ - "der 0.5.1", - "spki 0.5.4", + "der", + "spki", "zeroize", ] @@ -2626,19 +2284,6 @@ version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" -[[package]] -name = "polling" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" -dependencies = [ - "cfg-if", - "libc", - "log", - "wepoll-ffi", - "winapi", -] - [[package]] name = "poly1305" version = "0.7.2" @@ -2827,7 +2472,7 @@ version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = 
[ - "autocfg 1.1.0", + "autocfg", "crossbeam-deque", "either", "rayon-core", @@ -2932,7 +2577,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" dependencies = [ - "crypto-bigint 0.3.2", + "crypto-bigint", "hmac 0.11.0", "zeroize", ] @@ -2952,26 +2597,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rsa" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e05c2603e2823634ab331437001b411b9ed11660fbc4066f3908c84a9439260d" -dependencies = [ - "byteorder", - "digest 0.9.0", - "lazy_static", - "num-bigint-dig", - "num-integer", - "num-iter", - "num-traits", - "pkcs1", - "pkcs8 0.7.6", - "rand 0.8.5", - "subtle", - "zeroize", -] - [[package]] name = "rstest" version = "0.12.0" @@ -3067,9 +2692,9 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ - "der 0.5.1", + "der", "generic-array 0.14.5", - "pkcs8 0.8.0", + "pkcs8", "subtle", "zeroize", ] @@ -3242,16 +2867,6 @@ dependencies = [ "digest 0.10.3", ] -[[package]] -name = "signal-hook" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -3329,15 +2944,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spki" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c01a0c15da1b0b0e1494112e7af814a678fec9bd157881b49beac661e9b6f32" -dependencies = [ - "der 0.4.5", -] - [[package]] name = "spki" version = "0.5.4" @@ -3345,7 +2951,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" dependencies = [ "base64ct", - "der 0.5.1", + "der", ] [[package]] @@ -3383,17 +2989,14 @@ dependencies = [ "bytes", "crc", "crossbeam-queue", - "digest 0.9.0", "dirs", "either", - "encoding_rs", "flume", "futures-channel", "futures-core", "futures-executor", "futures-intrusive", "futures-util", - "generic-array 0.14.5", "hashlink", "hex", "hmac 0.11.0", @@ -3404,13 +3007,10 @@ dependencies = [ "log", "md-5", "memchr", - "num-bigint", "once_cell", "paste", "percent-encoding", "rand 0.8.5", - "regex", - "rsa", "rustls", "serde", "serde_json", @@ -3421,8 +3021,8 @@ dependencies = [ "sqlx-rt", "stringprep", "thiserror", + "tokio-stream", "url", - "uuid 0.8.2", "webpki", "webpki-roots", "whoami", @@ -3453,8 +3053,9 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae" dependencies = [ - "async-rustls", - "async-std", + "once_cell", + "tokio", + "tokio-rustls", ] [[package]] @@ -3695,6 +3296,28 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +dependencies = [ + 
"futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-tungstenite" version = "0.17.1" @@ -3986,12 +3609,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" - [[package]] name = "uuid" version = "1.1.2" @@ -4001,16 +3618,6 @@ dependencies = [ "getrandom 0.2.7", ] -[[package]] -name = "value-bag" -version = "1.0.0-alpha.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - "version_check", -] - [[package]] name = "varu64" version = "0.6.2" @@ -4041,12 +3648,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - [[package]] name = "want" version = "0.3.0" @@ -4164,15 +3765,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "whoami" version = "1.2.1" diff --git a/README.md b/README.md index 53311ac98..d91da41e7 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ Configurable node server implementation for the [`p2panda`] network running as a - Awaits signed operations from clients via GraphQL. - Verifies the consistency, format and signature of operations and rejects invalid ones. -- Stores operations of the network in a SQL database of your choice (SQLite, PostgreSQL or MySQL). +- Stores operations of the network in an SQL database of your choice (SQLite, PostgreSQL). - Materializes views on top of the known data. - Answers filterable and paginated data queries via GraphQL. - Discovers other nodes in local network and internet. 
diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index 1fd35b784..f15029934 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -26,6 +26,7 @@ deadqueue = { version = "0.2.2", default-features = false, features = [ ] } directories = "3.0.2" envy = "0.4.2" +futures = "0.3.21" graphql_client = "0.10" hex = "0.4.3" lipmaa-link = "0.2.2" @@ -42,8 +43,10 @@ serde = { version = "1.0.130", features = ["derive"] } serde_json = "1.0.67" sqlformat = "0.1.7" sqlx = { version = "0.5.7", features = [ - "all-databases", - "runtime-async-std-rustls", + "any", + "postgres", + "sqlite", + "runtime-tokio-rustls", ] } thiserror = "1.0.29" tokio = { version = "1.17.0", features = [ @@ -58,11 +61,11 @@ tower-http = { version = "0.2.4", default-features = false, features = [ "cors", ] } triggered = "0.1.2" -futures = "0.3.21" [dev-dependencies] hyper = "0.14.17" http = "0.2.6" +once_cell = "1.12.0" reqwest = { version = "0.11.9", default-features = false, features = [ "json", "stream", diff --git a/aquadoggo/README.md b/aquadoggo/README.md index 58771b673..299ef66b5 100644 --- a/aquadoggo/README.md +++ b/aquadoggo/README.md @@ -47,7 +47,7 @@ Configurable node server implementation for the [`p2panda`] network which can be - Awaits signed operations from clients via a JSON RPC API. - Verifies the consistency, format and signature of operations and rejects invalid ones. -- Stores operations of the network in a SQL database of your choice (SQLite, PostgreSQL or MySQL). +- Stores operations of the network in a SQL database of your choice (SQLite or PostgreSQL). - Materializes views on top of the known data. - Answers filterable and paginated data queries. - Discovers other nodes in local network and internet. diff --git a/aquadoggo/migrations/20220509090252_create-operations.sql b/aquadoggo/migrations/20220509090252_create-operations.sql index 7147320ff..411fab6b6 100644 --- a/aquadoggo/migrations/20220509090252_create-operations.sql +++ b/aquadoggo/migrations/20220509090252_create-operations.sql @@ -15,7 +15,7 @@ CREATE TABLE IF NOT EXISTS operation_fields_v1 ( operation_id TEXT NOT NULL, name TEXT NOT NULL, field_type TEXT NOT NULL, - value BLOB NULL, + value TEXT NULL, list_index NUMERIC NOT NULL, FOREIGN KEY(operation_id) REFERENCES operations_v1(operation_id) ); diff --git a/aquadoggo/migrations/20220617115933_create-tasks.sql b/aquadoggo/migrations/20220617115933_create-tasks.sql index b351d722a..65c14ab83 100644 --- a/aquadoggo/migrations/20220617115933_create-tasks.sql +++ b/aquadoggo/migrations/20220617115933_create-tasks.sql @@ -11,8 +11,8 @@ CREATE TABLE IF NOT EXISTS tasks ( -- but we want to check for equality including `null` values. CREATE UNIQUE INDEX ux_tasks ON tasks ( name, - COALESCE(document_id, 0), - COALESCE(document_view_id, 0) + COALESCE(document_id, '0'), + COALESCE(document_view_id, '0') ); -- Create an index because primary keys can not contain `null` columns. diff --git a/aquadoggo/src/config.rs b/aquadoggo/src/config.rs index cf03f6f7e..810ba3081 100644 --- a/aquadoggo/src/config.rs +++ b/aquadoggo/src/config.rs @@ -27,7 +27,7 @@ pub struct Configuration { /// Path to data directory. pub base_path: Option, - /// Database url (sqlite, mysql or postgres). + /// Database url (SQLite or PostgreSQL). pub database_url: Option, /// Maximum number of database connections in pool. 
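The test suite resolves its database the same way, through a URL. A minimal sketch, assuming the helper name and the in-memory SQLite fallback (neither is taken from this patch), of how tests could pick up the `DATABASE_URL` exported by the PostgreSQL CI job above while defaulting to SQLite locally:

use std::env;

// Sketch only: use the URL exported by the CI service container if present,
// otherwise fall back to an in-memory SQLite database for local test runs.
fn test_database_url() -> String {
    env::var("DATABASE_URL").unwrap_or_else(|_| "sqlite::memory:".to_string())
}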
diff --git a/aquadoggo/src/db/mod.rs b/aquadoggo/src/db/mod.rs index 22f0e6bea..aeb6faaaa 100644 --- a/aquadoggo/src/db/mod.rs +++ b/aquadoggo/src/db/mod.rs @@ -21,8 +21,6 @@ pub async fn create_database(url: &str) -> Result<()> { Any::create_database(url).await?; } - Any::drop_database(url); - Ok(()) } diff --git a/aquadoggo/src/db/models/operation.rs b/aquadoggo/src/db/models/operation.rs index bca601e88..a1b99e581 100644 --- a/aquadoggo/src/db/models/operation.rs +++ b/aquadoggo/src/db/models/operation.rs @@ -23,8 +23,9 @@ pub struct OperationRow { /// The id of the schema this operation follows. pub schema_id: String, - /// The previous operations of this operation concatenated into string format with `_` seperator. - pub previous_operations: String, + /// The previous operations of this operation concatenated into string format with `_` + /// separator. + pub previous_operations: Option, } /// A struct representing a single operation field row as it is inserted in the database. @@ -34,13 +35,19 @@ pub struct OperationFieldRow { pub operation_id: String, /// The name of this field. - pub name: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub name: Option, /// The type of this field. - pub field_type: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub field_type: Option, /// The actual value contained in this field. - pub value: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub value: Option, } /// A struct representing a joined OperationRow and OperationFieldRow. @@ -64,15 +71,22 @@ pub struct OperationFieldsJoinedRow { /// The id of the schema this operation follows. pub schema_id: String, - /// The previous operations of this operation concatenated into string format with `_` seperator. - pub previous_operations: String, + /// The previous operations of this operation concatenated into string format with `_` + /// separator. + pub previous_operations: Option, /// The name of this field. - pub name: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub name: Option, /// The type of this field. - pub field_type: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub field_type: Option, /// The actual value contained in this field. - pub value: String, + /// + /// This is an Option as a DELETE operation contains no fields. 
+ pub value: Option, } diff --git a/aquadoggo/src/db/stores/document.rs b/aquadoggo/src/db/stores/document.rs index 9efa670fd..5a44fa67c 100644 --- a/aquadoggo/src/db/stores/document.rs +++ b/aquadoggo/src/db/stores/document.rs @@ -216,7 +216,7 @@ impl DocumentStore for SqlStorage { documents LEFT JOIN document_view_fields ON - documents.document_view_id = document_view_fields.document_view_id + documents.document_view_id = document_view_fields.document_view_id LEFT JOIN operation_fields_v1 ON document_view_fields.operation_id = operation_fields_v1.operation_id @@ -268,7 +268,7 @@ impl DocumentStore for SqlStorage { documents LEFT JOIN document_view_fields ON - documents.document_view_id = document_view_fields.document_view_id + documents.document_view_id = document_view_fields.document_view_id LEFT JOIN operation_fields_v1 ON document_view_fields.operation_id = operation_fields_v1.operation_id @@ -334,13 +334,15 @@ mod tests { use crate::db::stores::document::{DocumentStore, DocumentView}; use crate::db::stores::entry::StorageEntry; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; fn entries_to_document_views(entries: &[StorageEntry]) -> Vec { let mut document_views = Vec::new(); let mut current_document_view_fields = DocumentViewFields::new(); + for entry in entries { let operation_id: OperationId = entry.hash().into(); + for (name, value) in entry.operation().fields().unwrap().iter() { if entry.operation().is_delete() { continue; @@ -349,335 +351,325 @@ mod tests { .insert(name, DocumentViewValue::new(&operation_id, value)); } } + let document_view_fields = DocumentViewFields::new_from_operation_fields( &operation_id, &entry.operation().fields().unwrap(), ); + let document_view = DocumentView::new(&operation_id.clone().into(), &document_view_fields); + document_views.push(document_view) } + document_views } #[rstest] - #[tokio::test] - async fn inserts_gets_one_document_view( + fn inserts_gets_one_document_view( #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - // Get one entry from the pre-polulated db - let entry = db - .store - .get_entry_at_seq_num(&author, &LogId::new(1), &SeqNum::new(1).unwrap()) - .await - .unwrap() - .unwrap(); - - // Construct a `DocumentView` - let operation_id: OperationId = entry.hash().into(); - let document_view_id: DocumentViewId = operation_id.clone().into(); - let document_view = DocumentView::new( - &document_view_id, - &DocumentViewFields::new_from_operation_fields( - &operation_id, - &entry.operation().fields().unwrap(), - ), - ); + // Get one entry from the pre-polulated db + let entry = db + .store + .get_entry_at_seq_num(&author, &LogId::new(1), &SeqNum::new(1).unwrap()) + .await + .unwrap() + .unwrap(); - // Insert into db - let result = db - .store - .insert_document_view(&document_view, &SchemaId::from_str(TEST_SCHEMA_ID).unwrap()) - .await; + // Construct a `DocumentView` + let operation_id: OperationId = entry.hash().into(); + let document_view_id: DocumentViewId = operation_id.clone().into(); + let document_view = DocumentView::new( + &document_view_id, + &DocumentViewFields::new_from_operation_fields( + &operation_id, + &entry.operation().fields().unwrap(), + 
), + ); - assert!(result.is_ok()); + // Insert into db + let result = db + .store + .insert_document_view(&document_view, &SchemaId::from_str(TEST_SCHEMA_ID).unwrap()) + .await; - let retrieved_document_view = db - .store - .get_document_view_by_id(&document_view_id) - .await - .unwrap() - .unwrap(); - - for key in [ - "username", - "age", - "height", - "is_admin", - "profile_picture", - "many_profile_pictures", - "special_profile_picture", - "many_special_profile_pictures", - "another_relation_field", - ] { - assert!(retrieved_document_view.get(key).is_some()); - assert_eq!(retrieved_document_view.get(key), document_view.get(key)); - } + assert!(result.is_ok()); + + let retrieved_document_view = db + .store + .get_document_view_by_id(&document_view_id) + .await + .unwrap() + .unwrap(); + + for key in [ + "username", + "age", + "height", + "is_admin", + "profile_picture", + "many_profile_pictures", + "special_profile_picture", + "many_special_profile_pictures", + "another_relation_field", + ] { + assert!(retrieved_document_view.get(key).is_some()); + assert_eq!(retrieved_document_view.get(key), document_view.get(key)); + } + }); } #[rstest] - #[tokio::test] - async fn document_view_does_not_exist( + fn document_view_does_not_exist( random_document_view_id: DocumentViewId, #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - - let view_does_not_exist = db - .store - .get_document_view_by_id(&random_document_view_id) - .await - .unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let view_does_not_exist = db + .store + .get_document_view_by_id(&random_document_view_id) + .await + .unwrap(); - assert!(view_does_not_exist.is_none()) + assert!(view_does_not_exist.is_none()); + }); } #[rstest] - #[tokio::test] - async fn inserts_gets_many_document_views( + fn inserts_gets_many_document_views( #[from(test_db)] #[with(10, 1, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let schema_id = SchemaId::from_str(TEST_SCHEMA_ID).unwrap(); - - let log_id = LogId::default(); - let seq_num = SeqNum::default(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + let schema_id = SchemaId::from_str(TEST_SCHEMA_ID).unwrap(); - // Get 10 entries from the pre-populated test db - let entries = db - .store - .get_paginated_log_entries(&author, &log_id, &seq_num, 10) - .await - .unwrap(); - - // Parse them into document views - let document_views = entries_to_document_views(&entries); + let log_id = LogId::default(); + let seq_num = SeqNum::default(); - // Insert each of these views into the db - for document_view in document_views.clone() { - db.store - .insert_document_view(&document_view, &schema_id) + // Get 10 entries from the pre-populated test db + let entries = db + .store + .get_paginated_log_entries(&author, &log_id, &seq_num, 10) .await .unwrap(); - } - // Retrieve them again and assert they are the same as the inserted ones - for (count, entry) in entries.iter().enumerate() { - let result = db.store.get_document_view_by_id(&entry.hash().into()).await; + // Parse them into document views + let document_views = entries_to_document_views(&entries); - 
assert!(result.is_ok()); + // Insert each of these views into the db + for document_view in document_views.clone() { + db.store + .insert_document_view(&document_view, &schema_id) + .await + .unwrap(); + } - let document_view = result.unwrap().unwrap(); + // Retrieve them again and assert they are the same as the inserted ones + for (count, entry) in entries.iter().enumerate() { + let result = db.store.get_document_view_by_id(&entry.hash().into()).await; - // The update operation should be included in the view correctly, we check that here. - let expected_username = if count == 0 { - DocumentViewValue::new( - &entry.hash().into(), - &OperationValue::Text("panda".to_string()), - ) - } else { - DocumentViewValue::new( - &entry.hash().into(), - &OperationValue::Text("PANDA".to_string()), - ) - }; - assert_eq!(document_view.get("username").unwrap(), &expected_username); - } + assert!(result.is_ok()); + + let document_view = result.unwrap().unwrap(); + + // The update operation should be included in the view correctly, we check that here. + let expected_username = if count == 0 { + DocumentViewValue::new( + &entry.hash().into(), + &OperationValue::Text("panda".to_string()), + ) + } else { + DocumentViewValue::new( + &entry.hash().into(), + &OperationValue::Text("PANDA".to_string()), + ) + }; + assert_eq!(document_view.get("username").unwrap(), &expected_username); + } + }); } #[rstest] - #[tokio::test] - async fn insert_document_view_with_missing_operation( + fn insert_document_view_with_missing_operation( #[from(random_operation_id)] operation_id: OperationId, #[from(random_document_view_id)] document_view_id: DocumentViewId, - #[from(test_db)] - #[future] - db: TestSqlStore, - + #[from(test_db)] runner: TestDatabaseRunner, operation: Operation, ) { - let db = db.await; - let document_view = DocumentView::new( - &document_view_id, - &DocumentViewFields::new_from_operation_fields( - &operation_id, - &operation.fields().unwrap(), - ), - ); - - let result = db - .store - .insert_document_view(&document_view, &SchemaId::from_str(TEST_SCHEMA_ID).unwrap()) - .await; - - assert_eq!( - result.unwrap_err().to_string(), - "A fatal error occured in DocumentStore: error returned from database: FOREIGN KEY constraint failed".to_string() - ); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view = DocumentView::new( + &document_view_id, + &DocumentViewFields::new_from_operation_fields( + &operation_id, + &operation.fields().unwrap(), + ), + ); + + let result = db + .store + .insert_document_view(&document_view, &SchemaId::from_str(TEST_SCHEMA_ID).unwrap()) + .await; + + assert!(result.is_err()); + }); } #[rstest] - #[tokio::test] - async fn inserts_gets_documents( + fn inserts_gets_documents( #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let document_id = db.documents[0].clone(); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_id = db.documents[0].clone(); - let document_operations = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let document_operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); + + let document = DocumentBuilder::new(document_operations).build().unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); + let result = db.store.insert_document(&document).await; - let result = db.store.insert_document(&document).await; + assert!(result.is_ok()); - 
assert!(result.is_ok()); + let document_view = db + .store + .get_document_view_by_id(document.view_id()) + .await + .unwrap() + .unwrap(); - let document_view = db - .store - .get_document_view_by_id(document.view_id()) - .await - .unwrap() - .unwrap(); - - let expected_document_view = document.view().unwrap(); - - for key in [ - "username", - "age", - "height", - "is_admin", - "profile_picture", - "many_profile_pictures", - "special_profile_picture", - "many_special_profile_pictures", - "another_relation_field", - ] { - assert!(document_view.get(key).is_some()); - assert_eq!(document_view.get(key), expected_document_view.get(key)); - } + let expected_document_view = document.view().unwrap(); + + for key in [ + "username", + "age", + "height", + "is_admin", + "profile_picture", + "many_profile_pictures", + "special_profile_picture", + "many_special_profile_pictures", + "another_relation_field", + ] { + assert!(document_view.get(key).is_some()); + assert_eq!(document_view.get(key), expected_document_view.get(key)); + } + }); } #[rstest] - #[tokio::test] - async fn gets_document_by_id( + fn gets_document_by_id( #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let document_id = db.documents[0].clone(); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_id = db.documents[0].clone(); - let document_operations = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let document_operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); - let result = db.store.insert_document(&document).await; + let result = db.store.insert_document(&document).await; - assert!(result.is_ok()); + assert!(result.is_ok()); - let document_view = db - .store - .get_document_by_id(document.id()) - .await - .unwrap() - .unwrap(); - - let expected_document_view = document.view().unwrap(); - - for key in [ - "username", - "age", - "height", - "is_admin", - "profile_picture", - "many_profile_pictures", - "special_profile_picture", - "many_special_profile_pictures", - "another_relation_field", - ] { - assert!(document_view.get(key).is_some()); - assert_eq!(document_view.get(key), expected_document_view.get(key)); - } + let document_view = db + .store + .get_document_by_id(document.id()) + .await + .unwrap() + .unwrap(); + + let expected_document_view = document.view().unwrap(); + + for key in [ + "username", + "age", + "height", + "is_admin", + "profile_picture", + "many_profile_pictures", + "special_profile_picture", + "many_special_profile_pictures", + "another_relation_field", + ] { + assert!(document_view.get(key).is_some()); + assert_eq!(document_view.get(key), expected_document_view.get(key)); + } + }); } #[rstest] - #[tokio::test] - async fn no_view_when_document_deleted( + fn no_view_when_document_deleted( #[from(test_db)] #[with(10, 1, true)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let document_id = db.documents[0].clone(); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_id = db.documents[0].clone(); - let document_operations = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let document_operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); - let document = 
DocumentBuilder::new(document_operations).build().unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); - let result = db.store.insert_document(&document).await; + let result = db.store.insert_document(&document).await; - assert!(result.is_ok()); + assert!(result.is_ok()); - let document_view = db.store.get_document_by_id(document.id()).await.unwrap(); + let document_view = db.store.get_document_by_id(document.id()).await.unwrap(); - assert!(document_view.is_none()); + assert!(document_view.is_none()); + }); } #[rstest] - #[tokio::test] - async fn gets_documents_by_schema( + fn gets_documents_by_schema( #[from(test_db)] #[with(10, 2, false, TEST_SCHEMA_ID.parse().unwrap())] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let schema_id = SchemaId::from_str(TEST_SCHEMA_ID).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let schema_id = SchemaId::from_str(TEST_SCHEMA_ID).unwrap(); - for document_id in &db.documents { - let document_operations = db - .store - .get_operations_by_document_id(document_id) - .await - .unwrap(); + for document_id in &db.documents { + let document_operations = db + .store + .get_operations_by_document_id(document_id) + .await + .unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); - db.store.insert_document(&document).await.unwrap(); - } + db.store.insert_document(&document).await.unwrap(); + } - let schema_documents = db.store.get_documents_by_schema(&schema_id).await.unwrap(); + let schema_documents = db.store.get_documents_by_schema(&schema_id).await.unwrap(); - assert_eq!(schema_documents.len(), 2) + assert_eq!(schema_documents.len(), 2); + }); } } diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index 919c5c8c4..80f5d2a3d 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -2,8 +2,6 @@ use async_trait::async_trait; use lipmaa_link::get_lipmaa_links_back_to; -use sqlx::{query, query_as}; - use p2panda_rs::entry::{decode_entry, Entry, EntrySigned, LogId, SeqNum}; use p2panda_rs::hash::Hash; use p2panda_rs::identity::Author; @@ -13,17 +11,17 @@ use p2panda_rs::storage_provider::errors::EntryStorageError; use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore}; use p2panda_rs::storage_provider::ValidationError; use p2panda_rs::Validate; +use sqlx::{query, query_as}; use crate::db::models::EntryRow; use crate::db::provider::SqlStorage; -/// A signed entry and it's encoded operation. Entries are the lowest level data -/// type on the p2panda network, they are signed by authors and form bamboo append -/// only logs. The operation is an entries' payload, it contains the data mutations -/// which authors publish. +/// A signed entry and it's encoded operation. Entries are the lowest level data type on the +/// p2panda network, they are signed by authors and form bamboo append only logs. The operation is +/// an entries' payload, it contains the data mutations which authors publish. /// -/// This struct implements the `AsStorageEntry` trait which is required when -/// constructing the `EntryStore`. +/// This struct implements the `AsStorageEntry` trait which is required when constructing the +/// `EntryStore`. 
#[derive(Debug, Clone, PartialEq)] pub struct StorageEntry { entry_signed: EntrySigned, @@ -58,9 +56,9 @@ impl Validate for StorageEntry { } } -/// `From` implementation for converting an `EntryRow` into a `StorageEntry`. This is useful -/// when retrieving entries from the database. The `sqlx` crate coerces returned entry rows -/// into `EntryRow` but we normally want them as `StorageEntry`. +/// `From` implementation for converting an `EntryRow` into a `StorageEntry`. This is useful when +/// retrieving entries from the database. The `sqlx` crate coerces returned entry rows into +/// `EntryRow` but we normally want them as `StorageEntry`. impl From for StorageEntry { fn from(entry_row: EntryRow) -> Self { // Unwrapping everything here as we assume values coming from the database are valid. @@ -122,12 +120,11 @@ impl AsStorageEntry for StorageEntry { } } -/// Implementation of `EntryStore` trait which is required when constructing a -/// `StorageProvider`. +/// Implementation of `EntryStore` trait which is required when constructing a `StorageProvider`. /// -/// Handles storage and retrieval of entries in the form of`StorageEntry` which -/// implements the required `AsStorageEntry` trait. An intermediary struct `EntryRow` -/// is also used when retrieving an entry from the database. +/// Handles storage and retrieval of entries in the form of`StorageEntry` which implements the +/// required `AsStorageEntry` trait. An intermediary struct `EntryRow` is also used when retrieving +/// an entry from the database. #[async_trait] impl EntryStore for SqlStorage { /// Insert an entry into storage. @@ -174,9 +171,9 @@ impl EntryStore for SqlStorage { /// Get an entry from storage by it's hash id. /// - /// Returns a result containing the entry wrapped in an option if it was - /// found successfully. Returns `None` if the entry was not found in storage. - /// Errors when a fatal storage error occured. + /// Returns a result containing the entry wrapped in an option if it was found successfully. + /// Returns `None` if the entry was not found in storage. Errors when a fatal storage error + /// occured. async fn get_entry_by_hash( &self, hash: &Hash, @@ -207,9 +204,9 @@ impl EntryStore for SqlStorage { /// Get an entry at a sequence position within an author's log. /// - /// Returns a result containing the entry wrapped in an option if it was found - /// successfully. Returns None if the entry was not found in storage. Errors when - /// a fatal storage error occured. + /// Returns a result containing the entry wrapped in an option if it was found successfully. + /// Returns None if the entry was not found in storage. Errors when a fatal storage error + /// occured. async fn get_entry_at_seq_num( &self, author: &Author, @@ -246,9 +243,9 @@ impl EntryStore for SqlStorage { /// Get the latest entry of an author's log. /// - /// Returns a result containing the latest log entry wrapped in an option if an - /// entry was found. Returns None if the specified author and log could not be - /// found in storage. Errors when a fatal storage error occured. + /// Returns a result containing the latest log entry wrapped in an option if an entry was + /// found. Returns None if the specified author and log could not be found in storage. Errors + /// when a fatal storage error occured. 
async fn get_latest_entry( &self, author: &Author, @@ -286,9 +283,9 @@ impl EntryStore for SqlStorage { /// Get all entries of a given schema /// - /// Returns a result containing a vector of all entries which follow the passed - /// schema (identified by it's `SchemaId`). If no entries exist, or the schema - /// is not known by this node, then an empty vector is returned. + /// Returns a result containing a vector of all entries which follow the passed schema + /// (identified by it's `SchemaId`). If no entries exist, or the schema is not known by this + /// node, then an empty vector is returned. async fn get_entries_by_schema( &self, schema: &SchemaId, @@ -322,9 +319,9 @@ impl EntryStore for SqlStorage { /// Get all entries of a given schema. /// - /// Returns a result containing a vector of all entries which follow the passed - /// schema (identified by it's `SchemaId`). If no entries exist, or the schema - /// is not known by this node, then an empty vector is returned. + /// Returns a result containing a vector of all entries which follow the passed schema + /// (identified by it's `SchemaId`). If no entries exist, or the schema is not known by this + /// node, then an empty vector is returned. async fn get_paginated_log_entries( &self, author: &Author, @@ -348,7 +345,7 @@ impl EntryStore for SqlStorage { WHERE author = $1 AND log_id = $2 - AND CAST(seq_num AS NUMERIC) BETWEEN $3 and $4 + AND CAST(seq_num AS NUMERIC) BETWEEN CAST($3 AS NUMERIC) and CAST($4 AS NUMERIC) ORDER BY CAST(seq_num AS NUMERIC) ", @@ -366,13 +363,12 @@ impl EntryStore for SqlStorage { /// Get all entries which make up the certificate pool for a specified entry. /// - /// Returns a result containing a vector of all stored entries which are part - /// the passed entries' certificate pool. Errors if a fatal storage error - /// occurs. + /// Returns a result containing a vector of all stored entries which are part the passed + /// entries' certificate pool. Errors if a fatal storage error occurs. /// - /// It is worth noting that this method doesn't check if the certificate pool - /// is complete, it only returns entries which are part of the pool and found - /// in storage. If an entry was not stored, then the pool may be incomplete. + /// It is worth noting that this method doesn't check if the certificate pool is complete, it + /// only returns entries which are part of the pool and found in storage. If an entry was not + /// stored, then the pool may be incomplete. 
async fn get_certificate_pool( &self, author: &Author, @@ -435,259 +431,247 @@ mod tests { use rstest::rstest; use crate::db::stores::entry::StorageEntry; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; #[rstest] - #[tokio::test] - async fn insert_entry( - key_pair: KeyPair, - entry: Entry, - #[from(test_db)] - #[future] - db: TestSqlStore, - ) { - let db = db.await; - let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); - let operation_encoded = OperationEncoded::try_from(entry.operation().unwrap()).unwrap(); - let doggo_entry = StorageEntry::new(&entry_encoded, &operation_encoded).unwrap(); - let result = db.store.insert_entry(doggo_entry).await; - - assert!(result.is_ok()) + fn insert_entry(key_pair: KeyPair, entry: Entry, #[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(entry.operation().unwrap()).unwrap(); + let doggo_entry = StorageEntry::new(&entry_encoded, &operation_encoded).unwrap(); + let result = db.store.insert_entry(doggo_entry).await; + + assert!(result.is_ok()); + }); } #[rstest] - #[tokio::test] - async fn try_insert_non_unique_entry( + fn try_insert_non_unique_entry( #[from(test_db)] #[with(10, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let log_id = LogId::new(1); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + let log_id = LogId::new(1); - let first_entry = db - .store - .get_entry_at_seq_num(&author, &log_id, &SeqNum::new(1).unwrap()) - .await - .unwrap() - .unwrap(); + let first_entry = db + .store + .get_entry_at_seq_num(&author, &log_id, &SeqNum::new(1).unwrap()) + .await + .unwrap() + .unwrap(); - let duplicate_doggo_entry = StorageEntry::new( - first_entry.entry_signed(), - first_entry.operation_encoded().unwrap(), - ) - .unwrap(); - let result = db.store.insert_entry(duplicate_doggo_entry).await; + let duplicate_doggo_entry = StorageEntry::new( + first_entry.entry_signed(), + first_entry.operation_encoded().unwrap(), + ) + .unwrap(); - assert_eq!( - result.unwrap_err().to_string(), - "Error occured during `EntryStorage` request in storage provider: error returned from \ - database: UNIQUE constraint failed: entries.author, entries.log_id, entries.seq_num" - ) + let result = db.store.insert_entry(duplicate_doggo_entry).await; + assert!(result.is_err()); + }); } #[rstest] - #[tokio::test] - async fn latest_entry( + fn latest_entry( #[from(test_db)] #[with(20, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); - let log_id = LogId::new(1); + runner.with_db_teardown(|db: TestDatabase| async move { + let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); + let log_id = LogId::new(1); - let latest_entry = db - .store - .get_latest_entry(&author_not_in_db, &log_id) - .await - .unwrap(); - assert!(latest_entry.is_none()); + let latest_entry = db + .store + .get_latest_entry(&author_not_in_db, &log_id) + .await + .unwrap(); + assert!(latest_entry.is_none()); - let author_in_db = 
Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + let author_in_db = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let latest_entry = db - .store - .get_latest_entry(&author_in_db, &log_id) - .await - .unwrap(); - assert_eq!(latest_entry.unwrap().seq_num(), SeqNum::new(20).unwrap()); + let latest_entry = db + .store + .get_latest_entry(&author_in_db, &log_id) + .await + .unwrap(); + assert_eq!(latest_entry.unwrap().seq_num(), SeqNum::new(20).unwrap()); + }); } #[rstest] - #[tokio::test] - async fn entries_by_schema( + fn entries_by_schema( #[from(test_db)] #[with(20, 2, false, TEST_SCHEMA_ID.parse().unwrap())] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let schema_not_in_the_db = SchemaId::new_application( - "venue", - &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), - ); + runner.with_db_teardown(|db: TestDatabase| async move { + let schema_not_in_the_db = SchemaId::new_application( + "venue", + &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), + ); - let entries = db - .store - .get_entries_by_schema(&schema_not_in_the_db) - .await - .unwrap(); - assert!(entries.is_empty()); + let entries = db + .store + .get_entries_by_schema(&schema_not_in_the_db) + .await + .unwrap(); + assert!(entries.is_empty()); - let schema_in_the_db = TEST_SCHEMA_ID.parse().unwrap(); + let schema_in_the_db = TEST_SCHEMA_ID.parse().unwrap(); - let entries = db - .store - .get_entries_by_schema(&schema_in_the_db) - .await - .unwrap(); - assert!(entries.len() == 40); + let entries = db + .store + .get_entries_by_schema(&schema_in_the_db) + .await + .unwrap(); + assert!(entries.len() == 40); + }); } #[rstest] - #[tokio::test] - async fn entry_by_seq_number( + fn entry_by_seq_number( #[from(test_db)] #[with(10, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - - for seq_num in 1..10 { - let seq_num = SeqNum::new(seq_num).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + + for seq_num in 1..10 { + let seq_num = SeqNum::new(seq_num).unwrap(); + let entry = db + .store + .get_entry_at_seq_num(&author, &LogId::new(1), &seq_num) + .await + .unwrap(); + assert_eq!(entry.unwrap().seq_num(), seq_num) + } + + let wrong_log = LogId::new(2); let entry = db .store - .get_entry_at_seq_num(&author, &LogId::new(1), &seq_num) + .get_entry_at_seq_num(&author, &wrong_log, &SeqNum::new(1).unwrap()) .await .unwrap(); - assert_eq!(entry.unwrap().seq_num(), seq_num) - } + assert!(entry.is_none()); - let wrong_log = LogId::new(2); - let entry = db - .store - .get_entry_at_seq_num(&author, &wrong_log, &SeqNum::new(1).unwrap()) - .await - .unwrap(); - assert!(entry.is_none()); - - let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); - let entry = db - .store - .get_entry_at_seq_num(&author_not_in_db, &LogId::new(1), &SeqNum::new(1).unwrap()) - .await - .unwrap(); - assert!(entry.is_none()); + let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); + let entry = db + .store + .get_entry_at_seq_num(&author_not_in_db, &LogId::new(1), &SeqNum::new(1).unwrap()) + .await + .unwrap(); + assert!(entry.is_none()); - let seq_num_not_in_log = SeqNum::new(1000).unwrap(); - let entry = db - .store - .get_entry_at_seq_num(&author_not_in_db, &LogId::new(1), &seq_num_not_in_log) - .await 
- .unwrap(); - assert!(entry.is_none()) + let seq_num_not_in_log = SeqNum::new(1000).unwrap(); + let entry = db + .store + .get_entry_at_seq_num(&author_not_in_db, &LogId::new(1), &seq_num_not_in_log) + .await + .unwrap(); + assert!(entry.is_none()); + }); } #[rstest] - #[tokio::test] - async fn get_entry_by_hash( + fn get_entry_by_hash( #[from(test_db)] #[with(20, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - - for seq_num in [1, 11, 18] { - let seq_num = SeqNum::new(seq_num).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + + for seq_num in [1, 11, 18] { + let seq_num = SeqNum::new(seq_num).unwrap(); + let entry = db + .store + .get_entry_at_seq_num(&author, &LogId::new(1), &seq_num) + .await + .unwrap() + .unwrap(); + + let entry_hash = entry.hash(); + let entry_by_hash = db + .store + .get_entry_by_hash(&entry_hash) + .await + .unwrap() + .unwrap(); + assert_eq!(entry, entry_by_hash) + } + + let entry_hash_not_in_db = Hash::new_from_bytes(vec![1, 2, 3]).unwrap(); let entry = db .store - .get_entry_at_seq_num(&author, &LogId::new(1), &seq_num) - .await - .unwrap() - .unwrap(); - - let entry_hash = entry.hash(); - let entry_by_hash = db - .store - .get_entry_by_hash(&entry_hash) + .get_entry_by_hash(&entry_hash_not_in_db) .await - .unwrap() .unwrap(); - assert_eq!(entry, entry_by_hash) - } - - let entry_hash_not_in_db = Hash::new_from_bytes(vec![1, 2, 3]).unwrap(); - let entry = db - .store - .get_entry_by_hash(&entry_hash_not_in_db) - .await - .unwrap(); - assert!(entry.is_none()) + assert!(entry.is_none()); + }); } #[rstest] - #[tokio::test] - async fn paginated_log_entries( + fn paginated_log_entries( #[from(test_db)] #[with(30, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let entries = db - .store - .get_paginated_log_entries(&author, &LogId::default(), &SeqNum::default(), 20) - .await - .unwrap(); + let entries = db + .store + .get_paginated_log_entries(&author, &LogId::default(), &SeqNum::default(), 20) + .await + .unwrap(); - for entry in entries.clone() { - assert!(entry.seq_num().as_u64() >= 1 && entry.seq_num().as_u64() <= 20) - } + for entry in entries.clone() { + assert!(entry.seq_num().as_u64() >= 1 && entry.seq_num().as_u64() <= 20) + } - assert_eq!(entries.len(), 20); + assert_eq!(entries.len(), 20); - let entries = db - .store - .get_paginated_log_entries(&author, &LogId::default(), &SeqNum::new(21).unwrap(), 20) - .await - .unwrap(); + let entries = db + .store + .get_paginated_log_entries( + &author, + &LogId::default(), + &SeqNum::new(21).unwrap(), + 20, + ) + .await + .unwrap(); - assert_eq!(entries.len(), 10); + assert_eq!(entries.len(), 10); + }); } #[rstest] - #[tokio::test] - async fn get_lipmaa_link_entries( + fn get_lipmaa_link_entries( #[from(test_db)] #[with(100, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = 
Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let entries = db - .store - .get_certificate_pool(&author, &LogId::default(), &SeqNum::new(20).unwrap()) - .await - .unwrap(); + let entries = db + .store + .get_certificate_pool(&author, &LogId::default(), &SeqNum::new(20).unwrap()) + .await + .unwrap(); - let cert_pool_seq_nums = entries - .iter() - .map(|entry| entry.seq_num().as_u64()) - .collect::>(); + let cert_pool_seq_nums = entries + .iter() + .map(|entry| entry.seq_num().as_u64()) + .collect::>(); - assert!(!entries.is_empty()); - assert_eq!(cert_pool_seq_nums, vec![19, 18, 17, 13, 4, 1]); + assert!(!entries.is_empty()); + assert_eq!(cert_pool_seq_nums, vec![19, 18, 17, 13, 4, 1]); + }); } } diff --git a/aquadoggo/src/db/stores/log.rs b/aquadoggo/src/db/stores/log.rs index 43a277690..a88fd8766 100644 --- a/aquadoggo/src/db/stores/log.rs +++ b/aquadoggo/src/db/stores/log.rs @@ -1,14 +1,13 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use async_trait::async_trait; -use sqlx::{query, query_scalar}; - use p2panda_rs::document::DocumentId; use p2panda_rs::entry::LogId; use p2panda_rs::identity::Author; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::errors::LogStorageError; use p2panda_rs::storage_provider::traits::{AsStorageLog, LogStore}; +use sqlx::{query, query_scalar}; use crate::db::provider::SqlStorage; @@ -179,226 +178,190 @@ impl LogStore for SqlStorage { #[cfg(test)] mod tests { - use std::convert::TryFrom; - - use p2panda_rs::document::DocumentViewId; - use p2panda_rs::entry::{sign_and_encode, Entry as P2PandaEntry, LogId, SeqNum}; + use p2panda_rs::document::{DocumentId, DocumentViewId}; + use p2panda_rs::entry::{EntrySigned, LogId}; use p2panda_rs::hash::Hash; - use p2panda_rs::identity::{Author, KeyPair}; - use p2panda_rs::operation::{Operation, OperationEncoded, OperationFields, OperationValue}; + use p2panda_rs::identity::Author; + use p2panda_rs::operation::{OperationEncoded, OperationId}; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::traits::{ AsStorageEntry, AsStorageLog, EntryStore, LogStore, StorageProvider, }; + use p2panda_rs::test_utils::fixtures::{ + entry_signed_encoded, operation_encoded, public_key, random_document_id, + random_operation_id, schema, + }; + use rstest::rstest; - use crate::db::provider::SqlStorage; use crate::db::stores::entry::StorageEntry; use crate::db::stores::log::StorageLog; - use crate::test_helpers::{initialize_db, random_entry_hash}; - - const TEST_AUTHOR: &str = "58223678ab378f1b07d1d8c789e6da01d16a06b1a4d17cc10119a0109181156c"; - - #[tokio::test] - async fn initial_log_id() { - let pool = initialize_db().await; - let author = Author::new(TEST_AUTHOR).unwrap(); - let storage_provider = SqlStorage { pool }; - - let log_id = storage_provider - .find_document_log_id(&author, None) - .await - .unwrap(); - - assert_eq!(log_id, LogId::new(1)); + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + + #[rstest] + fn initial_log_id( + #[from(public_key)] author: Author, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let log_id = db.store.find_document_log_id(&author, None).await.unwrap(); + assert_eq!(log_id, LogId::new(1)); + }); } - #[tokio::test] - async fn prevent_duplicate_log_ids() { - let pool = initialize_db().await; - let storage_provider = SqlStorage { pool }; - - let author = Author::new(TEST_AUTHOR).unwrap(); - let document = Hash::new(&random_entry_hash()).unwrap(); - let 
schema = - SchemaId::new_application("venue", &Hash::new(&random_entry_hash()).unwrap().into()); - - let log = StorageLog::new(&author, &schema, &document.clone().into(), &LogId::new(1)); - assert!(storage_provider.insert_log(log).await.is_ok()); - - let log = StorageLog::new(&author, &schema, &document.into(), &LogId::new(1)); - assert!(storage_provider.insert_log(log).await.is_err()); + #[rstest] + fn prevent_duplicate_log_ids( + #[from(public_key)] author: Author, + #[from(schema)] schema: SchemaId, + #[from(random_document_id)] document: DocumentId, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let log = StorageLog::new(&author, &schema, &document.clone(), &LogId::new(1)); + assert!(db.store.insert_log(log).await.is_ok()); + + let log = StorageLog::new(&author, &schema, &document, &LogId::new(1)); + assert!(db.store.insert_log(log).await.is_err()); + }); } - #[tokio::test] - async fn with_multi_hash_schema_id() { - let pool = initialize_db().await; - let storage_provider = SqlStorage { pool }; - - let author = Author::new(TEST_AUTHOR).unwrap(); - let document = Hash::new(&random_entry_hash()).unwrap(); - let schema = SchemaId::new_application( - "venue", - &DocumentViewId::new(&[ - Hash::new(&random_entry_hash()).unwrap().into(), - Hash::new(&random_entry_hash()).unwrap().into(), - ]) - .unwrap(), - ); - - let log = StorageLog::new(&author, &schema, &document.into(), &LogId::new(1)); - - assert!(storage_provider.insert_log(log).await.is_ok()); + #[rstest] + fn with_multi_hash_schema_id( + #[from(public_key)] author: Author, + #[from(random_operation_id)] operation_id_1: OperationId, + #[from(random_operation_id)] operation_id_2: OperationId, + #[from(random_document_id)] document: DocumentId, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let schema = SchemaId::new_application( + "venue", + &DocumentViewId::new(&[operation_id_1, operation_id_2]).unwrap(), + ); + + let log = StorageLog::new(&author, &schema, &document, &LogId::new(1)); + + assert!(db.store.insert_log(log).await.is_ok()); + }); } - #[tokio::test] - async fn selecting_next_log_id() { - let pool = initialize_db().await; - let key_pair = KeyPair::new(); - let author = Author::try_from(*key_pair.public_key()).unwrap(); - let schema = SchemaId::new_application( - "venue", - &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), - ); - - let storage_provider = SqlStorage { pool }; - - let log_id = storage_provider - .find_document_log_id(&author, None) - .await - .unwrap(); - - // We expect to be given the next log id when asking for a possible log id for a new - // document by the same author - assert_eq!(log_id, LogId::default()); - - // Starting with an empty db, we expect to be able to count up from 1 and expect each - // inserted document's log id to be euqal to the count index - for n in 1..12 { - let doc = Hash::new_from_bytes(vec![1, 2, n]).unwrap().into(); - - let log_id = storage_provider - .find_document_log_id(&author, None) - .await - .unwrap(); - assert_eq!(LogId::new(n.into()), log_id); - let log = StorageLog::new(&author, &schema, &doc, &log_id); - storage_provider.insert_log(log).await.unwrap(); - } + #[rstest] + fn selecting_next_log_id( + #[from(public_key)] author: Author, + #[from(schema)] schema: SchemaId, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let log_id = db.store.find_document_log_id(&author, 
None).await.unwrap(); + + // We expect to be given the next log id when asking for a possible log id for a new + // document by the same author + assert_eq!(log_id, LogId::default()); + + // Starting with an empty db, we expect to be able to count up from 1 and expect each + // inserted document's log id to be euqal to the count index + for n in 1..12 { + let doc = Hash::new_from_bytes(vec![1, 2, n]).unwrap().into(); + let log_id = db.store.find_document_log_id(&author, None).await.unwrap(); + + assert_eq!(LogId::new(n.into()), log_id); + + let log = StorageLog::new(&author, &schema, &doc, &log_id); + db.store.insert_log(log).await.unwrap(); + } + }); } - #[tokio::test] - async fn document_log_id() { - let pool = initialize_db().await; - - // Create a new document - // TODO: use p2panda-rs test utils once available - let key_pair = KeyPair::new(); - let author = Author::try_from(*key_pair.public_key()).unwrap(); - let log_id = LogId::new(1); - let schema = SchemaId::new_application( - "venue", - &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), - ); - let seq_num = SeqNum::new(1).unwrap(); - let mut fields = OperationFields::new(); - fields - .add("test", OperationValue::Text("Hello".to_owned())) - .unwrap(); - let operation = Operation::new_create(schema.clone(), fields).unwrap(); - let operation_encoded = OperationEncoded::try_from(&operation).unwrap(); - let entry = P2PandaEntry::new(&log_id, Some(&operation), None, None, &seq_num).unwrap(); - let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); - - let storage_provider = SqlStorage { pool }; - - // Expect database to return nothing yet - assert_eq!( - storage_provider - .get_document_by_entry(&entry_encoded.hash()) - .await - .unwrap(), - None - ); - - let entry = StorageEntry::new(&entry_encoded.clone(), &operation_encoded).unwrap(); - - // Store entry in database - assert!(storage_provider.insert_entry(entry).await.is_ok()); - - let log = StorageLog::new( - &author, - &schema, - &entry_encoded.hash().into(), - &LogId::new(1), - ); - - // Store log in database - assert!(storage_provider.insert_log(log).await.is_ok()); - - // Expect to find document in database. The document hash should be the same as the hash of - // the entry which referred to the `CREATE` operation. - assert_eq!( - storage_provider - .get_document_by_entry(&entry_encoded.hash()) - .await - .unwrap(), - Some(entry_encoded.hash().into()) - ); - - // We expect to find this document in the default log - assert_eq!( - storage_provider - .find_document_log_id(&author, Some(&entry_encoded.hash().into())) - .await - .unwrap(), - LogId::default() - ); + #[rstest] + fn document_log_id( + #[from(schema)] schema: SchemaId, + #[from(entry_signed_encoded)] entry_encoded: EntrySigned, + #[from(operation_encoded)] operation_encoded: OperationEncoded, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + // Expect database to return nothing yet + assert_eq!( + db.store + .get_document_by_entry(&entry_encoded.hash()) + .await + .unwrap(), + None + ); + + let entry = StorageEntry::new(&entry_encoded.clone(), &operation_encoded).unwrap(); + let author = entry.author(); + + // Store entry in database + assert!(db.store.insert_entry(entry).await.is_ok()); + + let log = StorageLog::new( + &author, + &schema, + &entry_encoded.hash().into(), + &LogId::new(1), + ); + + // Store log in database + assert!(db.store.insert_log(log).await.is_ok()); + + // Expect to find document in database. 
The document hash should be the same as the + // hash of the entry which referred to the `CREATE` operation. + assert_eq!( + db.store + .get_document_by_entry(&entry_encoded.hash()) + .await + .unwrap(), + Some(entry_encoded.hash().into()) + ); + + // We expect to find this document in the default log + assert_eq!( + db.store + .find_document_log_id(&author, Some(&entry_encoded.hash().into())) + .await + .unwrap(), + LogId::default() + ); + }); } - #[tokio::test] - async fn log_ids() { - let pool = initialize_db().await; + #[rstest] + fn log_ids( + #[from(public_key)] author: Author, + #[from(test_db)] runner: TestDatabaseRunner, + #[from(schema)] schema: SchemaId, + #[from(random_document_id)] document_first: DocumentId, + #[from(random_document_id)] document_second: DocumentId, + #[from(random_document_id)] document_third: DocumentId, + #[from(random_document_id)] document_forth: DocumentId, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + // Register two log ids at the beginning + let log_1 = StorageLog::new(&author, &schema, &document_first, &LogId::new(1)); + let log_2 = StorageLog::new(&author, &schema, &document_second, &LogId::new(2)); - // Mock author - let author = Author::new(TEST_AUTHOR).unwrap(); + db.store.insert_log(log_1).await.unwrap(); + db.store.insert_log(log_2).await.unwrap(); - // Mock schema - let schema = - SchemaId::new_application("venue", &Hash::new(&random_entry_hash()).unwrap().into()); + // Find next free log id and register it + let log_id = db.store.next_log_id(&author).await.unwrap(); + assert_eq!(log_id, LogId::new(3)); - // Mock four different document hashes - let document_first = Hash::new(&random_entry_hash()).unwrap(); - let document_second = Hash::new(&random_entry_hash()).unwrap(); - let document_third = Hash::new(&random_entry_hash()).unwrap(); - let document_forth = Hash::new(&random_entry_hash()).unwrap(); + let log_3 = StorageLog::new(&author, &schema, &document_third.into(), &log_id); - let storage_provider = SqlStorage { pool }; + db.store.insert_log(log_3).await.unwrap(); - // Register two log ids at the beginning - let log_1 = StorageLog::new(&author, &schema, &document_first.into(), &LogId::new(1)); - let log_2 = StorageLog::new(&author, &schema, &document_second.into(), &LogId::new(2)); + // Find next free log id and register it + let log_id = db.store.next_log_id(&author).await.unwrap(); + assert_eq!(log_id, LogId::new(4)); - storage_provider.insert_log(log_1).await.unwrap(); - storage_provider.insert_log(log_2).await.unwrap(); + let log_4 = StorageLog::new(&author, &schema, &document_forth.into(), &log_id); - // Find next free log id and register it - let log_id = storage_provider.next_log_id(&author).await.unwrap(); - assert_eq!(log_id, LogId::new(3)); + db.store.insert_log(log_4).await.unwrap(); - let log_3 = StorageLog::new(&author, &schema, &document_third.into(), &log_id); - - storage_provider.insert_log(log_3).await.unwrap(); - - // Find next free log id and register it - let log_id = storage_provider.next_log_id(&author).await.unwrap(); - assert_eq!(log_id, LogId::new(4)); - - let log_4 = StorageLog::new(&author, &schema, &document_forth.into(), &log_id); - - storage_provider.insert_log(log_4).await.unwrap(); - - // Find next free log id - let log_id = storage_provider.next_log_id(&author).await.unwrap(); - assert_eq!(log_id, LogId::new(5)); + // Find next free log id + let log_id = db.store.next_log_id(&author).await.unwrap(); + assert_eq!(log_id, LogId::new(5)); + }); } } diff --git 
a/aquadoggo/src/db/stores/operation.rs b/aquadoggo/src/db/stores/operation.rs index 371654e51..cac2b5d96 100644 --- a/aquadoggo/src/db/stores/operation.rs +++ b/aquadoggo/src/db/stores/operation.rs @@ -144,7 +144,7 @@ impl OperationStore for SqlStorage { .bind(name.to_owned()) .bind(value.field_type().to_string()) .bind(db_value) - .bind(index.to_string()) + .bind(index as i32) .execute(&self.pool) }) .collect::>() @@ -298,7 +298,7 @@ mod tests { }; use rstest::rstest; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; #[rstest] #[case::create_operation(create_operation(&default_fields()))] @@ -306,63 +306,57 @@ mod tests { #[case::update_operation_many_prev_ops(update_operation(&default_fields(), &random_previous_operations(12)))] #[case::delete_operation(delete_operation(&DEFAULT_HASH.parse().unwrap()))] #[case::delete_operation_many_prev_ops(delete_operation(&random_previous_operations(12)))] - #[tokio::test] - async fn insert_get_operations( + fn insert_get_operations( #[case] operation: Operation, #[from(public_key)] author: Author, operation_id: OperationId, document_id: DocumentId, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - // Construct the storage operation. - let operation = VerifiedOperation::new(&author, &operation_id, &operation).unwrap(); - - // Insert the doggo operation into the db, returns Ok(true) when succesful. - let result = db.store.insert_operation(&operation, &document_id).await; - assert!(result.is_ok()); - - // Request the previously inserted operation by it's id. - let returned_operation = db - .store - .get_operation_by_id(operation.operation_id()) - .await - .unwrap() - .unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + // Construct the storage operation. + let operation = VerifiedOperation::new(&author, &operation_id, &operation).unwrap(); + + // Insert the doggo operation into the db, returns Ok(true) when succesful. + let result = db.store.insert_operation(&operation, &document_id).await; + assert!(result.is_ok()); + + // Request the previously inserted operation by it's id. 
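Two backend-compatibility details show up in this file: the field's list index is now bound as a genuine integer (`index as i32`) instead of a string, presumably because `list_index` is an integer column that PostgreSQL will not fill from a text parameter, and the duplicate-insert test that follows only asserts that the second insert fails, since SQLite and PostgreSQL word their UNIQUE-violation errors differently. A rough sketch of both ideas against a hypothetical `items(value, list_index)` table:

use sqlx::any::AnyPool;

async fn insert_item(pool: &AnyPool, value: &str, index: usize) -> Result<(), sqlx::Error> {
    sqlx::query("INSERT INTO items (value, list_index) VALUES ($1, $2)")
        .bind(value)
        // Binding `index.to_string()` happens to work on SQLite but fails on
        // PostgreSQL, which expects an integer-typed parameter here.
        .bind(index as i32)
        .execute(pool)
        .await?;

    Ok(())
}

// In tests, assert the failure itself rather than a backend-specific message:
//
//     let result = insert_item(&pool, "duplicate", 0).await;
//     assert!(result.is_err());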
+ let returned_operation = db + .store + .get_operation_by_id(operation.operation_id()) + .await + .unwrap() + .unwrap(); - assert_eq!(returned_operation.public_key(), operation.public_key()); - assert_eq!(returned_operation.fields(), operation.fields()); - assert_eq!(returned_operation.operation_id(), operation.operation_id()); + assert_eq!(returned_operation.public_key(), operation.public_key()); + assert_eq!(returned_operation.fields(), operation.fields()); + assert_eq!(returned_operation.operation_id(), operation.operation_id()); + }); } #[rstest] - #[tokio::test] - async fn insert_operation_twice( + fn insert_operation_twice( #[from(verified_operation)] verified_operation: VerifiedOperation, document_id: DocumentId, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - - assert!(db - .store - .insert_operation(&verified_operation, &document_id) - .await - .is_ok()); + runner.with_db_teardown(|db: TestDatabase| async move { + db.store + .insert_operation(&verified_operation, &document_id) + .await + .unwrap(); - assert_eq!( - db.store.insert_operation(&verified_operation, &document_id).await.unwrap_err().to_string(), - "A fatal error occured in OperationStore: error returned from database: UNIQUE constraint failed: operations_v1.entry_hash" - ) + assert!(db + .store + .insert_operation(&verified_operation, &document_id) + .await + .is_err()); + }); } #[rstest] - #[tokio::test] - async fn gets_document_by_operation_id( + fn gets_document_by_operation_id( #[from(verified_operation)] #[with(Some(operation_fields(default_fields())), None, None, None, Some(DEFAULT_HASH.parse().unwrap()))] create_operation: VerifiedOperation, @@ -370,80 +364,77 @@ mod tests { #[with(Some(operation_fields(default_fields())), Some(DEFAULT_HASH.parse().unwrap()))] update_operation: VerifiedOperation, document_id: DocumentId, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - - assert!(db - .store - .get_document_by_operation_id(create_operation.operation_id()) - .await - .unwrap() - .is_none()); - - db.store - .insert_operation(&create_operation, &document_id) - .await - .unwrap(); - - assert_eq!( - db.store + runner.with_db_teardown(|db: TestDatabase| async move { + assert!(db + .store .get_document_by_operation_id(create_operation.operation_id()) .await .unwrap() - .unwrap(), - document_id.clone() - ); + .is_none()); - db.store - .insert_operation(&update_operation, &document_id) - .await - .unwrap(); + db.store + .insert_operation(&create_operation, &document_id) + .await + .unwrap(); + + assert_eq!( + db.store + .get_document_by_operation_id(create_operation.operation_id()) + .await + .unwrap() + .unwrap(), + document_id.clone() + ); - assert_eq!( db.store - .get_document_by_operation_id(create_operation.operation_id()) + .insert_operation(&update_operation, &document_id) .await - .unwrap() - .unwrap(), - document_id.clone() - ); + .unwrap(); + + assert_eq!( + db.store + .get_document_by_operation_id(create_operation.operation_id()) + .await + .unwrap() + .unwrap(), + document_id.clone() + ); + }); } #[rstest] - #[tokio::test] - async fn get_operations_by_document_id( + fn get_operations_by_document_id( key_pair: KeyPair, #[from(test_db)] #[with(5, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async 
move { + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); - let latest_entry = db - .store - .get_latest_entry(&author, &LogId::default()) - .await - .unwrap() - .unwrap(); + let latest_entry = db + .store + .get_latest_entry(&author, &LogId::default()) + .await + .unwrap() + .unwrap(); - let document_id = db - .store - .get_document_by_entry(&latest_entry.hash()) - .await - .unwrap() - .unwrap(); + let document_id = db + .store + .get_document_by_entry(&latest_entry.hash()) + .await + .unwrap() + .unwrap(); - let operations_by_document_id = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let operations_by_document_id = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); - assert_eq!(operations_by_document_id.len(), 5) + assert_eq!(operations_by_document_id.len(), 5) + }); } } diff --git a/aquadoggo/src/db/stores/schema.rs b/aquadoggo/src/db/stores/schema.rs index 46d1df332..0e0358be7 100644 --- a/aquadoggo/src/db/stores/schema.rs +++ b/aquadoggo/src/db/stores/schema.rs @@ -106,7 +106,9 @@ mod tests { use rstest::rstest; use crate::db::provider::SqlStorage; - use crate::db::stores::test_utils::{insert_entry_operation_and_view, test_db, TestSqlStore}; + use crate::db::stores::test_utils::{ + insert_entry_operation_and_view, test_db, TestDatabase, TestDatabaseRunner, + }; use super::SchemaStore; @@ -163,108 +165,187 @@ mod tests { #[rstest] #[case::valid_schema_and_fields( - "venue_name = { type: \"str\", value: tstr, }\ncreate-fields = { venue_name }\nupdate-fields = { + ( venue_name ) }", - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string())), ("type", FieldType::String.into())]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "missing field \"name\"")] - #[case::fields_missing_name_field( - "", - operation_fields(vec![("type", FieldType::String.into())]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "missing field \"type\"")] - #[case::fields_missing_type_field( - "", - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string()))]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "missing field \"name\"")] - #[case::schema_missing_name_field( - "", - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string())), ("type", FieldType::String.into())]), - operation_fields(vec![("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "missing field \"description\"")] - #[case::schema_missing_name_description( - "", - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string())), ("type", FieldType::String.into())]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string()))]))] - #[tokio::test] - async fn get_schema( + r#"venue_name = { type: "str", value: tstr, } + create-fields = { venue_name } + update-fields = { + ( venue_name ) }"#, + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())), + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + 
)] + fn get_schema( #[case] cddl_str: &str, #[case] schema_field_definition: OperationFields, #[case] schema_definition: OperationFields, key_pair: KeyPair, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - let document_view_id = - insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; + let cddl_str = cddl_str.to_string(); - let document_view_id = - insert_schema_definition(&db.store, &key_pair, &document_view_id, schema_definition) - .await; + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view_id = + insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; - let schema = db - .store - .get_schema_by_id(&document_view_id) - .await - .unwrap_or_else(|e| panic!("{}", e)); + let document_view_id = insert_schema_definition( + &db.store, + &key_pair, + &document_view_id, + schema_definition, + ) + .await; + + let schema = db.store.get_schema_by_id(&document_view_id).await.unwrap(); - assert_eq!(schema.unwrap().as_cddl(), cddl_str) + assert_eq!( + schema.unwrap().as_cddl().replace(" ", ""), + cddl_str.replace(" ", "") + ); + }); } #[rstest] - #[case::works( - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string())), ("type", FieldType::String.into())]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "invalid fields found for this schema")] - #[case::does_not_work( - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string()))]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[tokio::test] - async fn get_all_schema( + #[case::fields_missing_name_field("missing field \"name\"", + operation_fields(vec![ + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + #[case::fields_missing_type_field("missing field \"type\"", + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + #[case::schema_missing_name_field("missing field \"name\"", + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())), + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + #[case::schema_missing_name_description("missing field \"description\"", + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())), + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())) + ]) + )] + fn get_schema_errors( + #[case] err_str: &str, #[case] schema_field_definition: OperationFields, #[case] schema_definition: OperationFields, key_pair: KeyPair, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - let document_view_id = - insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; + let err_str = err_str.to_string(); + + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view_id = + 
insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; - insert_schema_definition(&db.store, &key_pair, &document_view_id, schema_definition).await; + let document_view_id = insert_schema_definition( + &db.store, + &key_pair, + &document_view_id, + schema_definition, + ) + .await; - let schemas = db - .store - .get_all_schema() - .await - .unwrap_or_else(|e| panic!("{}", e)); + let schema = db.store.get_schema_by_id(&document_view_id).await; - assert_eq!(schemas.len(), 1) + assert_eq!(schema.unwrap_err().to_string(), err_str); + }); } #[rstest] - #[case::schema_fields_do_not_exist( - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[tokio::test] - async fn schema_fields_do_not_exist( + #[case::works( + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())), + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + #[case::does_not_work( + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + fn get_all_schema( + #[case] schema_field_definition: OperationFields, #[case] schema_definition: OperationFields, - #[from(document_view_id)] schema_fields_id: DocumentViewId, - #[from(test_db)] - #[future] - db: TestSqlStore, key_pair: KeyPair, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - let document_view_id = - insert_schema_definition(&db.store, &key_pair, &schema_fields_id, schema_definition) + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view_id = + insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; + + insert_schema_definition(&db.store, &key_pair, &document_view_id, schema_definition) .await; - // Retrieve the schema by it's document_view_id. - let schema = db.store.get_schema_by_id(&document_view_id).await; + let schemas = db.store.get_all_schema().await; + + if schemas.is_err() { + assert_eq!( + schemas.unwrap_err().to_string(), + "invalid fields found for this schema".to_string() + ) + } else { + assert_eq!(schemas.unwrap().len(), 1); + } + }); + } - assert_eq!(schema.unwrap_err().to_string(), format!("No document view found for schema field definition with id: {0} which is required by schema definition {1}", schema_fields_id, document_view_id)) + #[rstest] + #[case::schema_fields_do_not_exist( + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + fn schema_fields_do_not_exist( + #[case] schema_definition: OperationFields, + #[from(document_view_id)] schema_fields_id: DocumentViewId, + #[from(test_db)] runner: TestDatabaseRunner, + key_pair: KeyPair, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view_id = + insert_schema_definition(&db.store, &key_pair, &schema_fields_id, schema_definition) + .await; + + // Retrieve the schema by it's document_view_id. 
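The failure cases above no longer rely on rstest's `#[should_panic]`: the expected message is passed in as an ordinary `#[case]` argument and compared against the returned error inside the teardown closure, which also works when the test body runs in a spawned task. A tiny, self-contained sketch of that pattern; `parse_age` is made up purely for illustration:

use rstest::rstest;

// Hypothetical helper, only here to demonstrate the pattern.
fn parse_age(input: &str) -> Result<u8, String> {
    input
        .parse::<u8>()
        .map_err(|_| format!("invalid age: {}", input))
}

#[rstest]
#[case::not_a_number("abc", "invalid age: abc")]
#[case::out_of_range("300", "invalid age: 300")]
fn returns_expected_error(#[case] input: &str, #[case] err_str: &str) {
    // Assert on the error value directly instead of expecting a panic.
    assert_eq!(parse_age(input).unwrap_err(), err_str);
}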
+ let schema = db.store.get_schema_by_id(&document_view_id).await; + + assert_eq!( + schema.unwrap_err().to_string(), + format!( + "No document view found for schema field definition with id: {0} which is required by schema definition {1}", + schema_fields_id, + document_view_id + ) + ); + }); } } diff --git a/aquadoggo/src/db/stores/task.rs b/aquadoggo/src/db/stores/task.rs index d1ff1417d..c82e08f9d 100644 --- a/aquadoggo/src/db/stores/task.rs +++ b/aquadoggo/src/db/stores/task.rs @@ -24,7 +24,7 @@ impl SqlStorage { // Insert task into database query( " - INSERT OR IGNORE INTO + INSERT INTO tasks ( name, document_id, @@ -32,6 +32,7 @@ impl SqlStorage { ) VALUES ($1, $2, $3) + ON CONFLICT DO NOTHING ", ) .bind(task.worker_name()) @@ -61,9 +62,10 @@ impl SqlStorage { tasks WHERE name = $1 - -- Use `IS` because these columns can contain `null` values. - AND document_id IS $2 - AND document_view_id IS $3 + -- Use `COALESCE` to compare possible null values in a way + -- that is compatible between SQLite and PostgreSQL. + AND COALESCE(document_id, '0') = COALESCE($2, '0') + AND COALESCE(document_view_id, '0') = COALESCE($3, '0') ", ) .bind(task.worker_name()) @@ -126,62 +128,53 @@ mod tests { use p2panda_rs::test_utils::fixtures::{document_id, document_view_id}; use rstest::rstest; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::materializer::{Task, TaskInput}; #[rstest] - #[tokio::test] - async fn insert_get_remove_tasks( + fn insert_get_remove_tasks( document_view_id: DocumentViewId, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - - // Prepare test data - let task = Task::new("reduce", TaskInput::new(None, Some(document_view_id))); - - // Insert task - let result = db.store.insert_task(&task).await; - assert!(result.is_ok(), "{:?}", result); - - // Check if task exists in database - let result = db.store.get_tasks().await; - assert_eq!(result.unwrap(), vec![task.clone()]); - - // Remove task - let result = db.store.remove_task(&task).await; - assert!(result.is_ok(), "{:?}", result); - - // Check if all tasks got removed - let result = db.store.get_tasks().await; - assert_eq!(result.unwrap(), vec![]); + runner.with_db_teardown(|db: TestDatabase| async move { + // Prepare test data + let task = Task::new("reduce", TaskInput::new(None, Some(document_view_id))); + + // Insert task + let result = db.store.insert_task(&task).await; + assert!(result.is_ok(), "{:?}", result); + + // Check if task exists in database + let result = db.store.get_tasks().await; + assert_eq!(result.unwrap(), vec![task.clone()]); + + // Remove task + let result = db.store.remove_task(&task).await; + assert!(result.is_ok(), "{:?}", result); + + // Check if all tasks got removed + let result = db.store.get_tasks().await; + assert_eq!(result.unwrap(), vec![]); + }); } #[rstest] - #[tokio::test] - async fn avoid_duplicates( - document_id: DocumentId, - #[from(test_db)] - #[future] - db: TestSqlStore, - ) { - let db = db.await; - - // Prepare test data - let task = Task::new("reduce", TaskInput::new(Some(document_id), None)); - - // Insert task - let result = db.store.insert_task(&task).await; - assert!(result.is_ok(), "{:?}", result); - - // Insert the same thing again, it should silently fail - let result = db.store.insert_task(&task).await; - assert!(result.is_ok(), "{:?}", result); - - // Check for duplicates - let result = 
db.store.get_tasks().await; - assert_eq!(result.unwrap().len(), 1); + fn avoid_duplicates(document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + // Prepare test data + let task = Task::new("reduce", TaskInput::new(Some(document_id), None)); + + // Insert task + let result = db.store.insert_task(&task).await; + assert!(result.is_ok(), "{:?}", result); + + // Insert the same thing again, it should silently fail + let result = db.store.insert_task(&task).await; + assert!(result.is_ok(), "{:?}", result); + + // Check for duplicates + let result = db.store.get_tasks().await; + assert_eq!(result.unwrap().len(), 1); + }); } } diff --git a/aquadoggo/src/db/stores/test_utils.rs b/aquadoggo/src/db/stores/test_utils.rs index 1008e1029..609669f55 100644 --- a/aquadoggo/src/db/stores/test_utils.rs +++ b/aquadoggo/src/db/stores/test_utils.rs @@ -2,6 +2,7 @@ use std::convert::TryFrom; +use futures::Future; use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewId}; use p2panda_rs::entry::{sign_and_encode, Entry}; use p2panda_rs::hash::Hash; @@ -17,12 +18,16 @@ use p2panda_rs::storage_provider::traits::{ use p2panda_rs::test_utils::constants::{DEFAULT_PRIVATE_KEY, TEST_SCHEMA_ID}; use p2panda_rs::test_utils::fixtures::{create_operation, delete_operation, update_operation}; use rstest::fixture; +use sqlx::migrate::MigrateDatabase; +use sqlx::Any; +use tokio::runtime::Builder; use crate::db::provider::SqlStorage; use crate::db::stores::{StorageEntry, StorageLog}; use crate::db::traits::DocumentStore; +use crate::db::{connection_pool, create_database, run_pending_migrations, Pool}; use crate::graphql::client::{EntryArgsRequest, PublishEntryRequest}; -use crate::test_helpers::initialize_db; +use crate::test_helpers::TEST_CONFIG; /// The fields used as defaults in the tests. pub fn doggo_test_fields() -> Vec<(&'static str, OperationValue)> { @@ -168,23 +173,103 @@ pub async fn insert_entry_operation_and_view( (document_id, document_view_id) } -/// Container for `SqlStore` with access to the document ids and key_pairs used in the -/// pre-populated database for testing. -pub struct TestSqlStore { - pub store: SqlStorage, - pub key_pairs: Vec, - pub documents: Vec, +#[async_trait::async_trait] +pub trait AsyncTestFn { + async fn call(self, db: TestDatabase); } -/// Fixture for constructing a storage provider instance backed by a pre-polpulated database. Passed -/// parameters define what the db should contain. The first entry in each log contains a valid CREATE -/// operation following entries contain duplicate UPDATE operations. If the with_delete flag is set -/// to true the last entry in all logs contain be a DELETE operation. +#[async_trait::async_trait] +impl AsyncTestFn for FN +where + FN: FnOnce(TestDatabase) -> F + Sync + Send, + F: Future + Send, +{ + async fn call(self, db: TestDatabase) { + self(db).await + } +} + +pub struct TestDatabaseRunner { + /// Number of entries per log/document. + no_of_entries: usize, + + /// Number of authors, each with a log populated as defined above. + no_of_authors: usize, + + /// A boolean flag for wether all logs should contain a delete operation. + with_delete: bool, + + /// The schema used for all operations in the db. + schema: SchemaId, + + /// The fields used for every CREATE operation. + create_operation_fields: Vec<(&'static str, OperationValue)>, + + /// The fields used for every UPDATE operation. 
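Two portable SQL constructs carry the task-store changes above: `ON CONFLICT DO NOTHING` replaces SQLite's non-standard `INSERT OR IGNORE`, and `COALESCE` gives NULL a concrete representation so nullable columns can be compared with `=` on both backends (SQLite's `col IS $2` comparison does not exist in PostgreSQL, and `col = $2` is never true when either side is NULL). A condensed sketch of the same pattern; these helpers are illustrative only and assume the '0' sentinel never appears as a real id:

use sqlx::any::AnyPool;

async fn insert_task_once(
    pool: &AnyPool,
    name: &str,
    document_id: Option<&str>,
    document_view_id: Option<&str>,
) -> Result<(), sqlx::Error> {
    sqlx::query(
        "
        INSERT INTO tasks (name, document_id, document_view_id)
        VALUES ($1, $2, $3)
        -- Portable replacement for SQLite's `INSERT OR IGNORE`
        ON CONFLICT DO NOTHING
        ",
    )
    .bind(name)
    .bind(document_id)
    .bind(document_view_id)
    .execute(pool)
    .await?;

    Ok(())
}

async fn remove_task(
    pool: &AnyPool,
    name: &str,
    document_id: Option<&str>,
    document_view_id: Option<&str>,
) -> Result<(), sqlx::Error> {
    sqlx::query(
        "
        DELETE FROM tasks
        WHERE name = $1
            -- COALESCE maps NULL on both sides to the same sentinel value,
            -- so 'no document id' matches 'no document id'.
            AND COALESCE(document_id, '0') = COALESCE($2, '0')
            AND COALESCE(document_view_id, '0') = COALESCE($3, '0')
        ",
    )
    .bind(name)
    .bind(document_id)
    .bind(document_view_id)
    .execute(pool)
    .await?;

    Ok(())
}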
+ update_operation_fields: Vec<(&'static str, OperationValue)>, +} + +impl TestDatabaseRunner { + /// Provides a safe way to write tests using a database which closes the pool connection + /// automatically when the test succeeds or fails. + /// + /// Takes an (async) test function as an argument and passes over the `TestDatabase` instance + /// so it can be used inside of it. + pub fn with_db_teardown(&self, test: F) { + let runtime = Builder::new_current_thread() + .worker_threads(1) + .enable_all() + .thread_name("with_db_teardown") + .build() + .expect("Could not build tokio Runtime for test"); + + runtime.block_on(async { + // Initialise test database + let db = create_test_db( + self.no_of_entries, + self.no_of_authors, + self.with_delete, + self.schema.clone(), + self.create_operation_fields.clone(), + self.update_operation_fields.clone(), + ) + .await; + + // Get a handle of the underlying database connection pool + let pool = db.store.pool.clone(); + + // Spawn the test in a separate task to make sure we have control over the possible + // panics which might happen inside of it + let handle = tokio::task::spawn(async move { + // Execute the actual test + test.call(db).await; + }); + + // Get a handle of the task so we can use it later + let result = handle.await; + + // Unwind the test by closing down the connection to the database pool. This will + // be reached even when the test panicked + pool.close().await; + + // Panic here when test failed. The test fails within its own async task and stays + // there, we need to propagate it further to inform the test runtime about the result + result.unwrap(); + }); + } +} + +/// Fixture for constructing a storage provider instance backed by a pre-populated database. +/// +/// Returns a `TestDatabaseRunner` which allows to bootstrap a safe async test environment +/// connecting to a database. It makes sure the runner disconnects properly from the connection +/// pool after the test succeeded or even failed. /// -/// Returns a `TestSqlStore` containing storage provider instance, a vector of key pairs for all authors -/// in the db, and a vector of the ids for all documents. +/// Passed parameters define what the database should contain. The first entry in each log contains +/// a valid CREATE operation following entries contain duplicate UPDATE operations. If the +/// with_delete flag is set to true the last entry in all logs contain be a DELETE operation. #[fixture] -pub async fn test_db( +pub fn test_db( // Number of entries per log/document #[default(0)] no_of_entries: usize, // Number of authors, each with a log populated as defined above @@ -197,16 +282,50 @@ pub async fn test_db( #[default(doggo_test_fields())] create_operation_fields: Vec<(&'static str, OperationValue)>, // The fields used for every UPDATE operation #[default(doggo_test_fields())] update_operation_fields: Vec<(&'static str, OperationValue)>, -) -> TestSqlStore { +) -> TestDatabaseRunner { + TestDatabaseRunner { + no_of_entries, + no_of_authors, + with_delete, + schema, + create_operation_fields, + update_operation_fields, + } +} + +/// Container for `SqlStore` with access to the document ids and key_pairs used in the +/// pre-populated database for testing. +pub struct TestDatabase { + pub store: SqlStorage, + pub key_pairs: Vec, + pub documents: Vec, +} + +/// Helper method for constructing a storage provider instance backed by a pre-populated database. +/// +/// Passed parameters define what the db should contain. 
The first entry in each log contains a +/// valid CREATE operation following entries contain duplicate UPDATE operations. If the +/// with_delete flag is set to true the last entry in all logs contain be a DELETE operation. +/// +/// Returns a `TestDatabase` containing storage provider instance, a vector of key pairs for all +/// authors in the db, and a vector of the ids for all documents. +async fn create_test_db( + no_of_entries: usize, + no_of_authors: usize, + with_delete: bool, + schema: SchemaId, + create_operation_fields: Vec<(&'static str, OperationValue)>, + update_operation_fields: Vec<(&'static str, OperationValue)>, +) -> TestDatabase { let mut documents: Vec = Vec::new(); let key_pairs = test_key_pairs(no_of_authors); let pool = initialize_db().await; - let store = SqlStorage { pool }; + let store = SqlStorage::new(pool); // If we don't want any entries in the db return now if no_of_entries == 0 { - return TestSqlStore { + return TestDatabase { store, key_pairs, documents, @@ -278,9 +397,37 @@ pub async fn test_db( .unwrap(); } } - TestSqlStore { + + TestDatabase { store, key_pairs, documents, } } + +/// Create test database. +async fn initialize_db() -> Pool { + // Reset database first + drop_database().await; + create_database(&TEST_CONFIG.database_url).await.unwrap(); + + // Create connection pool and run all migrations + let pool = connection_pool(&TEST_CONFIG.database_url, 25) + .await + .unwrap(); + if run_pending_migrations(&pool).await.is_err() { + pool.close().await; + } + + pool +} + +// Delete test database +async fn drop_database() { + if Any::database_exists(&TEST_CONFIG.database_url) + .await + .unwrap() + { + Any::drop_database(&TEST_CONFIG.database_url).await.unwrap(); + } +} diff --git a/aquadoggo/src/db/utils.rs b/aquadoggo/src/db/utils.rs index 89973483c..4b9e0a7c0 100644 --- a/aquadoggo/src/db/utils.rs +++ b/aquadoggo/src/db/utils.rs @@ -41,100 +41,106 @@ pub fn parse_operation_rows( // - if it is a simple value type, parse it into an OperationValue and add it to the operation_fields // - if it is a relation list value type parse each item into a DocumentId/DocumentViewId and push to // the suitable vec (instantiated above) - operation_rows.iter().for_each(|row| { - match row.field_type.as_str() { - "bool" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::Boolean(row.value.parse::().unwrap()), - ) - .unwrap(); - } - "int" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::Integer(row.value.parse::().unwrap()), - ) - .unwrap(); - } - "float" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::Float(row.value.parse::().unwrap()), - ) - .unwrap(); - } - "str" => { - operation_fields - .add(row.name.as_str(), OperationValue::Text(row.value.clone())) - .unwrap(); - } - "relation" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::Relation(Relation::new( - row.value.parse::().unwrap(), - )), - ) - .unwrap(); - } - // This is a list item, so we push it to a vec but _don't_ add it - // to the operation_fields yet. 
- "relation_list" => { - match relation_lists.get_mut(&row.name) { - Some(list) => list.push(row.value.parse::().unwrap()), - None => { - relation_lists.insert( - row.name.clone(), - vec![row.value.parse::().unwrap()], - ); - } - }; - } - "pinned_relation" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::PinnedRelation(PinnedRelation::new( - row.value.parse::().unwrap(), - )), - ) - .unwrap(); - } - // This is a list item, so we push it to a vec but _don't_ add it - // to the operation_fields yet. - "pinned_relation_list" => { - match pinned_relation_lists.get_mut(&row.name) { - Some(list) => list.push(row.value.parse::().unwrap()), - None => { - pinned_relation_lists.insert( - row.name.clone(), - vec![row.value.parse::().unwrap()], - ); - } - }; - } - _ => (), - }; - }); + if first_row.action != "delete" { + operation_rows.iter().for_each(|row| { + let field_type = row.field_type.as_ref().unwrap(); + let field_name = row.name.as_ref().unwrap(); + let field_value = row.value.as_ref().unwrap(); + + match field_type.as_str() { + "bool" => { + operation_fields + .add( + field_name, + OperationValue::Boolean(field_value.parse::().unwrap()), + ) + .unwrap(); + } + "int" => { + operation_fields + .add( + field_name, + OperationValue::Integer(field_value.parse::().unwrap()), + ) + .unwrap(); + } + "float" => { + operation_fields + .add( + field_name, + OperationValue::Float(field_value.parse::().unwrap()), + ) + .unwrap(); + } + "str" => { + operation_fields + .add(field_name, OperationValue::Text(field_value.clone())) + .unwrap(); + } + "relation" => { + operation_fields + .add( + field_name, + OperationValue::Relation(Relation::new( + field_value.parse::().unwrap(), + )), + ) + .unwrap(); + } + // This is a list item, so we push it to a vec but _don't_ add it + // to the operation_fields yet. + "relation_list" => { + match relation_lists.get_mut(field_name) { + Some(list) => list.push(field_value.parse::().unwrap()), + None => { + relation_lists.insert( + field_name.clone(), + vec![field_value.parse::().unwrap()], + ); + } + }; + } + "pinned_relation" => { + operation_fields + .add( + field_name, + OperationValue::PinnedRelation(PinnedRelation::new( + field_value.parse::().unwrap(), + )), + ) + .unwrap(); + } + // This is a list item, so we push it to a vec but _don't_ add it + // to the operation_fields yet. 
+ "pinned_relation_list" => { + match pinned_relation_lists.get_mut(field_name) { + Some(list) => list.push(field_value.parse::().unwrap()), + None => { + pinned_relation_lists.insert( + field_name.clone(), + vec![field_value.parse::().unwrap()], + ); + } + }; + } + _ => (), + }; + }) + }; - for (field_name, relation_list) in relation_lists { + for (ref field_name, relation_list) in relation_lists { operation_fields .add( - field_name.as_str(), + field_name, OperationValue::RelationList(RelationList::new(relation_list)), ) .unwrap(); } - for (field_name, pinned_relation_list) in pinned_relation_lists { + for (ref field_name, pinned_relation_list) in pinned_relation_lists { operation_fields .add( - field_name.as_str(), + field_name, OperationValue::PinnedRelationList(PinnedRelationList::new(pinned_relation_list)), ) .unwrap(); @@ -144,10 +150,23 @@ pub fn parse_operation_rows( "create" => Operation::new_create(schema, operation_fields), "update" => Operation::new_update( schema, - first_row.previous_operations.parse().unwrap(), + first_row + .previous_operations + .as_ref() + .unwrap() + .parse() + .unwrap(), operation_fields, ), - "delete" => Operation::new_delete(schema, first_row.previous_operations.parse().unwrap()), + "delete" => Operation::new_delete( + schema, + first_row + .previous_operations + .as_ref() + .unwrap() + .parse() + .unwrap(), + ), _ => panic!("Operation which was not CREATE, UPDATE or DELETE found."), } // Unwrap as we are sure values coming from the db are validated @@ -357,10 +376,10 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "age".to_string(), - field_type: "int".to_string(), - value: "28".to_string(), + previous_operations: None, + name: Some("age".to_string()), + field_type: Some("int".to_string()), + value: Some("28".to_string()), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -376,10 +395,10 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "height".to_string(), - field_type: "float".to_string(), - value: "3.5".to_string(), + previous_operations: None, + name: Some("height".to_string()), + field_type: Some("float".to_string()), + value: Some("3.5".to_string()), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -395,10 +414,10 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "is_admin".to_string(), - field_type: "bool".to_string(), - value: "false".to_string(), + previous_operations: None, + name: Some("is_admin".to_string()), + field_type: Some("bool".to_string()), + value: Some("false".to_string()), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -414,11 +433,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_profile_pictures".to_string(), - field_type: "relation_list".to_string(), - value: "0020aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - .to_string(), + previous_operations: None, + name: Some("many_profile_pictures".to_string()), + field_type: Some("relation_list".to_string()), + value: Some( + 
"0020aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -434,11 +455,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_profile_pictures".to_string(), - field_type: "relation_list".to_string(), - value: "0020bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" - .to_string(), + previous_operations: None, + name: Some("many_profile_pictures".to_string()), + field_type: Some("relation_list".to_string()), + value: Some( + "0020bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -454,11 +477,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_special_profile_pictures".to_string(), - field_type: "pinned_relation_list".to_string(), - value: "0020cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" - .to_string(), + previous_operations: None, + name: Some("many_special_profile_pictures".to_string()), + field_type: Some("pinned_relation_list".to_string()), + value: Some( + "0020cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -474,11 +499,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_special_profile_pictures".to_string(), - field_type: "pinned_relation_list".to_string(), - value: "0020dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" - .to_string(), + previous_operations: None, + name: Some("many_special_profile_pictures".to_string()), + field_type: Some("pinned_relation_list".to_string()), + value: Some( + "0020dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -494,11 +521,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_special_dog_pictures".to_string(), - field_type: "pinned_relation_list".to_string(), - value: "0020bcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbc" - .to_string(), + previous_operations: None, + name: Some("many_special_dog_pictures".to_string()), + field_type: Some("pinned_relation_list".to_string()), + value: Some( + "0020bcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbc" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -514,11 +543,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_special_dog_pictures".to_string(), - field_type: "pinned_relation_list".to_string(), - value: "0020abababababababababababababababababababababababababababababababab" - .to_string(), + previous_operations: None, + name: Some("many_special_dog_pictures".to_string()), + field_type: 
Some("pinned_relation_list".to_string()), + value: Some( + "0020abababababababababababababababababababababababababababababababab" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -534,11 +565,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "profile_picture".to_string(), - field_type: "relation".to_string(), - value: "0020eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" - .to_string(), + previous_operations: None, + name: Some("profile_picture".to_string()), + field_type: Some("relation".to_string()), + value: Some( + "0020eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -554,11 +587,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "special_profile_picture".to_string(), - field_type: "pinned_relation".to_string(), - value: "0020ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" - .to_string(), + previous_operations: None, + name: Some("special_profile_picture".to_string()), + field_type: Some("pinned_relation".to_string()), + value: Some( + "0020ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -574,10 +609,10 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "username".to_string(), - field_type: "str".to_string(), - value: "bubu".to_string(), + previous_operations: None, + name: Some("username".to_string()), + field_type: Some("str".to_string()), + value: Some("bubu".to_string()), }, ]; diff --git a/aquadoggo/src/graphql/client/mutation.rs b/aquadoggo/src/graphql/client/mutation.rs index 18badb8c7..4c1d91b61 100644 --- a/aquadoggo/src/graphql/client/mutation.rs +++ b/aquadoggo/src/graphql/client/mutation.rs @@ -87,26 +87,26 @@ mod tests { use std::convert::TryFrom; use async_graphql::{from_value, value, Request, Value, Variables}; - use bamboo_rs_core_ed25519_yasmf::entry::is_lipmaa_required; - use p2panda_rs::entry::{EntrySigned, LogId, SeqNum}; + use p2panda_rs::document::DocumentId; + use p2panda_rs::entry::{sign_and_encode, Entry, EntrySigned, LogId, SeqNum}; use p2panda_rs::hash::Hash; - use p2panda_rs::identity::Author; + use p2panda_rs::identity::{Author, KeyPair}; use p2panda_rs::operation::{Operation, OperationEncoded, OperationValue}; - use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore}; + use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore, StorageProvider}; use p2panda_rs::test_utils::constants::{DEFAULT_HASH, DEFAULT_PRIVATE_KEY, TEST_SCHEMA_ID}; use p2panda_rs::test_utils::fixtures::{ - entry_signed_encoded_unvalidated, key_pair, operation, operation_encoded, operation_fields, - random_hash, + create_operation, delete_operation, entry_signed_encoded_unvalidated, key_pair, operation, + operation_encoded, operation_fields, random_hash, update_operation, }; use rstest::{fixture, rstest}; use serde_json::json; use tokio::sync::broadcast; use crate::bus::ServiceMessage; - use 
crate::db::stores::test_utils::{test_db, TestSqlStore}; - use crate::graphql::client::PublishEntryResponse; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + use crate::graphql::client::{EntryArgsRequest, PublishEntryResponse}; use crate::http::{build_server, HttpServiceContext}; - use crate::test_helpers::{initialize_store, TestClient}; + use crate::test_helpers::TestClient; const ENTRY_ENCODED: &str = "00bedabb435758855968b3e2de2aa1f653adfbb392fcf9cb2295a68b2eca3c\ fb030101a200204b771d59d76e820cbae493682003e99b795e4e7c86a8d6b4\ @@ -152,105 +152,112 @@ mod tests { } #[rstest] - #[tokio::test] - async fn publish_entry(publish_entry_request: Request) { - let (tx, _rx) = broadcast::channel(16); - let store = initialize_store().await; - let context = HttpServiceContext::new(store, tx); - - let response = context.schema.execute(publish_entry_request).await; - let received: PublishEntryResponse = match response.data { - Value::Object(result_outer) => { - from_value(result_outer.get("publishEntry").unwrap().to_owned()).unwrap() - } - _ => panic!("Expected return value to be an object"), - }; + fn publish_entry(#[from(test_db)] runner: TestDatabaseRunner, publish_entry_request: Request) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + + let response = context.schema.execute(publish_entry_request).await; + let received: PublishEntryResponse = match response.data { + Value::Object(result_outer) => { + from_value(result_outer.get("publishEntry").unwrap().to_owned()).unwrap() + } + _ => panic!("Expected return value to be an object"), + }; - // The response should contain args for the next entry in the same log - let expected = PublishEntryResponse { - log_id: LogId::new(1), - seq_num: SeqNum::new(2).unwrap(), - backlink: Some( - "00201c221b573b1e0c67c5e2c624a93419774cdf46b3d62414c44a698df1237b1c16" - .parse() - .unwrap(), - ), - skiplink: None, - }; - assert_eq!(expected, received); + // The response should contain args for the next entry in the same log + let expected = PublishEntryResponse { + log_id: LogId::new(1), + seq_num: SeqNum::new(2).unwrap(), + backlink: Some( + "00201c221b573b1e0c67c5e2c624a93419774cdf46b3d62414c44a698df1237b1c16" + .parse() + .unwrap(), + ), + skiplink: None, + }; + assert_eq!(expected, received); + }); } #[rstest] - #[tokio::test] - async fn sends_message_on_communication_bus(publish_entry_request: Request) { - let (tx, mut rx) = broadcast::channel(16); - let store = initialize_store().await; - let context = HttpServiceContext::new(store, tx); - - context.schema.execute(publish_entry_request).await; - - // Find out hash of test entry to determine operation id - let entry_encoded = EntrySigned::new(ENTRY_ENCODED).unwrap(); - - // Expect receiver to receive sent message - let message = rx.recv().await.unwrap(); - assert_eq!( - message, - ServiceMessage::NewOperation(entry_encoded.hash().into()) - ); - } + fn sends_message_on_communication_bus( + #[from(test_db)] runner: TestDatabaseRunner, + publish_entry_request: Request, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, mut rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); - #[tokio::test] - async fn publish_entry_error_handling() { - let (tx, _rx) = broadcast::channel(16); - let store = initialize_store().await; - let context = HttpServiceContext::new(store, tx); + 
context.schema.execute(publish_entry_request).await; - let parameters = Variables::from_value(value!({ - "entryEncoded": ENTRY_ENCODED, - "operationEncoded": "".to_string() - })); - let request = Request::new(PUBLISH_ENTRY_QUERY).variables(parameters); - let response = context.schema.execute(request).await; - - assert!(response.is_err()); - assert_eq!( - "operation needs to match payload hash of encoded entry".to_string(), - response.errors[0].to_string() - ); + // Find out hash of test entry to determine operation id + let entry_encoded = EntrySigned::new(ENTRY_ENCODED).unwrap(); + + // Expect receiver to receive sent message + let message = rx.recv().await.unwrap(); + assert_eq!( + message, + ServiceMessage::NewOperation(entry_encoded.hash().into()) + ); + }); } #[rstest] - #[tokio::test] - async fn post_gql_mutation(publish_entry_request: Request) { - let (tx, _rx) = broadcast::channel(16); - let store = initialize_store().await; - let context = HttpServiceContext::new(store, tx); - let client = TestClient::new(build_server(context)); - - let response = client - .post("/graphql") - .json(&json!({ - "query": publish_entry_request.query, - "variables": publish_entry_request.variables - } - )) - .send() - .await; - - assert_eq!( - response.json::().await, - json!({ - "data": { - "publishEntry": { - "logId":"1", - "seqNum":"2", - "backlink":"00201c221b573b1e0c67c5e2c624a93419774cdf46b3d62414c44a698df1237b1c16", - "skiplink":null - } + fn publish_entry_error_handling(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + + let parameters = Variables::from_value(value!({ + "entryEncoded": ENTRY_ENCODED, + "operationEncoded": "".to_string() + })); + let request = Request::new(PUBLISH_ENTRY_QUERY).variables(parameters); + let response = context.schema.execute(request).await; + + assert!(response.is_err()); + assert_eq!( + "operation needs to match payload hash of encoded entry".to_string(), + response.errors[0].to_string() + ); + }); + } + + #[rstest] + fn post_gql_mutation( + #[from(test_db)] runner: TestDatabaseRunner, + publish_entry_request: Request, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + let client = TestClient::new(build_server(context)); + + let response = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables } - }) - ); + )) + .send() + .await; + + assert_eq!( + response.json::().await, + json!({ + "data": { + "publishEntry": { + "logId":"1", + "seqNum":"2", + "backlink":"00201c221b573b1e0c67c5e2c624a93419774cdf46b3d62414c44a698df1237b1c16", + "skiplink":null + } + } + }) + ); + }); } #[rstest] @@ -317,12 +324,11 @@ mod tests { )] #[case::should_not_have_backlink_or_skiplink( &entry_signed_encoded_unvalidated( - 1, - 1, - Some(DEFAULT_HASH.parse().unwrap()), - Some(DEFAULT_HASH.parse().unwrap()), - Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())) -, + 1, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + Some(DEFAULT_HASH.parse().unwrap()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())) , key_pair(DEFAULT_PRIVATE_KEY) ), OPERATION_ENCODED, @@ -354,12 +360,11 @@ mod tests { )] #[case::should_not_include_skiplink( &entry_signed_encoded_unvalidated( - 14, - 1, - 
Some(DEFAULT_HASH.parse().unwrap()), - Some(DEFAULT_HASH.parse().unwrap()), - Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())) -, + 14, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + Some(DEFAULT_HASH.parse().unwrap()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), key_pair(DEFAULT_PRIVATE_KEY) ), OPERATION_ENCODED, @@ -367,124 +372,242 @@ mod tests { )] #[case::payload_hash_and_size_missing( &entry_signed_encoded_unvalidated( - 14, - 1, - Some(random_hash()), - Some(DEFAULT_HASH.parse().unwrap()), - None, + 14, + 1, + Some(random_hash()), + Some(DEFAULT_HASH.parse().unwrap()), + None, key_pair(DEFAULT_PRIVATE_KEY) ), OPERATION_ENCODED, "Could not decode payload hash DecodeError" )] #[case::backlink_and_skiplink_not_in_db( - &entry_signed_encoded_unvalidated(8, 1, Some(DEFAULT_HASH.parse().unwrap()), Some(Hash::new_from_bytes(vec![2, 3, 4]).unwrap()), Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)), + &entry_signed_encoded_unvalidated( + 8, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + Some(Hash::new_from_bytes(vec![2, 3, 4]).unwrap()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), OPERATION_ENCODED, "Could not find expected backlink in database for entry with id: " )] #[case::backlink_not_in_db( - &entry_signed_encoded_unvalidated(2, 1, Some(DEFAULT_HASH.parse().unwrap()), None, Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)), + &entry_signed_encoded_unvalidated( + 2, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + None, + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), OPERATION_ENCODED, "Could not find expected backlink in database for entry with id: " )] #[case::previous_operations_not_in_db( - &entry_signed_encoded_unvalidated(1, 1, None, None, Some(operation(Some(operation_fields(vec![("silly", OperationValue::Text("Sausage".to_string()))])), Some(DEFAULT_HASH.parse().unwrap()), None)), key_pair(DEFAULT_PRIVATE_KEY)), - &{operation_encoded(Some(operation_fields(vec![("silly", OperationValue::Text("Sausage".to_string()))])), Some(DEFAULT_HASH.parse().unwrap()), None).as_str().to_owned()}, + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + None, + Some( + operation( + Some( + operation_fields( + vec![("silly", OperationValue::Text("Sausage".to_string()))] + ) + ), + Some(DEFAULT_HASH.parse().unwrap()), + None + ) + ), + key_pair(DEFAULT_PRIVATE_KEY) + ), + &{operation_encoded( + Some( + operation_fields( + vec![("silly", OperationValue::Text("Sausage".to_string()))] + ) + ), + Some(DEFAULT_HASH.parse().unwrap()), + None + ).as_str().to_owned() + }, "Could not find document for entry in database with id: " )] #[case::create_operation_with_previous_operations( - &entry_signed_encoded_unvalidated(1, 1, None, None, Some(Operation::from(&OperationEncoded::new(CREATE_OPERATION_WITH_PREVIOUS_OPS).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)), + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + None, + Some(Operation::from(&OperationEncoded::new(CREATE_OPERATION_WITH_PREVIOUS_OPS).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), CREATE_OPERATION_WITH_PREVIOUS_OPS, "previous_operations field should be empty" )] #[case::update_operation_no_previous_operations( - &entry_signed_encoded_unvalidated(1, 1, None, None, 
Some(Operation::from(&OperationEncoded::new(UPDATE_OPERATION_NO_PREVIOUS_OPS).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)), + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + None, + Some(Operation::from(&OperationEncoded::new(UPDATE_OPERATION_NO_PREVIOUS_OPS).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), UPDATE_OPERATION_NO_PREVIOUS_OPS, "previous_operations field can not be empty" )] #[case::delete_operation_no_previous_operations( - &entry_signed_encoded_unvalidated(1, 1, None, None, Some(Operation::from(&OperationEncoded::new(DELETE_OPERATION_NO_PREVIOUS_OPS).unwrap())), key_pair(DEFAULT_PRIVATE_KEY)), + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + None, + Some(Operation::from(&OperationEncoded::new(DELETE_OPERATION_NO_PREVIOUS_OPS).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), DELETE_OPERATION_NO_PREVIOUS_OPS, "previous_operations field can not be empty" )] - #[tokio::test] - async fn invalid_requests_fail( + fn invalid_requests_fail( #[case] entry_encoded: &str, #[case] operation_encoded: &str, #[case] expected_error_message: &str, - #[future] - #[from(test_db)] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; + let entry_encoded = entry_encoded.to_string(); + let operation_encoded = operation_encoded.to_string(); + let expected_error_message = expected_error_message.to_string(); - let (tx, _rx) = broadcast::channel(16); - let context = HttpServiceContext::new(db.store, tx); - let client = TestClient::new(build_server(context)); + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + let client = TestClient::new(build_server(context)); - let publish_entry_request = publish_entry_request(entry_encoded, operation_encoded); + let publish_entry_request = publish_entry_request(&entry_encoded, &operation_encoded); + + let response = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables + } + )) + .send() + .await; - let response = client - .post("/graphql") - .json(&json!({ - "query": publish_entry_request.query, - "variables": publish_entry_request.variables + let response = response.json::().await; + for error in response.get("errors").unwrap().as_array().unwrap() { + assert_eq!( + error.get("message").unwrap().as_str().unwrap(), + expected_error_message + ) } - )) - .send() - .await; + }); + } - let response = response.json::().await; - for error in response.get("errors").unwrap().as_array().unwrap() { - assert_eq!( - error.get("message").unwrap().as_str().unwrap(), - expected_error_message - ) - } + #[rstest] + fn publish_many_entries(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + let key_pairs = vec![KeyPair::new(), KeyPair::new()]; + let num_of_entries = 100; + + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store.clone(), tx); + let client = TestClient::new(build_server(context)); + + for key_pair in &key_pairs { + let mut document: Option = None; + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + for index in 0..num_of_entries { + let next_entry_args = db + .store + .get_entry_args(&EntryArgsRequest { + author: author.clone(), + document: document.as_ref().cloned(), + }) + .await + .unwrap(); + + let operation = if index == 0 { + create_operation(&[("name", OperationValue::Text("Panda".to_string()))]) + } else if index 
== (num_of_entries - 1) { + delete_operation(&next_entry_args.backlink.clone().unwrap().into()) + } else { + update_operation( + &[("name", OperationValue::Text("🐼".to_string()))], + &next_entry_args.backlink.clone().unwrap().into(), + ) + }; + + let entry = Entry::new( + &next_entry_args.log_id, + Some(&operation), + next_entry_args.skiplink.as_ref(), + next_entry_args.backlink.as_ref(), + &next_entry_args.seq_num, + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&entry, key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(&operation).unwrap(); + + if index == 0 { + document = Some(entry_encoded.hash().into()); + } + + // Prepare a publish entry request for each entry. + let publish_entry_request = + publish_entry_request(entry_encoded.as_str(), operation_encoded.as_str()); + + // Publish the entry. + let result = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables + } + )) + .send() + .await; + + assert!(result.status().is_success()) + } + } + }); } #[rstest] - #[tokio::test] - async fn publish_many_entries( + fn duplicate_publishing_of_entries( #[from(test_db)] - #[future] - #[with(100, 1, true, TEST_SCHEMA_ID.parse().unwrap())] - db: TestSqlStore, + #[with(1, 1, false, TEST_SCHEMA_ID.parse().unwrap())] + runner: TestDatabaseRunner, ) { - // test db populated with 100 entries. - let populated_db = db.await; - // Get the author. - let author = Author::try_from( - populated_db - .key_pairs - .first() - .unwrap() - .public_key() - .to_owned(), - ) - .unwrap(); - - // Setup the server and client with a new empty store. - let (tx, _rx) = broadcast::channel(16); - let store = initialize_store().await; - let context = HttpServiceContext::new(store, tx); - let client = TestClient::new(build_server(context)); - - // Get the entries from the prepopulated store. - let mut entries = populated_db - .store - .get_entries_by_schema(&TEST_SCHEMA_ID.parse().unwrap()) - .await - .unwrap(); - - // Sort them by seq_num. - entries.sort_by_key(|entry| entry.seq_num().as_u64()); - - for entry in entries { + runner.with_db_teardown(|populated_db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(populated_db.store.clone(), tx); + let client = TestClient::new(build_server(context)); + + // Get the entries from the prepopulated store. + let mut entries = populated_db + .store + .get_entries_by_schema(&TEST_SCHEMA_ID.parse().unwrap()) + .await + .unwrap(); + + // Sort them by seq_num. + entries.sort_by_key(|entry| entry.seq_num().as_u64()); + + let duplicate_entry = entries.first().unwrap(); + // Prepare a publish entry request for each entry. let publish_entry_request = publish_entry_request( - entry.entry_signed().as_str(), - entry.operation_encoded().unwrap().as_str(), + duplicate_entry.entry_signed().as_str(), + duplicate_entry.operation_encoded().unwrap().as_str(), ); // Publish the entry and parse response. @@ -499,105 +622,13 @@ mod tests { .await; let response = response.json::().await; - let publish_entry_response = response.get("data").unwrap().get("publishEntry").unwrap(); - - // Calculate the skiplink we expect in the repsonse. 
- let next_seq_num = entry.seq_num().next().unwrap(); - let skiplink_seq_num = next_seq_num.skiplink_seq_num(); - let skiplink_entry = match skiplink_seq_num { - Some(seq_num) if is_lipmaa_required(next_seq_num.as_u64()) => populated_db - .store - .get_entry_at_seq_num(&author, &entry.log_id(), &seq_num) - .await - .unwrap() - .map(|entry| entry.hash().as_str().to_owned()), - _ => None, - }; - // Assert the returned log_id, seq_num, backlink and skiplink match our expectations. - assert_eq!( - publish_entry_response - .get("logId") - .unwrap() - .as_str() - .unwrap(), - "1" - ); - assert_eq!( - publish_entry_response - .get("seqNum") - .unwrap() - .as_str() - .unwrap(), - next_seq_num.as_u64().to_string() - ); - assert_eq!( - publish_entry_response - .get("skiplink") - .unwrap() - .as_str() - .map(|hash| hash.to_string()), - skiplink_entry - ); - assert_eq!( - publish_entry_response - .get("backlink") - .unwrap() - .as_str() - .unwrap(), - entry.hash().as_str() - ); - } - } - - #[rstest] - #[tokio::test] - async fn duplicate_publishing_of_entries( - #[from(test_db)] - #[future] - #[with(1, 1, false, TEST_SCHEMA_ID.parse().unwrap())] - db: TestSqlStore, - ) { - let populated_db = db.await; - - let (tx, _rx) = broadcast::channel(16); - let context = HttpServiceContext::new(populated_db.store.clone(), tx); - let client = TestClient::new(build_server(context)); - - // Get the entries from the prepopulated store. - let mut entries = populated_db - .store - .get_entries_by_schema(&TEST_SCHEMA_ID.parse().unwrap()) - .await - .unwrap(); - - // Sort them by seq_num. - entries.sort_by_key(|entry| entry.seq_num().as_u64()); - - let duplicate_entry = entries.first().unwrap(); - - // Prepare a publish entry request for each entry. - let publish_entry_request = publish_entry_request( - duplicate_entry.entry_signed().as_str(), - duplicate_entry.operation_encoded().unwrap().as_str(), - ); - - // Publish the entry and parse response. 
- let response = client - .post("/graphql") - .json(&json!({ - "query": publish_entry_request.query, - "variables": publish_entry_request.variables + // @TODO: This currently throws an internal SQL error to the API user, I think we'd + // like a nicer error message here: + // https://github.com/p2panda/aquadoggo/issues/159 + for error in response.get("errors").unwrap().as_array().unwrap() { + assert!(error.get("message").is_some()) } - )) - .send() - .await; - - let response = response.json::().await; - - // TODO: I think we'd like a nicer error message here: https://github.com/p2panda/aquadoggo/issues/159 - for error in response.get("errors").unwrap().as_array().unwrap() { - assert_eq!(error.get("message").unwrap().as_str().unwrap(), "Error occured during `LogStorage` request in storage provider: error returned from database: UNIQUE constraint failed: logs.author, logs.log_id") - } + }); } } diff --git a/aquadoggo/src/graphql/client/query.rs b/aquadoggo/src/graphql/client/query.rs index 0ecc1a6af..90a20e585 100644 --- a/aquadoggo/src/graphql/client/query.rs +++ b/aquadoggo/src/graphql/client/query.rs @@ -49,84 +49,88 @@ impl ClientRoot { mod tests { use async_graphql::Response; use p2panda_rs::entry::{LogId, SeqNum}; + use rstest::rstest; use serde_json::json; use tokio::sync::broadcast; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::graphql::client::EntryArgsResponse; use crate::http::build_server; use crate::http::HttpServiceContext; - use crate::test_helpers::{initialize_store, TestClient}; + use crate::test_helpers::TestClient; - #[tokio::test] - async fn next_entry_args_valid_query() { - let (tx, _) = broadcast::channel(16); - let store = initialize_store().await; - let context = HttpServiceContext::new(store, tx); - let client = TestClient::new(build_server(context)); + #[rstest] + fn next_entry_args_valid_query(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + let client = TestClient::new(build_server(context)); - // Selected fields need to be alphabetically sorted because that's what the `json` macro - // that is used in the assert below produces. - let response = client - .post("/graphql") - .json(&json!({ - "query": r#"{ - nextEntryArgs( - publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a" - ) { - logId, - seqNum, - backlink, - skiplink - } - }"#, - })) - .send() - .await - .json::() - .await; + // Selected fields need to be alphabetically sorted because that's what the `json` macro + // that is used in the assert below produces. 
+            let response = client
+                .post("/graphql")
+                .json(&json!({
+                    "query": r#"{
+                        nextEntryArgs(
+                            publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a"
+                        ) {
+                            logId,
+                            seqNum,
+                            backlink,
+                            skiplink
+                        }
+                    }"#,
+                }))
+                .send()
+                .await
+                .json::<Response>()
+                .await;
 
-        let expected_entry_args = EntryArgsResponse {
-            log_id: LogId::new(1),
-            seq_num: SeqNum::new(1).unwrap(),
-            backlink: None,
-            skiplink: None,
-        };
-        let received_entry_args: EntryArgsResponse = match response.data {
-            async_graphql::Value::Object(result_outer) => {
-                async_graphql::from_value(result_outer.get("nextEntryArgs").unwrap().to_owned())
-                    .unwrap()
-            }
-            _ => panic!("Expected return value to be an object"),
-        };
+            let expected_entry_args = EntryArgsResponse {
+                log_id: LogId::new(1),
+                seq_num: SeqNum::new(1).unwrap(),
+                backlink: None,
+                skiplink: None,
+            };
+            let received_entry_args: EntryArgsResponse = match response.data {
+                async_graphql::Value::Object(result_outer) => {
+                    async_graphql::from_value(result_outer.get("nextEntryArgs").unwrap().to_owned())
+                        .unwrap()
+                }
+                _ => panic!("Expected return value to be an object"),
+            };
 
-        assert_eq!(received_entry_args, expected_entry_args);
+            assert_eq!(received_entry_args, expected_entry_args);
+        })
     }
 
-    #[tokio::test]
-    async fn next_entry_args_error_response() {
-        let (tx, _) = broadcast::channel(16);
-        let store = initialize_store().await;
-        let context = HttpServiceContext::new(store, tx);
-        let client = TestClient::new(build_server(context));
+    #[rstest]
+    fn next_entry_args_error_response(#[from(test_db)] runner: TestDatabaseRunner) {
+        runner.with_db_teardown(move |db: TestDatabase| async move {
+            let (tx, _) = broadcast::channel(16);
+            let context = HttpServiceContext::new(db.store, tx);
+            let client = TestClient::new(build_server(context));
 
-        // Selected fields need to be alphabetically sorted because that's what the `json` macro
-        // that is used in the assert below produces.
-        let response = client
-            .post("/graphql")
-            .json(&json!({
-                "query": r#"{
+            // Selected fields need to be alphabetically sorted because that's what the `json` macro
+            // that is used in the assert below produces.
+ let response = client + .post("/graphql") + .json(&json!({ + "query": r#"{ nextEntryArgs(publicKey: "nope") { logId } }"#, - })) - .send() - .await; + })) + .send() + .await; - let response: Response = response.json().await; - assert_eq!( - response.errors[0].message, - "invalid hex encoding in author string" - ) + let response: Response = response.json().await; + assert_eq!( + response.errors[0].message, + "invalid hex encoding in author string" + ) + }) } } diff --git a/aquadoggo/src/http/service.rs b/aquadoggo/src/http/service.rs index 1651052d7..1fb9992c4 100644 --- a/aquadoggo/src/http/service.rs +++ b/aquadoggo/src/http/service.rs @@ -57,39 +57,42 @@ pub async fn http_service(context: Context, signal: Shutdown, tx: ServiceSender) #[cfg(test)] mod tests { + use rstest::rstest; use serde_json::json; use tokio::sync::broadcast; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::http::context::HttpServiceContext; - use crate::test_helpers::{initialize_store, TestClient}; + use crate::test_helpers::TestClient; use super::build_server; - #[tokio::test] - async fn graphql_endpoint() { - let (tx, _) = broadcast::channel(16); - let store = initialize_store().await; - let context = HttpServiceContext::new(store, tx); - let client = TestClient::new(build_server(context)); - - let response = client - .post("/graphql") - .json(&json!({ - "query": "{ __schema { __typename } }", - })) - .send() - .await; - - assert_eq!( - response.text().await, - json!({ - "data": { - "__schema": { - "__typename": "__Schema" + #[rstest] + fn graphql_endpoint(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + let (tx, _) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + let client = TestClient::new(build_server(context)); + + let response = client + .post("/graphql") + .json(&json!({ + "query": "{ __schema { __typename } }", + })) + .send() + .await; + + assert_eq!( + response.text().await, + json!({ + "data": { + "__schema": { + "__typename": "__Schema" + } } - } - }) - .to_string() - ); + }) + .to_string() + ); + }) } } diff --git a/aquadoggo/src/materializer/service.rs b/aquadoggo/src/materializer/service.rs index c323bf0d4..c7b5ceb5f 100644 --- a/aquadoggo/src/materializer/service.rs +++ b/aquadoggo/src/materializer/service.rs @@ -140,7 +140,7 @@ mod tests { use tokio::task; use crate::context::Context; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; use crate::materializer::{Task, TaskInput}; use crate::Configuration; @@ -148,142 +148,138 @@ mod tests { use super::materializer_service; #[rstest] - #[tokio::test] - async fn materialize_document_from_bus( + fn materialize_document_from_bus( #[from(test_db)] #[with(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("name", OperationValue::Text("panda".into()))])] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { // Prepare database which inserts data for one document - let db = db.await; - - // Identify document and operation which was inserted for testing - let document_id = db.documents.first().unwrap(); - let verified_operation = db - .store - .get_operations_by_document_id(document_id) - .await - .unwrap() - .first() - .unwrap() - .to_owned(); - - // We expect that the database does not contain any materialized document yet - assert!(db - .store - .get_document_by_id(document_id) - 
.await - .unwrap() - .is_none()); - - // Prepare arguments for service - let context = Context::new(db.store.clone(), Configuration::default()); - let shutdown = task::spawn(async { - loop { - // Do this forever .. this means that the shutdown handler will never resolve - tokio::time::sleep(Duration::from_millis(100)).await; - } - }); - let (tx, _) = broadcast::channel(1024); + runner.with_db_teardown(|db: TestDatabase| async move { + // Identify document and operation which was inserted for testing + let document_id = db.documents.first().unwrap(); + let verified_operation = db + .store + .get_operations_by_document_id(document_id) + .await + .unwrap() + .first() + .unwrap() + .to_owned(); - // Start materializer service - let tx_clone = tx.clone(); - let handle = tokio::spawn(async move { - materializer_service(context, shutdown, tx_clone) + // We expect that the database does not contain any materialized document yet + assert!(db + .store + .get_document_by_id(document_id) .await - .unwrap(); - }); + .unwrap() + .is_none()); + + // Prepare arguments for service + let context = Context::new(db.store.clone(), Configuration::default()); + let shutdown = task::spawn(async { + loop { + // Do this forever .. this means that the shutdown handler will never resolve + tokio::time::sleep(Duration::from_millis(100)).await; + } + }); + let (tx, _) = broadcast::channel(1024); + + // Start materializer service + let tx_clone = tx.clone(); + let handle = tokio::spawn(async move { + materializer_service(context, shutdown, tx_clone) + .await + .unwrap(); + }); + + // Wait for service to be ready .. + tokio::time::sleep(Duration::from_millis(50)).await; + + // Send a message over the bus which kicks in materialization + tx.send(crate::bus::ServiceMessage::NewOperation( + verified_operation.operation_id().to_owned(), + )) + .unwrap(); + + // Wait a little bit for work being done .. + tokio::time::sleep(Duration::from_millis(100)).await; - // Wait for service to be ready .. - tokio::time::sleep(Duration::from_millis(50)).await; - - // Send a message over the bus which kicks in materialization - tx.send(crate::bus::ServiceMessage::NewOperation( - verified_operation.operation_id().to_owned(), - )) - .unwrap(); - - // Wait a little bit for work being done .. 
- tokio::time::sleep(Duration::from_millis(100)).await; - - // Make sure the service did not crash and is still running - assert_eq!(handle.is_finished(), false); - - // Check database for materialized documents - let document = db - .store - .get_document_by_id(document_id) - .await - .unwrap() - .expect("We expect that the document is `Some`"); - assert_eq!(document.id().as_str(), document_id.as_str()); - assert_eq!( - document.fields().get("name").unwrap().value().to_owned(), - OperationValue::Text("panda".into()) - ); + // Make sure the service did not crash and is still running + assert_eq!(handle.is_finished(), false); + + // Check database for materialized documents + let document = db + .store + .get_document_by_id(document_id) + .await + .unwrap() + .expect("We expect that the document is `Some`"); + assert_eq!(document.id().as_str(), document_id.as_str()); + assert_eq!( + document.fields().get("name").unwrap().value().to_owned(), + OperationValue::Text("panda".into()) + ); + }); } #[rstest] - #[tokio::test] - async fn materialize_document_from_last_runtime( + fn materialize_document_from_last_runtime( #[from(test_db)] #[with(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("name", OperationValue::Text("panda".into()))])] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { // Prepare database which inserts data for one document - let db = db.await; - - // Identify document and operation which was inserted for testing - let document_id = db.documents.first().unwrap(); - - // Store a pending "reduce" task from last runtime in the database so it gets picked up by - // the materializer service - db.store - .insert_task(&Task::new( - "reduce", - TaskInput::new(Some(document_id.to_owned()), None), - )) - .await - .unwrap(); - - // Prepare arguments for service - let context = Context::new(db.store.clone(), Configuration::default()); - let shutdown = task::spawn(async { - loop { - // Do this forever .. this means that the shutdown handler will never resolve - tokio::time::sleep(Duration::from_millis(100)).await; - } - }); - let (tx, _) = broadcast::channel(1024); - - // Start materializer service - let tx_clone = tx.clone(); - let handle = tokio::spawn(async move { - materializer_service(context, shutdown, tx_clone) + runner.with_db_teardown(|db: TestDatabase| async move { + // Identify document and operation which was inserted for testing + let document_id = db.documents.first().unwrap(); + + // Store a pending "reduce" task from last runtime in the database so it gets picked up by + // the materializer service + db.store + .insert_task(&Task::new( + "reduce", + TaskInput::new(Some(document_id.to_owned()), None), + )) .await .unwrap(); - }); - // Wait for service to be done .. it should materialize the document since it was waiting - // as a "pending" task in the database - tokio::time::sleep(Duration::from_millis(100)).await; - - // Make sure the service did not crash and is still running - assert_eq!(handle.is_finished(), false); - - // Check database for materialized documents - let document = db - .store - .get_document_by_id(document_id) - .await - .unwrap() - .expect("We expect that the document is `Some`"); - assert_eq!(document.id().as_str(), document_id.as_str()); - assert_eq!( - document.fields().get("name").unwrap().value().to_owned(), - OperationValue::Text("panda".into()) - ); + // Prepare arguments for service + let context = Context::new(db.store.clone(), Configuration::default()); + let shutdown = task::spawn(async { + loop { + // Do this forever .. 
this means that the shutdown handler will never resolve + tokio::time::sleep(Duration::from_millis(100)).await; + } + }); + let (tx, _) = broadcast::channel(1024); + + // Start materializer service + let tx_clone = tx.clone(); + let handle = tokio::spawn(async move { + materializer_service(context, shutdown, tx_clone) + .await + .unwrap(); + }); + + // Wait for service to be done .. it should materialize the document since it was waiting + // as a "pending" task in the database + tokio::time::sleep(Duration::from_millis(100)).await; + + // Make sure the service did not crash and is still running + assert_eq!(handle.is_finished(), false); + + // Check database for materialized documents + let document = db + .store + .get_document_by_id(document_id) + .await + .unwrap() + .expect("We expect that the document is `Some`"); + assert_eq!(document.id().as_str(), document_id.as_str()); + assert_eq!( + document.fields().get("name").unwrap().value().to_owned(), + OperationValue::Text("panda".into()) + ); + }); } } diff --git a/aquadoggo/src/materializer/tasks/dependency.rs b/aquadoggo/src/materializer/tasks/dependency.rs index e9f8c5309..5f8fb929b 100644 --- a/aquadoggo/src/materializer/tasks/dependency.rs +++ b/aquadoggo/src/materializer/tasks/dependency.rs @@ -115,7 +115,9 @@ mod tests { use crate::config::Configuration; use crate::context::Context; - use crate::db::stores::test_utils::{insert_entry_operation_and_view, test_db, TestSqlStore}; + use crate::db::stores::test_utils::{ + insert_entry_operation_and_view, test_db, TestDatabase, TestDatabaseRunner, + }; use crate::db::traits::DocumentStore; use crate::materializer::tasks::reduce_task; use crate::materializer::TaskInput; @@ -123,183 +125,266 @@ mod tests { use super::dependency_task; #[rstest] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("profile_picture", OperationValue::Relation(Relation::new(random_document_id())))], - vec![]), 0)] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("favorite_book_images", OperationValue::RelationList(RelationList::new([0; 6].iter().map(|_|random_document_id()).collect())))], - vec![]), 0)] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("something_from_the_past", OperationValue::PinnedRelation(PinnedRelation::new(random_document_view_id())))], - vec![]), 1)] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("many_previous_drafts", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 2].iter().map(|_|random_document_view_id()).collect())))], - vec![]), 2)] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("one_relation_field", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 2].iter().map(|_|random_document_view_id()).collect()))), - ("another_relation_field", OperationValue::RelationList(RelationList::new([0; 6].iter().map(|_|random_document_id()).collect())))], - vec![]), 2)] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![("profile_picture", OperationValue::Relation(Relation::new(random_document_id())))], + vec![] + ), + 0 + )] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("favorite_book_images", OperationValue::RelationList( + RelationList::new( + [0; 6].iter().map(|_|random_document_id()).collect()))) + ], + vec![] + ), + 0 + )] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("something_from_the_past", OperationValue::PinnedRelation( + 
PinnedRelation::new(random_document_view_id()))) + ], + vec![] + ), + 1 + )] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("many_previous_drafts", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 2].iter().map(|_|random_document_view_id()).collect()))) + ], + vec![] + ), + 2 + )] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("one_relation_field", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 2].iter().map(|_|random_document_view_id()).collect()))), + ("another_relation_field", OperationValue::RelationList( + RelationList::new( + [0; 6].iter().map(|_|random_document_id()).collect()))) + ], + vec![] + ), + 2 + )] // This document has been updated - #[case(test_db(4, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("one_relation_field", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 2].iter().map(|_|random_document_view_id()).collect()))), - ("another_relation_field", OperationValue::RelationList(RelationList::new([0; 6].iter().map(|_|random_document_id()).collect())))], - vec![("one_relation_field", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 3].iter().map(|_|random_document_view_id()).collect()))), - ("another_relation_field", OperationValue::RelationList(RelationList::new([0; 10].iter().map(|_|random_document_id()).collect())))], - ), 3)] - #[tokio::test] - async fn dispatches_reduce_tasks_for_pinned_child_dependencies( - #[case] - #[future] - db: TestSqlStore, + #[case( + test_db( + 4, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("one_relation_field", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 2].iter().map(|_|random_document_view_id()).collect()))), + ("another_relation_field", OperationValue::RelationList( + RelationList::new( + [0; 6].iter().map(|_|random_document_id()).collect()))) + ], + vec![("one_relation_field", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 3].iter().map(|_|random_document_view_id()).collect()))), + ("another_relation_field", OperationValue::RelationList( + RelationList::new( + [0; 10].iter().map(|_|random_document_id()).collect()))) + ], + ), + 3 + )] + fn dispatches_reduce_tasks_for_pinned_child_dependencies( + #[case] runner: TestDatabaseRunner, #[case] expected_next_tasks: usize, ) { - let db = db.await; - let context = Context::new(db.store.clone(), Configuration::default()); - - for document_id in &db.documents { - let input = TaskInput::new(Some(document_id.clone()), None); - reduce_task(context.clone(), input).await.unwrap().unwrap(); - } - - for document_id in &db.documents { - let document_view = db - .store - .get_document_by_id(document_id) - .await - .unwrap() - .unwrap(); + runner.with_db_teardown(move |db: TestDatabase| async move { + let context = Context::new(db.store.clone(), Configuration::default()); - let input = TaskInput::new(None, Some(document_view.id().clone())); + for document_id in &db.documents { + let input = TaskInput::new(Some(document_id.clone()), None); + reduce_task(context.clone(), input).await.unwrap().unwrap(); + } - let reduce_tasks = dependency_task(context.clone(), input) - .await - .unwrap() - .unwrap(); - assert_eq!(reduce_tasks.len(), expected_next_tasks); - for task in reduce_tasks { - assert_eq!(task.worker_name(), "reduce") + for document_id in &db.documents { + let document_view = db + .store + .get_document_by_id(document_id) + .await + .unwrap() + .unwrap(); + + let input = TaskInput::new(None, 
Some(document_view.id().clone())); + + let reduce_tasks = dependency_task(context.clone(), input) + .await + .unwrap() + .unwrap(); + assert_eq!(reduce_tasks.len(), expected_next_tasks); + for task in reduce_tasks { + assert_eq!(task.worker_name(), "reduce") + } } - } + }); } #[rstest] - #[tokio::test] - async fn no_reduce_task_for_materialised_document_relations( + fn no_reduce_task_for_materialised_document_relations( #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let context = Context::new(db.store.clone(), Configuration::default()); - let document_id = db.documents[0].clone(); + runner.with_db_teardown(|db: TestDatabase| async move { + let context = Context::new(db.store.clone(), Configuration::default()); + let document_id = db.documents[0].clone(); - let input = TaskInput::new(Some(document_id.clone()), None); - reduce_task(context.clone(), input).await.unwrap().unwrap(); + let input = TaskInput::new(Some(document_id.clone()), None); + reduce_task(context.clone(), input).await.unwrap().unwrap(); - // Here we have one materialised document, (we are calling it a child as we will shortly be publishing parents) - // it contains relations which are not materialised yet so should dispatch a reduce task for each one. + // Here we have one materialised document, (we are calling it a child as we will + // shortly be publishing parents) it contains relations which are not materialised yet + // so should dispatch a reduce task for each one. + let document_view_of_child = db + .store + .get_document_by_id(&document_id) + .await + .unwrap() + .unwrap(); - let document_view_of_child = db - .store - .get_document_by_id(&document_id) - .await - .unwrap() - .unwrap(); - - let document_view_id_of_child = document_view_of_child.id(); - - // Create a new document referencing the existing materialised document. - - let operation = create_operation(&[ - ( - "pinned_relation_to_existing_document", - OperationValue::PinnedRelation(PinnedRelation::new( - document_view_id_of_child.clone(), - )), - ), - ( - "pinned_relation_to_not_existing_document", - OperationValue::PinnedRelation(PinnedRelation::new(random_document_view_id())), - ), - ]); - - let (_, document_view_id) = - insert_entry_operation_and_view(&db.store, &KeyPair::new(), None, &operation).await; - - // The new document should now dispatch one dependency task for the child relation which - // has not been materialised yet. - let input = TaskInput::new(None, Some(document_view_id.clone())); - let tasks = dependency_task(context.clone(), input) - .await - .unwrap() - .unwrap(); + let document_view_id_of_child = document_view_of_child.id(); + + // Create a new document referencing the existing materialised document. + + let operation = create_operation(&[ + ( + "pinned_relation_to_existing_document", + OperationValue::PinnedRelation(PinnedRelation::new( + document_view_id_of_child.clone(), + )), + ), + ( + "pinned_relation_to_not_existing_document", + OperationValue::PinnedRelation(PinnedRelation::new(random_document_view_id())), + ), + ]); + + let (_, document_view_id) = + insert_entry_operation_and_view(&db.store, &KeyPair::new(), None, &operation).await; + + // The new document should now dispatch one dependency task for the child relation which + // has not been materialised yet. 
+            let input = TaskInput::new(None, Some(document_view_id.clone()));
+            let tasks = dependency_task(context.clone(), input)
+                .await
+                .unwrap()
+                .unwrap();
 
-        assert_eq!(tasks.len(), 1);
-        assert_eq!(tasks[0].worker_name(), "reduce");
+            assert_eq!(tasks.len(), 1);
+            assert_eq!(tasks[0].worker_name(), "reduce");
+        });
     }
 
     #[rstest]
-    #[should_panic(expected = "Critical")]
     #[case(None, Some(random_document_view_id()))]
-    #[should_panic(expected = "Critical")]
     #[case(None, None)]
-    #[should_panic(expected = "Critical")]
     #[case(Some(random_document_id()), None)]
-    #[should_panic(expected = "Critical")]
     #[case(Some(random_document_id()), Some(random_document_view_id()))]
-    #[tokio::test]
-    async fn fails_correctly(
+    fn fails_correctly(
         #[case] document_id: Option<DocumentId>,
         #[case] document_view_id: Option<DocumentViewId>,
-        #[from(test_db)]
-        #[future]
-        db: TestSqlStore,
+        #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        let db = db.await;
-        let context = Context::new(db.store, Configuration::default());
-        let input = TaskInput::new(document_id, document_view_id);
+        runner.with_db_teardown(|db: TestDatabase| async move {
+            let context = Context::new(db.store.clone(), Configuration::default());
+            let input = TaskInput::new(document_id, document_view_id);
 
-        let next_tasks = dependency_task(context.clone(), input).await.unwrap();
-        assert!(next_tasks.is_none())
+            let next_tasks = dependency_task(context.clone(), input).await;
+            assert!(next_tasks.is_err())
+        });
     }
 
     #[rstest]
-    #[should_panic(expected = "Critical")]
-    #[case(test_db(2, 1, true, TEST_SCHEMA_ID.parse().unwrap(),
-        vec![("profile_picture", OperationValue::Relation(Relation::new(random_document_id())))],
-        vec![]))]
-    #[should_panic(expected = "Critical")]
-    #[case(test_db(2, 1, true, TEST_SCHEMA_ID.parse().unwrap(),
-        vec![("one_relation_field", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 2].iter().map(|_|random_document_view_id()).collect()))),
-        ("another_relation_field", OperationValue::RelationList(RelationList::new([0; 6].iter().map(|_|random_document_id()).collect())))],
-        vec![]))]
-    #[tokio::test]
-    async fn fails_on_deleted_documents(
-        #[case]
-        #[future]
-        db: TestSqlStore,
-    ) {
-        let db = db.await;
-        let context = Context::new(db.store.clone(), Configuration::default());
-        let document_id = db.documents[0].clone();
+    #[case(
+        test_db(
+            2,
+            1,
+            true,
+            TEST_SCHEMA_ID.parse().unwrap(),
+            vec![
+                ("profile_picture", OperationValue::Relation(
+                    Relation::new(random_document_id())))
+            ],
+            vec![]
+        )
+    )]
+    #[case(
+        test_db(
+            2,
+            1,
+            true,
+            TEST_SCHEMA_ID.parse().unwrap(),
+            vec![
+                ("one_relation_field", OperationValue::PinnedRelationList(
+                    PinnedRelationList::new(
+                        [0; 2].iter().map(|_|random_document_view_id()).collect()))),
+                ("another_relation_field", OperationValue::RelationList(
+                    RelationList::new(
+                        [0; 6].iter().map(|_|random_document_id()).collect())))
+            ],
+            vec![]
+        )
+    )]
+    fn fails_on_deleted_documents(#[case] runner: TestDatabaseRunner) {
+        runner.with_db_teardown(|db: TestDatabase| async move {
+            let context = Context::new(db.store.clone(), Configuration::default());
+            let document_id = db.documents[0].clone();
 
-        let input = TaskInput::new(Some(document_id.clone()), None);
-        reduce_task(context.clone(), input).await.unwrap();
+            let input = TaskInput::new(Some(document_id.clone()), None);
+            reduce_task(context.clone(), input).await.unwrap();
 
-        let document_operations = db
-            .store
-            .get_operations_by_document_id(&document_id)
-            .await
-            .unwrap();
+            let document_operations = db
+                .store
+                .get_operations_by_document_id(&document_id)
+                .await
+                .unwrap();
 
-        let document_view_id: DocumentViewId = document_operations[1].operation_id().clone().into();
+            let document_view_id: DocumentViewId =
+                document_operations[1].operation_id().clone().into();
 
-        let input = TaskInput::new(None, Some(document_view_id.clone()));
+            let input = TaskInput::new(None, Some(document_view_id.clone()));
 
-        dependency_task(context.clone(), input)
-            .await
-            .unwrap()
-            .unwrap();
+            let result = dependency_task(context.clone(), input).await;
+
+            assert!(result.is_err())
+        });
     }
 }
diff --git a/aquadoggo/src/materializer/tasks/reduce.rs b/aquadoggo/src/materializer/tasks/reduce.rs
index 279d8ad04..7ea613ea0 100644
--- a/aquadoggo/src/materializer/tasks/reduce.rs
+++ b/aquadoggo/src/materializer/tasks/reduce.rs
@@ -161,170 +161,193 @@ mod tests {
     use crate::config::Configuration;
     use crate::context::Context;
-    use crate::db::stores::test_utils::{test_db, TestSqlStore};
+    use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner};
     use crate::db::traits::DocumentStore;
     use crate::materializer::tasks::reduce_task;
     use crate::materializer::TaskInput;
 
     #[rstest]
-    #[tokio::test]
-    async fn reduces_documents(
+    fn reduces_documents(
         #[from(test_db)]
-        #[with(2, 20, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])]
-        #[future]
-        db: TestSqlStore,
+        #[with(
+            2,
+            20,
+            false,
+            TEST_SCHEMA_ID.parse().unwrap(),
+            vec![("username", OperationValue::Text("panda".into()))],
+            vec![("username", OperationValue::Text("PANDA".into()))]
+        )]
+        runner: TestDatabaseRunner,
     ) {
-        let db = db.await;
-        let context = Context::new(db.store, Configuration::default());
+        runner.with_db_teardown(|db: TestDatabase| async move {
+            let context = Context::new(db.store.clone(), Configuration::default());
 
-        for document_id in &db.documents {
-            let input = TaskInput::new(Some(document_id.clone()), None);
-            assert!(reduce_task(context.clone(), input).await.is_ok());
-        }
+            for document_id in &db.documents {
+                let input = TaskInput::new(Some(document_id.clone()), None);
+                assert!(reduce_task(context.clone(), input).await.is_ok());
+            }
 
-        for document_id in &db.documents {
-            let document_view = context.store.get_document_by_id(document_id).await.unwrap();
+            for document_id in &db.documents {
+                let document_view = context.store.get_document_by_id(document_id).await.unwrap();
 
-            assert_eq!(
-                document_view.unwrap().get("username").unwrap().value(),
-                &OperationValue::Text("PANDA".to_string())
-            )
-        }
+                assert_eq!(
+                    document_view.unwrap().get("username").unwrap().value(),
+                    &OperationValue::Text("PANDA".to_string())
+                )
+            }
+        });
     }
 
     #[rstest]
-    #[tokio::test]
-    async fn reduces_document_to_specific_view_id(
+    fn reduces_document_to_specific_view_id(
         #[from(test_db)]
-        #[with(2, 1, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])]
-        #[future]
-        db: TestSqlStore,
+        #[with(
+            2,
+            1,
+            false,
+            TEST_SCHEMA_ID.parse().unwrap(),
+            vec![("username", OperationValue::Text("panda".into()))],
+            vec![("username", OperationValue::Text("PANDA".into()))]
+        )]
+        runner: TestDatabaseRunner,
     ) {
-        let db = db.await;
-
-        let document_operations = db
-            .store
-            .get_operations_by_document_id(&db.documents[0])
-            .await
-            .unwrap();
-
-        let document = DocumentBuilder::new(document_operations).build().unwrap();
-        let mut sorted_document_operations = document.operations().clone();
-
-        let document_view_id: DocumentViewId = sorted_document_operations
-            .pop()
-            .unwrap()
-            .operation_id()
-            .clone()
-            .into();
-
-        let context = Context::new(db.store.clone(), Configuration::default());
-        let input = TaskInput::new(None, Some(document_view_id.clone()));
-
-        assert!(reduce_task(context.clone(), input).await.is_ok());
-
-        let document_view = db
-            .store
-            .get_document_view_by_id(&document_view_id)
-            .await
-            .unwrap();
-
-        assert_eq!(
-            document_view.unwrap().get("username").unwrap().value(),
-            &OperationValue::Text("PANDA".to_string())
-        );
-
-        // We didn't reduce this document_view_id so it shouldn't exist in the db.
-        let document_view_id: DocumentViewId = sorted_document_operations
-            .pop()
-            .unwrap()
-            .operation_id()
-            .clone()
-            .into();
-
-        let document_view = db
-            .store
-            .get_document_view_by_id(&document_view_id)
-            .await
-            .unwrap();
-
-        assert!(document_view.is_none());
+        runner.with_db_teardown(|db: TestDatabase| async move {
+            let document_operations = db
+                .store
+                .get_operations_by_document_id(&db.documents[0])
+                .await
+                .unwrap();
+
+            let document = DocumentBuilder::new(document_operations).build().unwrap();
+            let mut sorted_document_operations = document.operations().clone();
+
+            let document_view_id: DocumentViewId = sorted_document_operations
+                .pop()
+                .unwrap()
+                .operation_id()
+                .clone()
+                .into();
+
+            let context = Context::new(db.store.clone(), Configuration::default());
+            let input = TaskInput::new(None, Some(document_view_id.clone()));
+
+            assert!(reduce_task(context.clone(), input).await.is_ok());
+
+            let document_view = db
+                .store
+                .get_document_view_by_id(&document_view_id)
+                .await
+                .unwrap();
+
+            assert_eq!(
+                document_view.unwrap().get("username").unwrap().value(),
+                &OperationValue::Text("PANDA".to_string())
+            );
+
+            // We didn't reduce this document_view_id so it shouldn't exist in the db.
+            let document_view_id: DocumentViewId = sorted_document_operations
+                .pop()
+                .unwrap()
+                .operation_id()
+                .clone()
+                .into();
+
+            let document_view = db
+                .store
+                .get_document_view_by_id(&document_view_id)
+                .await
+                .unwrap();
+
+            assert!(document_view.is_none());
+        });
     }
 
     #[rstest]
-    #[tokio::test]
-    async fn deleted_documents_have_no_view(
+    fn deleted_documents_have_no_view(
         #[from(test_db)]
         #[with(3, 20, true)]
-        #[future]
-        db: TestSqlStore,
+        runner: TestDatabaseRunner,
     ) {
-        let db = db.await;
-        let context = Context::new(db.store.clone(), Configuration::default());
+        runner.with_db_teardown(|db: TestDatabase| async move {
+            let context = Context::new(db.store.clone(), Configuration::default());
 
-        for document_id in &db.documents {
-            let input = TaskInput::new(Some(document_id.clone()), None);
-            let tasks = reduce_task(context.clone(), input).await.unwrap();
-            assert!(tasks.is_none());
-        }
+            for document_id in &db.documents {
+                let input = TaskInput::new(Some(document_id.clone()), None);
+                let tasks = reduce_task(context.clone(), input).await.unwrap();
+                assert!(tasks.is_none());
+            }
 
-        for document_id in &db.documents {
-            let document_view = context.store.get_document_by_id(document_id).await.unwrap();
-            assert!(document_view.is_none())
-        }
+            for document_id in &db.documents {
+                let document_view = context.store.get_document_by_id(document_id).await.unwrap();
+                assert!(document_view.is_none())
+            }
 
-        let document_operations = context
-            .store
-            .get_operations_by_document_id(&db.documents[0])
-            .await
-            .unwrap();
+            let document_operations = context
+                .store
+                .get_operations_by_document_id(&db.documents[0])
+                .await
+                .unwrap();
 
-        let document = DocumentBuilder::new(document_operations).build().unwrap();
+            let document = DocumentBuilder::new(document_operations).build().unwrap();
 
-        let input = TaskInput::new(None, Some(document.view_id().clone()));
-        let tasks = reduce_task(context.clone(), input).await.unwrap();
+            let input = TaskInput::new(None, Some(document.view_id().clone()));
+            let tasks = reduce_task(context.clone(), input).await.unwrap();
 
-        assert!(tasks.is_none());
+            assert!(tasks.is_none());
+        });
     }
 
     #[rstest]
-    #[case(test_db(3, 1, false, TEST_SCHEMA_ID.parse().unwrap(),
-        vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))]), true)]
+    #[case(
+        test_db(
+            3,
+            1,
+            false,
+            TEST_SCHEMA_ID.parse().unwrap(),
+            vec![("username", OperationValue::Text("panda".into()))],
+            vec![("username", OperationValue::Text("PANDA".into()))]
+        ),
+        true
+    )]
     // This document is deleted, it shouldn't spawn a dependency task.
-    #[case(test_db(3, 1, true, TEST_SCHEMA_ID.parse().unwrap(),
-        vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))]), false)]
-    #[tokio::test]
-    async fn returns_dependency_task_inputs(
-        #[case]
-        #[future]
-        db: TestSqlStore,
+    #[case(
+        test_db(
+            3,
+            1,
+            true,
+            TEST_SCHEMA_ID.parse().unwrap(),
+            vec![("username", OperationValue::Text("panda".into()))],
+            vec![("username", OperationValue::Text("PANDA".into()))]
+        ),
+        false
+    )]
+    fn returns_dependency_task_inputs(
+        #[case] runner: TestDatabaseRunner,
         #[case] is_next_task: bool,
     ) {
-        let db = db.await;
-        let context = Context::new(db.store.clone(), Configuration::default());
-        let document_id = db.documents[0].clone();
+        runner.with_db_teardown(move |db: TestDatabase| async move {
+            let context = Context::new(db.store.clone(), Configuration::default());
+            let document_id = db.documents[0].clone();
 
-        let input = TaskInput::new(Some(document_id.clone()), None);
-        let next_task_inputs = reduce_task(context.clone(), input).await.unwrap();
+            let input = TaskInput::new(Some(document_id.clone()), None);
+            let next_task_inputs = reduce_task(context.clone(), input).await.unwrap();
 
-        assert_eq!(next_task_inputs.is_some(), is_next_task);
+            assert_eq!(next_task_inputs.is_some(), is_next_task);
+        });
     }
 
     #[rstest]
-    #[should_panic(expected = "Critical")]
     #[case(None, None)]
-    #[tokio::test]
-    async fn fails_correctly(
+    fn fails_correctly(
         #[case] document_id: Option<DocumentId>,
         #[case] document_view_id: Option<DocumentViewId>,
-        #[from(test_db)]
-        #[future]
-        db: TestSqlStore,
+        #[from(test_db)] runner: TestDatabaseRunner,
     ) {
-        let db = db.await;
-        let context = Context::new(db.store, Configuration::default());
-        let input = TaskInput::new(document_id, document_view_id);
+        runner.with_db_teardown(|db: TestDatabase| async move {
+            let context = Context::new(db.store.clone(), Configuration::default());
+            let input = TaskInput::new(document_id, document_view_id);
 
-        reduce_task(context.clone(), input).await.unwrap();
+            assert!(reduce_task(context.clone(), input).await.is_err());
+        });
     }
 }
diff --git a/aquadoggo/src/test_helpers.rs b/aquadoggo/src/test_helpers.rs
index 029324525..ebb8e70ab 100644
--- a/aquadoggo/src/test_helpers.rs
+++ b/aquadoggo/src/test_helpers.rs
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: AGPL-3.0-or-later
 
 use std::convert::TryFrom;
+use std::fmt::Debug;
 use std::net::{SocketAddr, TcpListener};
 
 use axum::body::HttpBody;
@@ -8,17 +9,37 @@ use axum::BoxError;
 use http::header::{HeaderName, HeaderValue};
 use http::{Request, StatusCode};
 use hyper::{Body, Server};
-use p2panda_rs::hash::Hash;
-use rand::Rng;
-use sqlx::any::Any;
-use sqlx::migrate::MigrateDatabase;
+use once_cell::sync::Lazy;
+use serde::Deserialize;
 use tower::make::Shared;
 use tower_service::Service;
 
-use crate::db::provider::SqlStorage;
-use crate::db::{connection_pool, create_database, run_pending_migrations, Pool};
+/// Configuration used in test helper methods.
+#[derive(Deserialize, Debug)]
+#[serde(default)]
+pub struct TestConfiguration {
+    /// Database url (sqlite or postgres)
+    pub database_url: String,
+}
+
+impl TestConfiguration {
+    /// Create a new configuration object for test environments.
+    pub fn new() -> Self {
+        envy::from_env::<TestConfiguration>()
+            .expect("Could not read environment variables for test configuration")
+    }
+}
+
+impl Default for TestConfiguration {
+    fn default() -> Self {
+        Self {
+            /// SQLite database stored in memory.
+            database_url: "sqlite::memory:".into(),
+        }
+    }
+}
 
-const DB_URL: &str = "sqlite::memory:";
+pub static TEST_CONFIG: Lazy<TestConfiguration> = Lazy::new(|| TestConfiguration::new());
 
 pub(crate) struct TestClient {
     client: reqwest::Client,
@@ -127,39 +148,3 @@ impl TestResponse {
         self.response.status()
     }
 }
-
-// Create test database
-pub async fn initialize_db() -> Pool {
-    // Reset database first
-    drop_database().await;
-    create_database(DB_URL).await.unwrap();
-
-    // Create connection pool and run all migrations
-    let pool = connection_pool(DB_URL, 5).await.unwrap();
-    run_pending_migrations(&pool).await.unwrap();
-
-    pool
-}
-
-// Create storage provider API around test database
-pub async fn initialize_store() -> SqlStorage {
-    let pool = initialize_db().await;
-    SqlStorage::new(pool)
-}
-
-// Delete test database
-pub async fn drop_database() {
-    if Any::database_exists(DB_URL).await.unwrap() {
-        Any::drop_database(DB_URL).await.unwrap();
-    }
-}
-
-// Generate random entry hash
-pub fn random_entry_hash() -> String {
-    let random_data = rand::thread_rng().gen::<[u8; 32]>().to_vec();
-
-    Hash::new_from_bytes(random_data)
-        .unwrap()
-        .as_str()
-        .to_owned()
-}
diff --git a/aquadoggo_cli/README.md b/aquadoggo_cli/README.md
index b99156767..c3f4099ad 100644
--- a/aquadoggo_cli/README.md
+++ b/aquadoggo_cli/README.md
@@ -18,7 +18,7 @@ OPTIONS:
 
 ## Environment variables
 
-* `DATABASE_URL` Database url (SQLite, MySQL, PostgreSQL) (default `sqlite:/aquadoggo-node.sqlite3`).
+* `DATABASE_URL` Database url (SQLite, PostgreSQL) (default `sqlite:/aquadoggo-node.sqlite3`).
 * `DATABASE_MAX_CONNECTIONS` Maximum number of database connections in pool (default `32`).
 * `HTTP_PORT` RPC API HTTP server port (default `2020`).
 * `HTTP_THREADS` Number of HTTP server threads to run (default `4`).
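For illustration only, not part of the patch above: both the CLI and the test helpers read `DATABASE_URL`, so a minimal sketch of how test code could open a connection pool against whichever database that variable points at, reusing the `TEST_CONFIG` static introduced in `aquadoggo/src/test_helpers.rs`, might look as follows. The helper name `connect_test_db` and the pool size of 5 are hypothetical, and sqlx's `Any` driver is assumed so the same call covers both the in-memory SQLite default and a PostgreSQL url.

```rust
use sqlx::any::{Any, AnyPoolOptions};
use sqlx::Pool;

use crate::test_helpers::TEST_CONFIG;

/// Open a small connection pool against the configured test database.
///
/// With no `DATABASE_URL` exported this connects to the in-memory SQLite
/// default from `TestConfiguration`; exporting a `postgresql://...` url
/// points the same code at PostgreSQL instead.
async fn connect_test_db() -> Pool<Any> {
    AnyPoolOptions::new()
        .max_connections(5)
        .connect(&TEST_CONFIG.database_url)
        .await
        .expect("Could not connect to test database")
}
```

Switching the whole suite over is then just a matter of exporting a PostgreSQL `DATABASE_URL` before running `cargo test`; leaving it unset keeps the tests on SQLite.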