diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 38806ce4e..8da77bd35 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -43,10 +43,58 @@ jobs: with: command: test args: --manifest-path ${{ env.cargo_manifest }} - # Ensure debug output is also tested env: + # Ensure debug output is also tested RUST_LOG: debug + rust-test-postgres: + runs-on: ubuntu-latest + + services: + postgres: + image: postgres:latest + env: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: aquadoggo-development + ports: + # Maps TCP port 5432 on service container to the host + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Setup Rust toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Restore from cargo cache + uses: actions/cache@v3 + with: + path: ${{ env.cache_path }} + key: ${{ runner.os }}-test-${{ hashFiles('**/Cargo.lock') }} + + - name: Run tests + uses: actions-rs/cargo@v1 + with: + command: test + # Make sure the tests run consecutively to avoid accessing the same + # database by multiple test threads + args: >- + --manifest-path ${{ env.cargo_manifest }} + -- --test-threads 1 + env: + DATABASE_URL: postgresql://postgres:postgres@localhost:5432/aquadoggo-development + rust-check: runs-on: ubuntu-latest @@ -151,9 +199,12 @@ jobs: uses: actions-rs/tarpaulin@v0.1 with: # Force cleaning via `--force-clean` flag to prevent buggy code coverage - args: --manifest-path ${{ env.cargo_manifest }} --locked --force-clean - # Ensure debug output is also tested + args: >- + --manifest-path ${{ env.cargo_manifest }} + --locked + --force-clean env: + # Ensure debug output is also tested RUST_LOG: debug - name: Upload to codecov.io diff --git a/CHANGELOG.md b/CHANGELOG.md index 99f3410af..ad6e20993 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Reduce and dependency tasks [#144](https://github.com/p2panda/aquadoggo/pull/144) - GraphQL endpoints for replication [#100](https://github.com/p2panda/aquadoggo/pull/100) - Inform materialization service about new operations [#161](https://github.com/p2panda/aquadoggo/pull/161) +- e2e publish entry tests [#167](https://github.com/p2panda/aquadoggo/pull/167) - Reschedule pending tasks on startup [#168](https://github.com/p2panda/aquadoggo/pull/168) - Add schema task and schema provider that update when new schema views are materialised [#166](https://github.com/p2panda/aquadoggo/pull/166/files) @@ -44,10 +45,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fix high CPU usage of idle workers [#136](https://github.com/p2panda/aquadoggo/pull/136) - Improve CI, track test coverage [#139](https://github.com/p2panda/aquadoggo/pull/139) +- Fix compatibility with PostgreSQL, change sqlx runtime to `tokio` [#170](https://github.com/p2panda/aquadoggo/pull/170) +- Use UPSERT for inserting or updating documents [#173](https://github.com/p2panda/aquadoggo/pull/173) ## [0.2.0] -*Please note: `aquadoggo-rs` crate is not published yet, due to unpublished dependencies.* +_Please note: `aquadoggo-rs` crate is not published yet, due to unpublished dependencies._ ### Changed @@ -80,6 +83,6 @@ Released on 2021-10-25: :package: [`crate`](https://crates.io/crates/aquadoggo/0 - Use 
p2panda-rs 0.2.1 with fixed linter setting [#41](https://github.com/p2panda/aquadoggo/41) - Use `tide` for HTTP server and `jsonrpc-v2` for JSON RPC [#29](https://github.com/p2panda/aquadoggo/29) -[Unreleased]: https://github.com/p2panda/aquadoggo/compare/v0.2.0...HEAD +[unreleased]: https://github.com/p2panda/aquadoggo/compare/v0.2.0...HEAD [0.2.0]: https://github.com/p2panda/aquadoggo/releases/tag/v0.2.0 [0.1.0]: https://github.com/p2panda/aquadoggo/releases/tag/v0.1.0 diff --git a/Cargo.lock b/Cargo.lock index 4f5685ce6..dec07a1e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -134,6 +134,7 @@ dependencies = [ "lru", "mockall", "mockall_double", + "once_cell", "openssl-probe", "p2panda-rs", "rand 0.8.5", @@ -194,46 +195,6 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71938f30533e4d95a6d17aa530939da3842c2ab6f4f84b9dae68447e4129f74a" -[[package]] -name = "async-channel" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" -dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "async-executor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "once_cell", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8b508d585e01084059b60f06ade4cb7415cd2e4084b71dd1cb44e7d3fb9880" -dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-lock", - "blocking", - "futures-lite", - "once_cell", -] - [[package]] name = "async-graphql" version = "3.0.38" @@ -323,51 +284,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "async-io" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5e18f61464ae81cde0a23e713ae8fd299580c54d697a35820cfd0625b8b0e07" -dependencies = [ - "concurrent-queue", - "futures-lite", - "libc", - "log", - "once_cell", - "parking", - "polling", - "slab", - "socket2", - "waker-fn", - "winapi", -] - -[[package]] -name = "async-lock" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-process" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2c06e30a24e8c78a3987d07f0930edf76ef35e027e7bdb063fccafdad1f60c" -dependencies = [ - "async-io", - "blocking", - "cfg-if", - "event-listener", - "futures-lite", - "libc", - "once_cell", - "signal-hook", - "winapi", -] - [[package]] name = "async-recursion" version = "1.0.0" @@ -379,44 +295,6 @@ dependencies = [ "syn", ] -[[package]] -name = "async-rustls" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c86f33abd5a4f3e2d6d9251a9e0c6a7e52eb1113caf893dae8429bf4a53f378" -dependencies = [ - "futures-lite", - "rustls", - "webpki", -] - -[[package]] -name = "async-std" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" -dependencies = [ - "async-channel", - "async-global-executor", - 
"async-io", - "async-lock", - "async-process", - "crossbeam-utils", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - [[package]] name = "async-stream" version = "0.3.3" @@ -438,12 +316,6 @@ dependencies = [ "syn", ] -[[package]] -name = "async-task" -version = "4.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30696a84d817107fc028e049980e09d5e140e8da8f1caeb17e8e950658a3cea9" - [[package]] name = "async-trait" version = "0.1.56" @@ -464,12 +336,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "atomic-waker" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" - [[package]] name = "atty" version = "0.2.14" @@ -481,15 +347,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "autocfg" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -594,9 +451,9 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "base64ct" -version = "1.1.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6b4d9b1225d28d360ec6a231d65af1fd99a2a095154c8040689617290569c5c" +checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" [[package]] name = "bitflags" @@ -668,20 +525,6 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "blocking" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ccb65d468978a086b69884437ded69a90faab3bbe6e67f242173ea728acccc" -dependencies = [ - "async-channel", - "async-task", - "atomic-waker", - "fastrand", - "futures-lite", - "once_cell", -] - [[package]] name = "bumpalo" version = "3.10.0" @@ -709,12 +552,6 @@ dependencies = [ "serde", ] -[[package]] -name = "cache-padded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" - [[package]] name = "cc" version = "1.0.73" @@ -834,15 +671,6 @@ dependencies = [ "unreachable", ] -[[package]] -name = "concurrent-queue" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" -dependencies = [ - "cache-padded", -] - [[package]] name = "console_error_panic_hook" version = "0.1.7" @@ -853,12 +681,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "const-oid" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" - [[package]] name = "const-oid" version = "0.7.1" @@ -928,7 +750,7 @@ version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" dependencies = [ - "autocfg 1.1.0", + "autocfg", "cfg-if", "crossbeam-utils", "memoffset", @@ -948,25 +770,14 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" +checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83" dependencies = [ "cfg-if", "once_cell", ] -[[package]] -name = "crypto-bigint" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83bd3bb4314701c568e340cd8cf78c975aa0ca79e03d3f6d1677d5b0c9c0c03" -dependencies = [ - "generic-array 0.14.5", - "rand_core 0.6.3", - "subtle", -] - [[package]] name = "crypto-bigint" version = "0.3.2" @@ -1089,23 +900,13 @@ dependencies = [ "tokio", ] -[[package]] -name = "der" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b71cca7d95d7681a4b3b9cdf63c8dbc3730d0584c2c74e31416d64a90493f4" -dependencies = [ - "const-oid 0.6.2", - "crypto-bigint 0.2.11", -] - [[package]] name = "der" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" dependencies = [ - "const-oid 0.7.1", + "const-oid", ] [[package]] @@ -1196,7 +997,7 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" dependencies = [ - "der 0.5.1", + "der", "elliptic-curve", "rfc6979", "signature", @@ -1241,8 +1042,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" dependencies = [ "base16ct", - "crypto-bigint 0.3.2", - "der 0.5.1", + "crypto-bigint", + "der", "ff", "generic-array 0.14.5", "group", @@ -1471,21 +1272,6 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" -[[package]] -name = "futures-lite" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - [[package]] name = "futures-macro" version = "0.3.21" @@ -1599,18 +1385,6 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" -[[package]] -name = "gloo-timers" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "graphql-introspection-query" version = "0.2.0" @@ -1649,7 +1423,7 @@ checksum = "b4bf9cd823359d74ad3d3ecf1afd4a975f4ff2f891cdf9a66744606daf52de8c" dependencies = [ "graphql-introspection-query", "graphql-parser", - "heck", + "heck 0.3.3", "lazy_static", "proc-macro2", "quote", @@ -1763,6 +1537,15 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "hermit-abi" version = "0.1.19" @@ -1941,11 +1724,11 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6c6392766afd7964e2531940894cffe4bd8d7d17dbc3c1c4857040fd4b33bdb3" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ - "autocfg 1.1.0", + "autocfg", "hashbrown 0.12.1", "serde", ] @@ -2005,23 +1788,11 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -dependencies = [ - "spin 0.5.2", -] [[package]] name = "libc" @@ -2029,17 +1800,11 @@ version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" -[[package]] -name = "libm" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" - [[package]] name = "libsqlite3-sys" -version = "0.23.2" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cafc7c74096c336d9d27145f7ebd4f4b6f95ba16aa5a282387267e6925cb58" +checksum = "898745e570c7d0453cc1fbc4a701eb6c662ed54e8fec8b7d14be137ebeeb9d14" dependencies = [ "cc", "pkg-config", @@ -2064,7 +1829,7 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ - "autocfg 1.1.0", + "autocfg", "scopeguard", ] @@ -2075,7 +1840,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", - "value-bag", ] [[package]] @@ -2107,13 +1871,11 @@ checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] name = "md-5" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5a279bb9607f9f53c22d496eade00d138d1bdcccd07d74650387cf94942a15" +checksum = "658646b21e0b72f7866c7038ab086d3d5e1cd6271f060fd37defb241949d0582" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest 0.10.3", ] [[package]] @@ -2128,7 +1890,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ - "autocfg 1.1.0", + "autocfg", ] [[package]] @@ -2249,64 +2011,13 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-bigint-dig" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4547ee5541c18742396ae2c895d0717d0f886d8823b8399cdaf7b07d63ad0480" -dependencies = [ - "autocfg 0.1.8", - "byteorder", - 
"lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand 0.8.5", - "smallvec", - "zeroize", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg 1.1.0", - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" -dependencies = [ - "autocfg 1.1.0", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ - "autocfg 1.1.0", - "libm", + "autocfg", ] [[package]] @@ -2362,7 +2073,7 @@ dependencies = [ "thiserror", "tls_codec", "typetag", - "uuid 1.1.2", + "uuid", ] [[package]] @@ -2472,12 +2183,6 @@ dependencies = [ "sec1", ] -[[package]] -name = "parking" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" - [[package]] name = "parking_lot" version = "0.11.2" @@ -2532,15 +2237,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" -[[package]] -name = "pem-rfc7468" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84e93a3b1cc0510b03020f33f21e62acdde3dcaef432edc95bea377fbd4c2cd4" -dependencies = [ - "base64ct", -] - [[package]] name = "percent-encoding" version = "2.1.0" @@ -2622,38 +2318,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs1" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "116bee8279d783c0cf370efa1a94632f2108e5ef0bb32df31f051647810a4e2c" -dependencies = [ - "der 0.4.5", - "pem-rfc7468", - "zeroize", -] - -[[package]] -name = "pkcs8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3ef9b64d26bad0536099c816c6734379e45bbd5f14798def6809e5cc350447" -dependencies = [ - "der 0.4.5", - "pem-rfc7468", - "pkcs1", - "spki 0.4.1", - "zeroize", -] - [[package]] name = "pkcs8" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" dependencies = [ - "der 0.5.1", - "spki 0.5.4", + "der", + "spki", "zeroize", ] @@ -2663,19 +2335,6 @@ version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" -[[package]] -name = "polling" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" -dependencies = [ - "cfg-if", - "libc", - "log", - "wepoll-ffi", - "winapi", -] - [[package]] name = "poly1305" version = "0.7.2" @@ -2864,7 +2523,7 @@ version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = [ - "autocfg 1.1.0", + "autocfg", 
"crossbeam-deque", "either", "rayon-core", @@ -2969,7 +2628,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" dependencies = [ - "crypto-bigint 0.3.2", + "crypto-bigint", "hmac 0.11.0", "zeroize", ] @@ -3002,26 +2661,6 @@ dependencies = [ "text-size", ] -[[package]] -name = "rsa" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e05c2603e2823634ab331437001b411b9ed11660fbc4066f3908c84a9439260d" -dependencies = [ - "byteorder", - "digest 0.9.0", - "lazy_static", - "num-bigint-dig", - "num-integer", - "num-iter", - "num-traits", - "pkcs1", - "pkcs8 0.7.6", - "rand 0.8.5", - "subtle", - "zeroize", -] - [[package]] name = "rstest" version = "0.12.0" @@ -3132,7 +2771,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34cdc022b4f606353fe5dc85b09713a04e433323b70163e81513b141c6ae6eb5" dependencies = [ - "heck", + "heck 0.3.3", "proc-macro2", "quote", "syn", @@ -3145,9 +2784,9 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ - "der 0.5.1", + "der", "generic-array 0.14.5", - "pkcs8 0.8.0", + "pkcs8", "subtle", "zeroize", ] @@ -3272,19 +2911,6 @@ dependencies = [ "opaque-debug 0.2.3", ] -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - [[package]] name = "sha-1" version = "0.10.0" @@ -3320,16 +2946,6 @@ dependencies = [ "digest 0.10.3", ] -[[package]] -name = "signal-hook" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" -dependencies = [ - "libc", - "signal-hook-registry", -] - [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -3357,9 +2973,9 @@ checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "smallvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "cc88c725d61fc6c3132893370cac4a0200e3fedf5da8331c570664b1987f5ca2" [[package]] name = "snafu" @@ -3407,15 +3023,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spki" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c01a0c15da1b0b0e1494112e7af814a678fec9bd157881b49beac661e9b6f32" -dependencies = [ - "der 0.4.5", -] - [[package]] name = "spki" version = "0.5.4" @@ -3423,7 +3030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" dependencies = [ "base64ct", - "der 0.5.1", + "der", ] [[package]] @@ -3439,9 +3046,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.5.11" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc15591eb44ffb5816a4a70a7efd5dd87bfd3aa84c4c200401c4396140525826" +checksum = "551873805652ba0d912fec5bbb0f8b4cdd96baf8e2ebf5970e5671092966019b" dependencies = [ "sqlx-core", "sqlx-macros", @@ -3449,9 +3056,9 @@ 
dependencies = [ [[package]] name = "sqlx-core" -version = "0.5.11" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195183bf6ff8328bb82c0511a83faf60aacf75840103388851db61d7a9854ae3" +checksum = "e48c61941ccf5ddcada342cd59e3e5173b007c509e1e8e990dafc830294d9dc5" dependencies = [ "ahash", "atoi", @@ -3461,20 +3068,19 @@ dependencies = [ "bytes", "crc", "crossbeam-queue", - "digest 0.9.0", "dirs", "either", - "encoding_rs", + "event-listener", "flume", "futures-channel", "futures-core", "futures-executor", "futures-intrusive", "futures-util", - "generic-array 0.14.5", "hashlink", "hex", - "hmac 0.11.0", + "hkdf", + "hmac 0.12.1", "indexmap", "itoa", "libc", @@ -3482,25 +3088,22 @@ dependencies = [ "log", "md-5", "memchr", - "num-bigint", "once_cell", "paste", "percent-encoding", "rand 0.8.5", - "regex", - "rsa", "rustls", "serde", "serde_json", - "sha-1 0.9.8", - "sha2 0.9.9", + "sha-1 0.10.0", + "sha2 0.10.2", "smallvec", "sqlformat", "sqlx-rt", "stringprep", "thiserror", + "tokio-stream", "url", - "uuid 0.8.2", "webpki", "webpki-roots", "whoami", @@ -3508,17 +3111,17 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.5.11" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee35713129561f5e55c554bba1c378e2a7e67f81257b7311183de98c50e6f94" +checksum = "bc0fba2b0cae21fc00fe6046f8baa4c7fcb49e379f0f592b04696607f69ed2e1" dependencies = [ "dotenv", "either", - "heck", + "heck 0.4.0", "once_cell", "proc-macro2", "quote", - "sha2 0.9.9", + "sha2 0.10.2", "sqlx-core", "sqlx-rt", "syn", @@ -3531,8 +3134,9 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae" dependencies = [ - "async-rustls", - "async-std", + "once_cell", + "tokio", + "tokio-rustls", ] [[package]] @@ -3586,7 +3190,7 @@ version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ - "heck", + "heck 0.3.3", "proc-macro-error", "proc-macro2", "quote", @@ -3599,7 +3203,7 @@ version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bb0dc7ee9c15cea6199cde9a127fa16a4c5819af85395457ad72d68edc85a38" dependencies = [ - "heck", + "heck 0.3.3", "proc-macro2", "quote", "rustversion", @@ -3779,6 +3383,28 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-tungstenite" version = "0.17.1" @@ -3908,9 +3534,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" +checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ "once_cell", ] @@ -3996,9 +3622,9 @@ checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" 
[[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "81dee68f85cab8cf68dec42158baf3a79a1cdc065a8b103025965d6ccb7f6cbd" dependencies = [ "tinyvec", ] @@ -4070,12 +3696,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" - [[package]] name = "uuid" version = "1.1.2" @@ -4085,16 +3705,6 @@ dependencies = [ "getrandom 0.2.7", ] -[[package]] -name = "value-bag" -version = "1.0.0-alpha.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - "version_check", -] - [[package]] name = "varu64" version = "0.6.2" @@ -4125,12 +3735,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - [[package]] name = "want" version = "0.3.0" @@ -4248,15 +3852,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "whoami" version = "1.2.1" diff --git a/README.md b/README.md index 53311ac98..d91da41e7 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ Configurable node server implementation for the [`p2panda`] network running as a - Awaits signed operations from clients via GraphQL. - Verifies the consistency, format and signature of operations and rejects invalid ones. -- Stores operations of the network in a SQL database of your choice (SQLite, PostgreSQL or MySQL). +- Stores operations of the network in an SQL database of your choice (SQLite, PostgreSQL). - Materializes views on top of the known data. - Answers filterable and paginated data queries via GraphQL. - Discovers other nodes in local network and internet. 
diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index c09cd5943..c82ba39b8 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -26,6 +26,7 @@ deadqueue = { version = "0.2.2", default-features = false, features = [ ] } directories = "3.0.2" envy = "0.4.2" +futures = "0.3.21" graphql_client = "0.10" hex = "0.4.3" lipmaa-link = "0.2.2" @@ -42,8 +43,10 @@ serde = { version = "1.0.130", features = ["derive"] } serde_json = "1.0.67" sqlformat = "0.1.7" sqlx = { version = "0.5.7", features = [ - "all-databases", - "runtime-async-std-rustls", + "any", + "postgres", + "sqlite", + "runtime-tokio-rustls", ] } thiserror = "1.0.29" tokio = { version = "1.17.0", features = [ @@ -61,12 +64,12 @@ triggered = "0.1.2" apollo-parser = "0.2.6" sea-query = "0.24.6" async-recursion = "1.0.0" -futures = "0.3.21" [dev-dependencies] env_logger = "0.9.0" hyper = "0.14.17" http = "0.2.6" +once_cell = "1.12.0" reqwest = { version = "0.11.9", default-features = false, features = [ "json", "stream", diff --git a/aquadoggo/README.md b/aquadoggo/README.md index 58771b673..d117c8288 100644 --- a/aquadoggo/README.md +++ b/aquadoggo/README.md @@ -45,11 +45,11 @@ Configurable node server implementation for the [`p2panda`] network which can be ## Features -- Awaits signed operations from clients via a JSON RPC API. +- Awaits signed operations from clients via GraphQL. - Verifies the consistency, format and signature of operations and rejects invalid ones. -- Stores operations of the network in a SQL database of your choice (SQLite, PostgreSQL or MySQL). +- Stores operations of the network in an SQL database of your choice (SQLite, PostgreSQL). - Materializes views on top of the known data. -- Answers filterable and paginated data queries. +- Answers filterable and paginated data queries via GraphQL. - Discovers other nodes in local network and internet. - Replicates data with other nodes. diff --git a/aquadoggo/migrations/20220509090252_create-operations.sql b/aquadoggo/migrations/20220509090252_create-operations.sql index 7147320ff..411fab6b6 100644 --- a/aquadoggo/migrations/20220509090252_create-operations.sql +++ b/aquadoggo/migrations/20220509090252_create-operations.sql @@ -15,7 +15,7 @@ CREATE TABLE IF NOT EXISTS operation_fields_v1 ( operation_id TEXT NOT NULL, name TEXT NOT NULL, field_type TEXT NOT NULL, - value BLOB NULL, + value TEXT NULL, list_index NUMERIC NOT NULL, FOREIGN KEY(operation_id) REFERENCES operations_v1(operation_id) ); diff --git a/aquadoggo/migrations/20220617115933_create-tasks.sql b/aquadoggo/migrations/20220617115933_create-tasks.sql index b351d722a..65c14ab83 100644 --- a/aquadoggo/migrations/20220617115933_create-tasks.sql +++ b/aquadoggo/migrations/20220617115933_create-tasks.sql @@ -11,8 +11,8 @@ CREATE TABLE IF NOT EXISTS tasks ( -- but we want to check for equality including `null` values. CREATE UNIQUE INDEX ux_tasks ON tasks ( name, - COALESCE(document_id, 0), - COALESCE(document_view_id, 0) + COALESCE(document_id, '0'), + COALESCE(document_view_id, '0') ); -- Create an index because primary keys can not contain `null` columns. diff --git a/aquadoggo/src/config.rs b/aquadoggo/src/config.rs index cf03f6f7e..810ba3081 100644 --- a/aquadoggo/src/config.rs +++ b/aquadoggo/src/config.rs @@ -27,7 +27,7 @@ pub struct Configuration { /// Path to data directory. pub base_path: Option, - /// Database url (sqlite, mysql or postgres). + /// Database url (SQLite or PostgreSQL). pub database_url: Option, /// Maximum number of database connections in pool. 
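The switch to sqlx's `any` + `runtime-tokio-rustls` feature set above is the core of this change. As a rough orientation (not part of the patch itself), the sketch below shows how such a feature set is typically exercised: a pool opened through the `Any` driver from the configured `database_url`, so one code path serves both SQLite and PostgreSQL. The URL values, pool size and helper names are illustrative assumptions, not values taken from this changeset.

```rust
// Hypothetical standalone sketch, assuming sqlx 0.5 with the "any", "sqlite",
// "postgres" and "runtime-tokio-rustls" features plus a tokio runtime.
use sqlx::any::AnyPoolOptions;

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // For example "sqlite::memory:" or
    // "postgresql://postgres:postgres@localhost:5432/aquadoggo-development"
    // (the latter matches the DATABASE_URL used by the new CI job).
    let database_url =
        std::env::var("DATABASE_URL").unwrap_or_else(|_| "sqlite::memory:".to_string());

    // The `Any` driver picks the SQLite or PostgreSQL backend from the URL
    // scheme at runtime.
    let pool = AnyPoolOptions::new()
        .max_connections(32)
        .connect(&database_url)
        .await?;

    // Smoke test: a statement that is valid on both backends.
    sqlx::query("SELECT 1").execute(&pool).await?;

    Ok(())
}
```

Because the runtime feature is now `runtime-tokio-rustls`, such a pool runs on the node's existing tokio runtime instead of pulling in `async-std`, which is why the large block of `async-std`-related packages disappears from `Cargo.lock` above.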
diff --git a/aquadoggo/src/db/mod.rs b/aquadoggo/src/db/mod.rs index 22f0e6bea..aeb6faaaa 100644 --- a/aquadoggo/src/db/mod.rs +++ b/aquadoggo/src/db/mod.rs @@ -21,8 +21,6 @@ pub async fn create_database(url: &str) -> Result<()> { Any::create_database(url).await?; } - Any::drop_database(url); - Ok(()) } diff --git a/aquadoggo/src/db/models/operation.rs b/aquadoggo/src/db/models/operation.rs index bca601e88..a1b99e581 100644 --- a/aquadoggo/src/db/models/operation.rs +++ b/aquadoggo/src/db/models/operation.rs @@ -23,8 +23,9 @@ pub struct OperationRow { /// The id of the schema this operation follows. pub schema_id: String, - /// The previous operations of this operation concatenated into string format with `_` seperator. - pub previous_operations: String, + /// The previous operations of this operation concatenated into string format with `_` + /// separator. + pub previous_operations: Option, } /// A struct representing a single operation field row as it is inserted in the database. @@ -34,13 +35,19 @@ pub struct OperationFieldRow { pub operation_id: String, /// The name of this field. - pub name: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub name: Option, /// The type of this field. - pub field_type: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub field_type: Option, /// The actual value contained in this field. - pub value: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub value: Option, } /// A struct representing a joined OperationRow and OperationFieldRow. @@ -64,15 +71,22 @@ pub struct OperationFieldsJoinedRow { /// The id of the schema this operation follows. pub schema_id: String, - /// The previous operations of this operation concatenated into string format with `_` seperator. - pub previous_operations: String, + /// The previous operations of this operation concatenated into string format with `_` + /// separator. + pub previous_operations: Option, /// The name of this field. - pub name: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub name: Option, /// The type of this field. - pub field_type: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub field_type: Option, /// The actual value contained in this field. - pub value: String, + /// + /// This is an Option as a DELETE operation contains no fields. + pub value: Option, } diff --git a/aquadoggo/src/db/provider.rs b/aquadoggo/src/db/provider.rs index dfaeddb27..0b2b3d818 100644 --- a/aquadoggo/src/db/provider.rs +++ b/aquadoggo/src/db/provider.rs @@ -3,6 +3,7 @@ use async_trait::async_trait; use p2panda_rs::document::{DocumentId, DocumentViewId}; use p2panda_rs::hash::Hash; +use p2panda_rs::operation::VerifiedOperation; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::errors::OperationStorageError; use p2panda_rs::storage_provider::traits::StorageProvider; @@ -31,7 +32,7 @@ impl SqlStorage { /// A `StorageProvider` implementation based on `sqlx` that supports SQLite and PostgreSQL /// databases. 
#[async_trait] -impl StorageProvider for SqlStorage { +impl StorageProvider for SqlStorage { type EntryArgsResponse = EntryArgsResponse; type EntryArgsRequest = EntryArgsRequest; type PublishEntryResponse = PublishEntryResponse; diff --git a/aquadoggo/src/db/stores/document.rs b/aquadoggo/src/db/stores/document.rs index 9efa670fd..238b588d8 100644 --- a/aquadoggo/src/db/stores/document.rs +++ b/aquadoggo/src/db/stores/document.rs @@ -167,6 +167,9 @@ impl DocumentStore for SqlStorage { ) VALUES ($1, $2, $3, $4) + ON CONFLICT(document_id) DO UPDATE SET + document_view_id = $2, + is_deleted = $3 ", ) .bind(document.id().as_str()) @@ -216,7 +219,7 @@ impl DocumentStore for SqlStorage { documents LEFT JOIN document_view_fields ON - documents.document_view_id = document_view_fields.document_view_id + documents.document_view_id = document_view_fields.document_view_id LEFT JOIN operation_fields_v1 ON document_view_fields.operation_id = operation_fields_v1.operation_id @@ -268,7 +271,7 @@ impl DocumentStore for SqlStorage { documents LEFT JOIN document_view_fields ON - documents.document_view_id = document_view_fields.document_view_id + documents.document_view_id = document_view_fields.document_view_id LEFT JOIN operation_fields_v1 ON document_view_fields.operation_id = operation_fields_v1.operation_id @@ -334,13 +337,15 @@ mod tests { use crate::db::stores::document::{DocumentStore, DocumentView}; use crate::db::stores::entry::StorageEntry; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; fn entries_to_document_views(entries: &[StorageEntry]) -> Vec { let mut document_views = Vec::new(); let mut current_document_view_fields = DocumentViewFields::new(); + for entry in entries { let operation_id: OperationId = entry.hash().into(); + for (name, value) in entry.operation().fields().unwrap().iter() { if entry.operation().is_delete() { continue; @@ -349,335 +354,360 @@ mod tests { .insert(name, DocumentViewValue::new(&operation_id, value)); } } + let document_view_fields = DocumentViewFields::new_from_operation_fields( &operation_id, &entry.operation().fields().unwrap(), ); + let document_view = DocumentView::new(&operation_id.clone().into(), &document_view_fields); + document_views.push(document_view) } + document_views } #[rstest] - #[tokio::test] - async fn inserts_gets_one_document_view( + fn inserts_gets_one_document_view( #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - // Get one entry from the pre-polulated db - let entry = db - .store - .get_entry_at_seq_num(&author, &LogId::new(1), &SeqNum::new(1).unwrap()) - .await - .unwrap() - .unwrap(); - - // Construct a `DocumentView` - let operation_id: OperationId = entry.hash().into(); - let document_view_id: DocumentViewId = operation_id.clone().into(); - let document_view = DocumentView::new( - &document_view_id, - &DocumentViewFields::new_from_operation_fields( - &operation_id, - &entry.operation().fields().unwrap(), - ), - ); + // Get one entry from the pre-polulated db + let entry = db + .store + .get_entry_at_seq_num(&author, &LogId::new(1), &SeqNum::new(1).unwrap()) + .await + .unwrap() + .unwrap(); + + // Construct a `DocumentView` + let operation_id: 
OperationId = entry.hash().into(); + let document_view_id: DocumentViewId = operation_id.clone().into(); + let document_view = DocumentView::new( + &document_view_id, + &DocumentViewFields::new_from_operation_fields( + &operation_id, + &entry.operation().fields().unwrap(), + ), + ); - // Insert into db - let result = db - .store - .insert_document_view(&document_view, &SchemaId::from_str(TEST_SCHEMA_ID).unwrap()) - .await; + // Insert into db + let result = db + .store + .insert_document_view(&document_view, &SchemaId::from_str(TEST_SCHEMA_ID).unwrap()) + .await; - assert!(result.is_ok()); + assert!(result.is_ok()); - let retrieved_document_view = db - .store - .get_document_view_by_id(&document_view_id) - .await - .unwrap() - .unwrap(); - - for key in [ - "username", - "age", - "height", - "is_admin", - "profile_picture", - "many_profile_pictures", - "special_profile_picture", - "many_special_profile_pictures", - "another_relation_field", - ] { - assert!(retrieved_document_view.get(key).is_some()); - assert_eq!(retrieved_document_view.get(key), document_view.get(key)); - } + let retrieved_document_view = db + .store + .get_document_view_by_id(&document_view_id) + .await + .unwrap() + .unwrap(); + + for key in [ + "username", + "age", + "height", + "is_admin", + "profile_picture", + "many_profile_pictures", + "special_profile_picture", + "many_special_profile_pictures", + "another_relation_field", + ] { + assert!(retrieved_document_view.get(key).is_some()); + assert_eq!(retrieved_document_view.get(key), document_view.get(key)); + } + }); } #[rstest] - #[tokio::test] - async fn document_view_does_not_exist( + fn document_view_does_not_exist( random_document_view_id: DocumentViewId, #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - - let view_does_not_exist = db - .store - .get_document_view_by_id(&random_document_view_id) - .await - .unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let view_does_not_exist = db + .store + .get_document_view_by_id(&random_document_view_id) + .await + .unwrap(); - assert!(view_does_not_exist.is_none()) + assert!(view_does_not_exist.is_none()); + }); } #[rstest] - #[tokio::test] - async fn inserts_gets_many_document_views( + fn inserts_gets_many_document_views( #[from(test_db)] #[with(10, 1, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let schema_id = SchemaId::from_str(TEST_SCHEMA_ID).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + let schema_id = SchemaId::from_str(TEST_SCHEMA_ID).unwrap(); - let log_id = LogId::default(); - let seq_num = SeqNum::default(); + let log_id = LogId::default(); + let seq_num = SeqNum::default(); - // Get 10 entries from the pre-populated test db - let entries = db - .store - .get_paginated_log_entries(&author, &log_id, &seq_num, 10) - .await - .unwrap(); - - // Parse them into document views - let document_views = entries_to_document_views(&entries); - - // Insert each of these views into the db - for document_view in document_views.clone() { - db.store - .insert_document_view(&document_view, &schema_id) + // Get 10 entries from the pre-populated test db + let 
entries = db + .store + .get_paginated_log_entries(&author, &log_id, &seq_num, 10) .await .unwrap(); - } - // Retrieve them again and assert they are the same as the inserted ones - for (count, entry) in entries.iter().enumerate() { - let result = db.store.get_document_view_by_id(&entry.hash().into()).await; + // Parse them into document views + let document_views = entries_to_document_views(&entries); - assert!(result.is_ok()); + // Insert each of these views into the db + for document_view in document_views.clone() { + db.store + .insert_document_view(&document_view, &schema_id) + .await + .unwrap(); + } - let document_view = result.unwrap().unwrap(); + // Retrieve them again and assert they are the same as the inserted ones + for (count, entry) in entries.iter().enumerate() { + let result = db.store.get_document_view_by_id(&entry.hash().into()).await; - // The update operation should be included in the view correctly, we check that here. - let expected_username = if count == 0 { - DocumentViewValue::new( - &entry.hash().into(), - &OperationValue::Text("panda".to_string()), - ) - } else { - DocumentViewValue::new( - &entry.hash().into(), - &OperationValue::Text("PANDA".to_string()), - ) - }; - assert_eq!(document_view.get("username").unwrap(), &expected_username); - } + assert!(result.is_ok()); + + let document_view = result.unwrap().unwrap(); + + // The update operation should be included in the view correctly, we check that here. + let expected_username = if count == 0 { + DocumentViewValue::new( + &entry.hash().into(), + &OperationValue::Text("panda".to_string()), + ) + } else { + DocumentViewValue::new( + &entry.hash().into(), + &OperationValue::Text("PANDA".to_string()), + ) + }; + assert_eq!(document_view.get("username").unwrap(), &expected_username); + } + }); } #[rstest] - #[tokio::test] - async fn insert_document_view_with_missing_operation( + fn insert_document_view_with_missing_operation( #[from(random_operation_id)] operation_id: OperationId, #[from(random_document_view_id)] document_view_id: DocumentViewId, - #[from(test_db)] - #[future] - db: TestSqlStore, - + #[from(test_db)] runner: TestDatabaseRunner, operation: Operation, ) { - let db = db.await; - let document_view = DocumentView::new( - &document_view_id, - &DocumentViewFields::new_from_operation_fields( - &operation_id, - &operation.fields().unwrap(), - ), - ); - - let result = db - .store - .insert_document_view(&document_view, &SchemaId::from_str(TEST_SCHEMA_ID).unwrap()) - .await; - - assert_eq!( - result.unwrap_err().to_string(), - "A fatal error occured in DocumentStore: error returned from database: FOREIGN KEY constraint failed".to_string() - ); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view = DocumentView::new( + &document_view_id, + &DocumentViewFields::new_from_operation_fields( + &operation_id, + &operation.fields().unwrap(), + ), + ); + + let result = db + .store + .insert_document_view(&document_view, &SchemaId::from_str(TEST_SCHEMA_ID).unwrap()) + .await; + + assert!(result.is_err()); + }); } #[rstest] - #[tokio::test] - async fn inserts_gets_documents( + fn inserts_gets_documents( #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let document_id = db.documents[0].clone(); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_id = db.documents[0].clone(); - let document_operations = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let 
document_operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); - let result = db.store.insert_document(&document).await; + let result = db.store.insert_document(&document).await; - assert!(result.is_ok()); + assert!(result.is_ok()); - let document_view = db - .store - .get_document_view_by_id(document.view_id()) - .await - .unwrap() - .unwrap(); - - let expected_document_view = document.view().unwrap(); - - for key in [ - "username", - "age", - "height", - "is_admin", - "profile_picture", - "many_profile_pictures", - "special_profile_picture", - "many_special_profile_pictures", - "another_relation_field", - ] { - assert!(document_view.get(key).is_some()); - assert_eq!(document_view.get(key), expected_document_view.get(key)); - } + let document_view = db + .store + .get_document_view_by_id(document.view_id()) + .await + .unwrap() + .unwrap(); + + let expected_document_view = document.view().unwrap(); + + for key in [ + "username", + "age", + "height", + "is_admin", + "profile_picture", + "many_profile_pictures", + "special_profile_picture", + "many_special_profile_pictures", + "another_relation_field", + ] { + assert!(document_view.get(key).is_some()); + assert_eq!(document_view.get(key), expected_document_view.get(key)); + } + }); } #[rstest] - #[tokio::test] - async fn gets_document_by_id( + fn gets_document_by_id( #[from(test_db)] #[with(1, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let document_id = db.documents[0].clone(); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_id = db.documents[0].clone(); - let document_operations = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let document_operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); - let result = db.store.insert_document(&document).await; + let result = db.store.insert_document(&document).await; - assert!(result.is_ok()); + assert!(result.is_ok()); - let document_view = db - .store - .get_document_by_id(document.id()) - .await - .unwrap() - .unwrap(); - - let expected_document_view = document.view().unwrap(); - - for key in [ - "username", - "age", - "height", - "is_admin", - "profile_picture", - "many_profile_pictures", - "special_profile_picture", - "many_special_profile_pictures", - "another_relation_field", - ] { - assert!(document_view.get(key).is_some()); - assert_eq!(document_view.get(key), expected_document_view.get(key)); - } + let document_view = db + .store + .get_document_by_id(document.id()) + .await + .unwrap() + .unwrap(); + + let expected_document_view = document.view().unwrap(); + + for key in [ + "username", + "age", + "height", + "is_admin", + "profile_picture", + "many_profile_pictures", + "special_profile_picture", + "many_special_profile_pictures", + "another_relation_field", + ] { + assert!(document_view.get(key).is_some()); + assert_eq!(document_view.get(key), expected_document_view.get(key)); + } + }); } #[rstest] - #[tokio::test] - async fn no_view_when_document_deleted( + fn no_view_when_document_deleted( #[from(test_db)] #[with(10, 1, true)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let 
db = db.await; - let document_id = db.documents[0].clone(); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_id = db.documents[0].clone(); - let document_operations = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let document_operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); - let result = db.store.insert_document(&document).await; + let result = db.store.insert_document(&document).await; - assert!(result.is_ok()); + assert!(result.is_ok()); - let document_view = db.store.get_document_by_id(document.id()).await.unwrap(); + let document_view = db.store.get_document_by_id(document.id()).await.unwrap(); - assert!(document_view.is_none()); + assert!(document_view.is_none()); + }); } #[rstest] - #[tokio::test] - async fn gets_documents_by_schema( + fn updates_a_document( #[from(test_db)] - #[with(10, 2, false, TEST_SCHEMA_ID.parse().unwrap())] - #[future] - db: TestSqlStore, + #[with(10, 1)] + runner: TestDatabaseRunner, ) { - let db = db.await; - let schema_id = SchemaId::from_str(TEST_SCHEMA_ID).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let document_id = db.documents[0].clone(); - for document_id in &db.documents { let document_operations = db .store - .get_operations_by_document_id(document_id) + .get_operations_by_document_id(&document_id) .await .unwrap(); let document = DocumentBuilder::new(document_operations).build().unwrap(); - db.store.insert_document(&document).await.unwrap(); - } + let mut current_operations = Vec::new(); + + for operation in document.operations() { + // For each operation in the db we insert a document, cumulatively adding the next operation + // each time. this should perform an "INSERT" first in the documents table, followed by 9 "UPDATES". 
+ current_operations.push(operation.clone()); + let document = DocumentBuilder::new(current_operations.clone()) + .build() + .unwrap(); + let result = db.store.insert_document(&document).await; + assert!(result.is_ok()); + + let document_view = db.store.get_document_by_id(document.id()).await.unwrap(); + assert!(document_view.is_some()); + } + }) + } + + #[rstest] + fn gets_documents_by_schema( + #[from(test_db)] + #[with(10, 2, false, TEST_SCHEMA_ID.parse().unwrap())] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let schema_id = SchemaId::from_str(TEST_SCHEMA_ID).unwrap(); + + for document_id in &db.documents { + let document_operations = db + .store + .get_operations_by_document_id(document_id) + .await + .unwrap(); + + let document = DocumentBuilder::new(document_operations).build().unwrap(); + + db.store.insert_document(&document).await.unwrap(); + } - let schema_documents = db.store.get_documents_by_schema(&schema_id).await.unwrap(); + let schema_documents = db.store.get_documents_by_schema(&schema_id).await.unwrap(); - assert_eq!(schema_documents.len(), 2) + assert_eq!(schema_documents.len(), 2); + }); } } diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index 919c5c8c4..80f5d2a3d 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -2,8 +2,6 @@ use async_trait::async_trait; use lipmaa_link::get_lipmaa_links_back_to; -use sqlx::{query, query_as}; - use p2panda_rs::entry::{decode_entry, Entry, EntrySigned, LogId, SeqNum}; use p2panda_rs::hash::Hash; use p2panda_rs::identity::Author; @@ -13,17 +11,17 @@ use p2panda_rs::storage_provider::errors::EntryStorageError; use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore}; use p2panda_rs::storage_provider::ValidationError; use p2panda_rs::Validate; +use sqlx::{query, query_as}; use crate::db::models::EntryRow; use crate::db::provider::SqlStorage; -/// A signed entry and it's encoded operation. Entries are the lowest level data -/// type on the p2panda network, they are signed by authors and form bamboo append -/// only logs. The operation is an entries' payload, it contains the data mutations -/// which authors publish. +/// A signed entry and it's encoded operation. Entries are the lowest level data type on the +/// p2panda network, they are signed by authors and form bamboo append only logs. The operation is +/// an entries' payload, it contains the data mutations which authors publish. /// -/// This struct implements the `AsStorageEntry` trait which is required when -/// constructing the `EntryStore`. +/// This struct implements the `AsStorageEntry` trait which is required when constructing the +/// `EntryStore`. #[derive(Debug, Clone, PartialEq)] pub struct StorageEntry { entry_signed: EntrySigned, @@ -58,9 +56,9 @@ impl Validate for StorageEntry { } } -/// `From` implementation for converting an `EntryRow` into a `StorageEntry`. This is useful -/// when retrieving entries from the database. The `sqlx` crate coerces returned entry rows -/// into `EntryRow` but we normally want them as `StorageEntry`. +/// `From` implementation for converting an `EntryRow` into a `StorageEntry`. This is useful when +/// retrieving entries from the database. The `sqlx` crate coerces returned entry rows into +/// `EntryRow` but we normally want them as `StorageEntry`. impl From for StorageEntry { fn from(entry_row: EntryRow) -> Self { // Unwrapping everything here as we assume values coming from the database are valid. 
@@ -122,12 +120,11 @@ impl AsStorageEntry for StorageEntry { } } -/// Implementation of `EntryStore` trait which is required when constructing a -/// `StorageProvider`. +/// Implementation of `EntryStore` trait which is required when constructing a `StorageProvider`. /// -/// Handles storage and retrieval of entries in the form of`StorageEntry` which -/// implements the required `AsStorageEntry` trait. An intermediary struct `EntryRow` -/// is also used when retrieving an entry from the database. +/// Handles storage and retrieval of entries in the form of `StorageEntry` which implements the +/// required `AsStorageEntry` trait. An intermediary struct `EntryRow` is also used when retrieving +/// an entry from the database. #[async_trait] impl EntryStore for SqlStorage { /// Insert an entry into storage. @@ -174,9 +171,9 @@ impl EntryStore for SqlStorage { /// Get an entry from storage by it's hash id. /// - /// Returns a result containing the entry wrapped in an option if it was - /// found successfully. Returns `None` if the entry was not found in storage. - /// Errors when a fatal storage error occured. + /// Returns a result containing the entry wrapped in an option if it was found successfully. + /// Returns `None` if the entry was not found in storage. Errors when a fatal storage error + /// occurred. async fn get_entry_by_hash( &self, hash: &Hash, @@ -207,9 +204,9 @@ impl EntryStore for SqlStorage { /// Get an entry at a sequence position within an author's log. /// - /// Returns a result containing the entry wrapped in an option if it was found - /// successfully. Returns None if the entry was not found in storage. Errors when - /// a fatal storage error occured. + /// Returns a result containing the entry wrapped in an option if it was found successfully. + /// Returns None if the entry was not found in storage. Errors when a fatal storage error + /// occurred. async fn get_entry_at_seq_num( &self, author: &Author, @@ -246,9 +243,9 @@ impl EntryStore for SqlStorage { /// Get the latest entry of an author's log. /// - /// Returns a result containing the latest log entry wrapped in an option if an - /// entry was found. Returns None if the specified author and log could not be - /// found in storage. Errors when a fatal storage error occured. + /// Returns a result containing the latest log entry wrapped in an option if an entry was + /// found. Returns None if the specified author and log could not be found in storage. Errors + /// when a fatal storage error occurred. async fn get_latest_entry( &self, author: &Author, @@ -286,9 +283,9 @@ impl EntryStore for SqlStorage { /// Get all entries of a given schema /// - /// Returns a result containing a vector of all entries which follow the passed - /// schema (identified by it's `SchemaId`). If no entries exist, or the schema - /// is not known by this node, then an empty vector is returned. + /// Returns a result containing a vector of all entries which follow the passed schema + /// (identified by its `SchemaId`). If no entries exist, or the schema is not known by this + /// node, then an empty vector is returned. async fn get_entries_by_schema( &self, schema: &SchemaId, @@ -322,9 +319,9 @@ impl EntryStore for SqlStorage { /// Get all entries of a given schema. /// - /// Returns a result containing a vector of all entries which follow the passed - /// schema (identified by it's `SchemaId`). If no entries exist, or the schema - /// is not known by this node, then an empty vector is returned.
+ /// Returns a result containing a vector of all entries which follow the passed schema + /// (identified by its `SchemaId`). If no entries exist, or the schema is not known by this + /// node, then an empty vector is returned. async fn get_paginated_log_entries( &self, author: &Author, @@ -348,7 +345,7 @@ impl EntryStore for SqlStorage { WHERE author = $1 AND log_id = $2 - AND CAST(seq_num AS NUMERIC) BETWEEN $3 and $4 + AND CAST(seq_num AS NUMERIC) BETWEEN CAST($3 AS NUMERIC) and CAST($4 AS NUMERIC) ORDER BY CAST(seq_num AS NUMERIC) ", @@ -366,13 +363,12 @@ impl EntryStore for SqlStorage { /// Get all entries which make up the certificate pool for a specified entry. /// - /// Returns a result containing a vector of all stored entries which are part - /// the passed entries' certificate pool. Errors if a fatal storage error - /// occurs. + /// Returns a result containing a vector of all stored entries which are part of the passed + /// entry's certificate pool. Errors if a fatal storage error occurs. /// - /// It is worth noting that this method doesn't check if the certificate pool - /// is complete, it only returns entries which are part of the pool and found - /// in storage. If an entry was not stored, then the pool may be incomplete. + /// It is worth noting that this method doesn't check if the certificate pool is complete; it + /// only returns entries which are part of the pool and found in storage. If an entry was not + /// stored, then the pool may be incomplete. async fn get_certificate_pool( &self, author: &Author, @@ -435,259 +431,247 @@ mod tests { use rstest::rstest; use crate::db::stores::entry::StorageEntry; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; #[rstest] - #[tokio::test] - async fn insert_entry( - key_pair: KeyPair, - entry: Entry, - #[from(test_db)] - #[future] - db: TestSqlStore, - ) { - let db = db.await; - let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); - let operation_encoded = OperationEncoded::try_from(entry.operation().unwrap()).unwrap(); - let doggo_entry = StorageEntry::new(&entry_encoded, &operation_encoded).unwrap(); - let result = db.store.insert_entry(doggo_entry).await; - - assert!(result.is_ok()) + fn insert_entry(key_pair: KeyPair, entry: Entry, #[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(entry.operation().unwrap()).unwrap(); + let doggo_entry = StorageEntry::new(&entry_encoded, &operation_encoded).unwrap(); + let result = db.store.insert_entry(doggo_entry).await; + + assert!(result.is_ok()); + }); } #[rstest] - #[tokio::test] - async fn try_insert_non_unique_entry( + fn try_insert_non_unique_entry( #[from(test_db)] #[with(10, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let log_id = LogId::new(1); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + let log_id = LogId::new(1); - let first_entry = db - .store - .get_entry_at_seq_num(&author, &log_id, &SeqNum::new(1).unwrap()) - .await - .unwrap() - .unwrap(); + let first_entry = db + .store + .get_entry_at_seq_num(&author, &log_id, &SeqNum::new(1).unwrap()) + .await + .unwrap() +
.unwrap(); - let duplicate_doggo_entry = StorageEntry::new( - first_entry.entry_signed(), - first_entry.operation_encoded().unwrap(), - ) - .unwrap(); - let result = db.store.insert_entry(duplicate_doggo_entry).await; + let duplicate_doggo_entry = StorageEntry::new( + first_entry.entry_signed(), + first_entry.operation_encoded().unwrap(), + ) + .unwrap(); - assert_eq!( - result.unwrap_err().to_string(), - "Error occured during `EntryStorage` request in storage provider: error returned from \ - database: UNIQUE constraint failed: entries.author, entries.log_id, entries.seq_num" - ) + let result = db.store.insert_entry(duplicate_doggo_entry).await; + assert!(result.is_err()); + }); } #[rstest] - #[tokio::test] - async fn latest_entry( + fn latest_entry( #[from(test_db)] #[with(20, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); - let log_id = LogId::new(1); + runner.with_db_teardown(|db: TestDatabase| async move { + let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); + let log_id = LogId::new(1); - let latest_entry = db - .store - .get_latest_entry(&author_not_in_db, &log_id) - .await - .unwrap(); - assert!(latest_entry.is_none()); + let latest_entry = db + .store + .get_latest_entry(&author_not_in_db, &log_id) + .await + .unwrap(); + assert!(latest_entry.is_none()); - let author_in_db = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + let author_in_db = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let latest_entry = db - .store - .get_latest_entry(&author_in_db, &log_id) - .await - .unwrap(); - assert_eq!(latest_entry.unwrap().seq_num(), SeqNum::new(20).unwrap()); + let latest_entry = db + .store + .get_latest_entry(&author_in_db, &log_id) + .await + .unwrap(); + assert_eq!(latest_entry.unwrap().seq_num(), SeqNum::new(20).unwrap()); + }); } #[rstest] - #[tokio::test] - async fn entries_by_schema( + fn entries_by_schema( #[from(test_db)] #[with(20, 2, false, TEST_SCHEMA_ID.parse().unwrap())] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let schema_not_in_the_db = SchemaId::new_application( - "venue", - &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), - ); + runner.with_db_teardown(|db: TestDatabase| async move { + let schema_not_in_the_db = SchemaId::new_application( + "venue", + &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), + ); - let entries = db - .store - .get_entries_by_schema(&schema_not_in_the_db) - .await - .unwrap(); - assert!(entries.is_empty()); + let entries = db + .store + .get_entries_by_schema(&schema_not_in_the_db) + .await + .unwrap(); + assert!(entries.is_empty()); - let schema_in_the_db = TEST_SCHEMA_ID.parse().unwrap(); + let schema_in_the_db = TEST_SCHEMA_ID.parse().unwrap(); - let entries = db - .store - .get_entries_by_schema(&schema_in_the_db) - .await - .unwrap(); - assert!(entries.len() == 40); + let entries = db + .store + .get_entries_by_schema(&schema_in_the_db) + .await + .unwrap(); + assert!(entries.len() == 40); + }); } #[rstest] - #[tokio::test] - async fn entry_by_seq_number( + fn entry_by_seq_number( #[from(test_db)] #[with(10, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - - for seq_num in 1..10 { - let seq_num = SeqNum::new(seq_num).unwrap(); + runner.with_db_teardown(|db: 
TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + + for seq_num in 1..10 { + let seq_num = SeqNum::new(seq_num).unwrap(); + let entry = db + .store + .get_entry_at_seq_num(&author, &LogId::new(1), &seq_num) + .await + .unwrap(); + assert_eq!(entry.unwrap().seq_num(), seq_num) + } + + let wrong_log = LogId::new(2); let entry = db .store - .get_entry_at_seq_num(&author, &LogId::new(1), &seq_num) + .get_entry_at_seq_num(&author, &wrong_log, &SeqNum::new(1).unwrap()) .await .unwrap(); - assert_eq!(entry.unwrap().seq_num(), seq_num) - } + assert!(entry.is_none()); - let wrong_log = LogId::new(2); - let entry = db - .store - .get_entry_at_seq_num(&author, &wrong_log, &SeqNum::new(1).unwrap()) - .await - .unwrap(); - assert!(entry.is_none()); - - let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); - let entry = db - .store - .get_entry_at_seq_num(&author_not_in_db, &LogId::new(1), &SeqNum::new(1).unwrap()) - .await - .unwrap(); - assert!(entry.is_none()); + let author_not_in_db = Author::try_from(*KeyPair::new().public_key()).unwrap(); + let entry = db + .store + .get_entry_at_seq_num(&author_not_in_db, &LogId::new(1), &SeqNum::new(1).unwrap()) + .await + .unwrap(); + assert!(entry.is_none()); - let seq_num_not_in_log = SeqNum::new(1000).unwrap(); - let entry = db - .store - .get_entry_at_seq_num(&author_not_in_db, &LogId::new(1), &seq_num_not_in_log) - .await - .unwrap(); - assert!(entry.is_none()) + let seq_num_not_in_log = SeqNum::new(1000).unwrap(); + let entry = db + .store + .get_entry_at_seq_num(&author_not_in_db, &LogId::new(1), &seq_num_not_in_log) + .await + .unwrap(); + assert!(entry.is_none()); + }); } #[rstest] - #[tokio::test] - async fn get_entry_by_hash( + fn get_entry_by_hash( #[from(test_db)] #[with(20, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - - for seq_num in [1, 11, 18] { - let seq_num = SeqNum::new(seq_num).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + + for seq_num in [1, 11, 18] { + let seq_num = SeqNum::new(seq_num).unwrap(); + let entry = db + .store + .get_entry_at_seq_num(&author, &LogId::new(1), &seq_num) + .await + .unwrap() + .unwrap(); + + let entry_hash = entry.hash(); + let entry_by_hash = db + .store + .get_entry_by_hash(&entry_hash) + .await + .unwrap() + .unwrap(); + assert_eq!(entry, entry_by_hash) + } + + let entry_hash_not_in_db = Hash::new_from_bytes(vec![1, 2, 3]).unwrap(); let entry = db .store - .get_entry_at_seq_num(&author, &LogId::new(1), &seq_num) - .await - .unwrap() - .unwrap(); - - let entry_hash = entry.hash(); - let entry_by_hash = db - .store - .get_entry_by_hash(&entry_hash) + .get_entry_by_hash(&entry_hash_not_in_db) .await - .unwrap() .unwrap(); - assert_eq!(entry, entry_by_hash) - } - - let entry_hash_not_in_db = Hash::new_from_bytes(vec![1, 2, 3]).unwrap(); - let entry = db - .store - .get_entry_by_hash(&entry_hash_not_in_db) - .await - .unwrap(); - assert!(entry.is_none()) + assert!(entry.is_none()); + }); } #[rstest] - #[tokio::test] - async fn paginated_log_entries( + fn paginated_log_entries( #[from(test_db)] #[with(30, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + 
runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let entries = db - .store - .get_paginated_log_entries(&author, &LogId::default(), &SeqNum::default(), 20) - .await - .unwrap(); + let entries = db + .store + .get_paginated_log_entries(&author, &LogId::default(), &SeqNum::default(), 20) + .await + .unwrap(); - for entry in entries.clone() { - assert!(entry.seq_num().as_u64() >= 1 && entry.seq_num().as_u64() <= 20) - } + for entry in entries.clone() { + assert!(entry.seq_num().as_u64() >= 1 && entry.seq_num().as_u64() <= 20) + } - assert_eq!(entries.len(), 20); + assert_eq!(entries.len(), 20); - let entries = db - .store - .get_paginated_log_entries(&author, &LogId::default(), &SeqNum::new(21).unwrap(), 20) - .await - .unwrap(); + let entries = db + .store + .get_paginated_log_entries( + &author, + &LogId::default(), + &SeqNum::new(21).unwrap(), + 20, + ) + .await + .unwrap(); - assert_eq!(entries.len(), 10); + assert_eq!(entries.len(), 10); + }); } #[rstest] - #[tokio::test] - async fn get_lipmaa_link_entries( + fn get_lipmaa_link_entries( #[from(test_db)] #[with(100, 1)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(db.key_pairs[0].public_key().to_owned()).unwrap(); - let entries = db - .store - .get_certificate_pool(&author, &LogId::default(), &SeqNum::new(20).unwrap()) - .await - .unwrap(); + let entries = db + .store + .get_certificate_pool(&author, &LogId::default(), &SeqNum::new(20).unwrap()) + .await + .unwrap(); - let cert_pool_seq_nums = entries - .iter() - .map(|entry| entry.seq_num().as_u64()) - .collect::>(); + let cert_pool_seq_nums = entries + .iter() + .map(|entry| entry.seq_num().as_u64()) + .collect::>(); - assert!(!entries.is_empty()); - assert_eq!(cert_pool_seq_nums, vec![19, 18, 17, 13, 4, 1]); + assert!(!entries.is_empty()); + assert_eq!(cert_pool_seq_nums, vec![19, 18, 17, 13, 4, 1]); + }); } } diff --git a/aquadoggo/src/db/stores/log.rs b/aquadoggo/src/db/stores/log.rs index 43a277690..a88fd8766 100644 --- a/aquadoggo/src/db/stores/log.rs +++ b/aquadoggo/src/db/stores/log.rs @@ -1,14 +1,13 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use async_trait::async_trait; -use sqlx::{query, query_scalar}; - use p2panda_rs::document::DocumentId; use p2panda_rs::entry::LogId; use p2panda_rs::identity::Author; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::errors::LogStorageError; use p2panda_rs::storage_provider::traits::{AsStorageLog, LogStore}; +use sqlx::{query, query_scalar}; use crate::db::provider::SqlStorage; @@ -179,226 +178,190 @@ impl LogStore for SqlStorage { #[cfg(test)] mod tests { - use std::convert::TryFrom; - - use p2panda_rs::document::DocumentViewId; - use p2panda_rs::entry::{sign_and_encode, Entry as P2PandaEntry, LogId, SeqNum}; + use p2panda_rs::document::{DocumentId, DocumentViewId}; + use p2panda_rs::entry::{EntrySigned, LogId}; use p2panda_rs::hash::Hash; - use p2panda_rs::identity::{Author, KeyPair}; - use p2panda_rs::operation::{Operation, OperationEncoded, OperationFields, OperationValue}; + use p2panda_rs::identity::Author; + use p2panda_rs::operation::{OperationEncoded, OperationId}; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::traits::{ AsStorageEntry, AsStorageLog, EntryStore, LogStore, 
StorageProvider, }; + use p2panda_rs::test_utils::fixtures::{ + entry_signed_encoded, operation_encoded, public_key, random_document_id, + random_operation_id, schema, + }; + use rstest::rstest; - use crate::db::provider::SqlStorage; use crate::db::stores::entry::StorageEntry; use crate::db::stores::log::StorageLog; - use crate::test_helpers::{initialize_db, random_entry_hash}; - - const TEST_AUTHOR: &str = "58223678ab378f1b07d1d8c789e6da01d16a06b1a4d17cc10119a0109181156c"; - - #[tokio::test] - async fn initial_log_id() { - let pool = initialize_db().await; - let author = Author::new(TEST_AUTHOR).unwrap(); - let storage_provider = SqlStorage { pool }; - - let log_id = storage_provider - .find_document_log_id(&author, None) - .await - .unwrap(); - - assert_eq!(log_id, LogId::new(1)); + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + + #[rstest] + fn initial_log_id( + #[from(public_key)] author: Author, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let log_id = db.store.find_document_log_id(&author, None).await.unwrap(); + assert_eq!(log_id, LogId::new(1)); + }); } - #[tokio::test] - async fn prevent_duplicate_log_ids() { - let pool = initialize_db().await; - let storage_provider = SqlStorage { pool }; - - let author = Author::new(TEST_AUTHOR).unwrap(); - let document = Hash::new(&random_entry_hash()).unwrap(); - let schema = - SchemaId::new_application("venue", &Hash::new(&random_entry_hash()).unwrap().into()); - - let log = StorageLog::new(&author, &schema, &document.clone().into(), &LogId::new(1)); - assert!(storage_provider.insert_log(log).await.is_ok()); - - let log = StorageLog::new(&author, &schema, &document.into(), &LogId::new(1)); - assert!(storage_provider.insert_log(log).await.is_err()); + #[rstest] + fn prevent_duplicate_log_ids( + #[from(public_key)] author: Author, + #[from(schema)] schema: SchemaId, + #[from(random_document_id)] document: DocumentId, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let log = StorageLog::new(&author, &schema, &document.clone(), &LogId::new(1)); + assert!(db.store.insert_log(log).await.is_ok()); + + let log = StorageLog::new(&author, &schema, &document, &LogId::new(1)); + assert!(db.store.insert_log(log).await.is_err()); + }); } - #[tokio::test] - async fn with_multi_hash_schema_id() { - let pool = initialize_db().await; - let storage_provider = SqlStorage { pool }; - - let author = Author::new(TEST_AUTHOR).unwrap(); - let document = Hash::new(&random_entry_hash()).unwrap(); - let schema = SchemaId::new_application( - "venue", - &DocumentViewId::new(&[ - Hash::new(&random_entry_hash()).unwrap().into(), - Hash::new(&random_entry_hash()).unwrap().into(), - ]) - .unwrap(), - ); - - let log = StorageLog::new(&author, &schema, &document.into(), &LogId::new(1)); - - assert!(storage_provider.insert_log(log).await.is_ok()); + #[rstest] + fn with_multi_hash_schema_id( + #[from(public_key)] author: Author, + #[from(random_operation_id)] operation_id_1: OperationId, + #[from(random_operation_id)] operation_id_2: OperationId, + #[from(random_document_id)] document: DocumentId, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let schema = SchemaId::new_application( + "venue", + &DocumentViewId::new(&[operation_id_1, operation_id_2]).unwrap(), + ); + + let log = StorageLog::new(&author, &schema, &document, &LogId::new(1)); + + 
assert!(db.store.insert_log(log).await.is_ok()); + }); } - #[tokio::test] - async fn selecting_next_log_id() { - let pool = initialize_db().await; - let key_pair = KeyPair::new(); - let author = Author::try_from(*key_pair.public_key()).unwrap(); - let schema = SchemaId::new_application( - "venue", - &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), - ); - - let storage_provider = SqlStorage { pool }; - - let log_id = storage_provider - .find_document_log_id(&author, None) - .await - .unwrap(); - - // We expect to be given the next log id when asking for a possible log id for a new - // document by the same author - assert_eq!(log_id, LogId::default()); - - // Starting with an empty db, we expect to be able to count up from 1 and expect each - // inserted document's log id to be euqal to the count index - for n in 1..12 { - let doc = Hash::new_from_bytes(vec![1, 2, n]).unwrap().into(); - - let log_id = storage_provider - .find_document_log_id(&author, None) - .await - .unwrap(); - assert_eq!(LogId::new(n.into()), log_id); - let log = StorageLog::new(&author, &schema, &doc, &log_id); - storage_provider.insert_log(log).await.unwrap(); - } + #[rstest] + fn selecting_next_log_id( + #[from(public_key)] author: Author, + #[from(schema)] schema: SchemaId, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let log_id = db.store.find_document_log_id(&author, None).await.unwrap(); + + // We expect to be given the next log id when asking for a possible log id for a new + // document by the same author + assert_eq!(log_id, LogId::default()); + + // Starting with an empty db, we expect to be able to count up from 1 and expect each + // inserted document's log id to be euqal to the count index + for n in 1..12 { + let doc = Hash::new_from_bytes(vec![1, 2, n]).unwrap().into(); + let log_id = db.store.find_document_log_id(&author, None).await.unwrap(); + + assert_eq!(LogId::new(n.into()), log_id); + + let log = StorageLog::new(&author, &schema, &doc, &log_id); + db.store.insert_log(log).await.unwrap(); + } + }); } - #[tokio::test] - async fn document_log_id() { - let pool = initialize_db().await; - - // Create a new document - // TODO: use p2panda-rs test utils once available - let key_pair = KeyPair::new(); - let author = Author::try_from(*key_pair.public_key()).unwrap(); - let log_id = LogId::new(1); - let schema = SchemaId::new_application( - "venue", - &Hash::new_from_bytes(vec![1, 2, 3]).unwrap().into(), - ); - let seq_num = SeqNum::new(1).unwrap(); - let mut fields = OperationFields::new(); - fields - .add("test", OperationValue::Text("Hello".to_owned())) - .unwrap(); - let operation = Operation::new_create(schema.clone(), fields).unwrap(); - let operation_encoded = OperationEncoded::try_from(&operation).unwrap(); - let entry = P2PandaEntry::new(&log_id, Some(&operation), None, None, &seq_num).unwrap(); - let entry_encoded = sign_and_encode(&entry, &key_pair).unwrap(); - - let storage_provider = SqlStorage { pool }; - - // Expect database to return nothing yet - assert_eq!( - storage_provider - .get_document_by_entry(&entry_encoded.hash()) - .await - .unwrap(), - None - ); - - let entry = StorageEntry::new(&entry_encoded.clone(), &operation_encoded).unwrap(); - - // Store entry in database - assert!(storage_provider.insert_entry(entry).await.is_ok()); - - let log = StorageLog::new( - &author, - &schema, - &entry_encoded.hash().into(), - &LogId::new(1), - ); - - // Store log in database - 
assert!(storage_provider.insert_log(log).await.is_ok()); - - // Expect to find document in database. The document hash should be the same as the hash of - // the entry which referred to the `CREATE` operation. - assert_eq!( - storage_provider - .get_document_by_entry(&entry_encoded.hash()) - .await - .unwrap(), - Some(entry_encoded.hash().into()) - ); - - // We expect to find this document in the default log - assert_eq!( - storage_provider - .find_document_log_id(&author, Some(&entry_encoded.hash().into())) - .await - .unwrap(), - LogId::default() - ); + #[rstest] + fn document_log_id( + #[from(schema)] schema: SchemaId, + #[from(entry_signed_encoded)] entry_encoded: EntrySigned, + #[from(operation_encoded)] operation_encoded: OperationEncoded, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + // Expect database to return nothing yet + assert_eq!( + db.store + .get_document_by_entry(&entry_encoded.hash()) + .await + .unwrap(), + None + ); + + let entry = StorageEntry::new(&entry_encoded.clone(), &operation_encoded).unwrap(); + let author = entry.author(); + + // Store entry in database + assert!(db.store.insert_entry(entry).await.is_ok()); + + let log = StorageLog::new( + &author, + &schema, + &entry_encoded.hash().into(), + &LogId::new(1), + ); + + // Store log in database + assert!(db.store.insert_log(log).await.is_ok()); + + // Expect to find document in database. The document hash should be the same as the + // hash of the entry which referred to the `CREATE` operation. + assert_eq!( + db.store + .get_document_by_entry(&entry_encoded.hash()) + .await + .unwrap(), + Some(entry_encoded.hash().into()) + ); + + // We expect to find this document in the default log + assert_eq!( + db.store + .find_document_log_id(&author, Some(&entry_encoded.hash().into())) + .await + .unwrap(), + LogId::default() + ); + }); } - #[tokio::test] - async fn log_ids() { - let pool = initialize_db().await; + #[rstest] + fn log_ids( + #[from(public_key)] author: Author, + #[from(test_db)] runner: TestDatabaseRunner, + #[from(schema)] schema: SchemaId, + #[from(random_document_id)] document_first: DocumentId, + #[from(random_document_id)] document_second: DocumentId, + #[from(random_document_id)] document_third: DocumentId, + #[from(random_document_id)] document_forth: DocumentId, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + // Register two log ids at the beginning + let log_1 = StorageLog::new(&author, &schema, &document_first, &LogId::new(1)); + let log_2 = StorageLog::new(&author, &schema, &document_second, &LogId::new(2)); - // Mock author - let author = Author::new(TEST_AUTHOR).unwrap(); + db.store.insert_log(log_1).await.unwrap(); + db.store.insert_log(log_2).await.unwrap(); - // Mock schema - let schema = - SchemaId::new_application("venue", &Hash::new(&random_entry_hash()).unwrap().into()); + // Find next free log id and register it + let log_id = db.store.next_log_id(&author).await.unwrap(); + assert_eq!(log_id, LogId::new(3)); - // Mock four different document hashes - let document_first = Hash::new(&random_entry_hash()).unwrap(); - let document_second = Hash::new(&random_entry_hash()).unwrap(); - let document_third = Hash::new(&random_entry_hash()).unwrap(); - let document_forth = Hash::new(&random_entry_hash()).unwrap(); + let log_3 = StorageLog::new(&author, &schema, &document_third.into(), &log_id); - let storage_provider = SqlStorage { pool }; + db.store.insert_log(log_3).await.unwrap(); - // Register two log ids at 
the beginning - let log_1 = StorageLog::new(&author, &schema, &document_first.into(), &LogId::new(1)); - let log_2 = StorageLog::new(&author, &schema, &document_second.into(), &LogId::new(2)); + // Find next free log id and register it + let log_id = db.store.next_log_id(&author).await.unwrap(); + assert_eq!(log_id, LogId::new(4)); - storage_provider.insert_log(log_1).await.unwrap(); - storage_provider.insert_log(log_2).await.unwrap(); + let log_4 = StorageLog::new(&author, &schema, &document_forth.into(), &log_id); - // Find next free log id and register it - let log_id = storage_provider.next_log_id(&author).await.unwrap(); - assert_eq!(log_id, LogId::new(3)); + db.store.insert_log(log_4).await.unwrap(); - let log_3 = StorageLog::new(&author, &schema, &document_third.into(), &log_id); - - storage_provider.insert_log(log_3).await.unwrap(); - - // Find next free log id and register it - let log_id = storage_provider.next_log_id(&author).await.unwrap(); - assert_eq!(log_id, LogId::new(4)); - - let log_4 = StorageLog::new(&author, &schema, &document_forth.into(), &log_id); - - storage_provider.insert_log(log_4).await.unwrap(); - - // Find next free log id - let log_id = storage_provider.next_log_id(&author).await.unwrap(); - assert_eq!(log_id, LogId::new(5)); + // Find next free log id + let log_id = db.store.next_log_id(&author).await.unwrap(); + assert_eq!(log_id, LogId::new(5)); + }); } } diff --git a/aquadoggo/src/db/stores/operation.rs b/aquadoggo/src/db/stores/operation.rs index 371654e51..cac2b5d96 100644 --- a/aquadoggo/src/db/stores/operation.rs +++ b/aquadoggo/src/db/stores/operation.rs @@ -144,7 +144,7 @@ impl OperationStore for SqlStorage { .bind(name.to_owned()) .bind(value.field_type().to_string()) .bind(db_value) - .bind(index.to_string()) + .bind(index as i32) .execute(&self.pool) }) .collect::>() @@ -298,7 +298,7 @@ mod tests { }; use rstest::rstest; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; #[rstest] #[case::create_operation(create_operation(&default_fields()))] @@ -306,63 +306,57 @@ mod tests { #[case::update_operation_many_prev_ops(update_operation(&default_fields(), &random_previous_operations(12)))] #[case::delete_operation(delete_operation(&DEFAULT_HASH.parse().unwrap()))] #[case::delete_operation_many_prev_ops(delete_operation(&random_previous_operations(12)))] - #[tokio::test] - async fn insert_get_operations( + fn insert_get_operations( #[case] operation: Operation, #[from(public_key)] author: Author, operation_id: OperationId, document_id: DocumentId, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - // Construct the storage operation. - let operation = VerifiedOperation::new(&author, &operation_id, &operation).unwrap(); - - // Insert the doggo operation into the db, returns Ok(true) when succesful. - let result = db.store.insert_operation(&operation, &document_id).await; - assert!(result.is_ok()); - - // Request the previously inserted operation by it's id. - let returned_operation = db - .store - .get_operation_by_id(operation.operation_id()) - .await - .unwrap() - .unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + // Construct the storage operation. + let operation = VerifiedOperation::new(&author, &operation_id, &operation).unwrap(); + + // Insert the doggo operation into the db, returns Ok(true) when succesful. 
+ let result = db.store.insert_operation(&operation, &document_id).await; + assert!(result.is_ok()); + + // Request the previously inserted operation by it's id. + let returned_operation = db + .store + .get_operation_by_id(operation.operation_id()) + .await + .unwrap() + .unwrap(); - assert_eq!(returned_operation.public_key(), operation.public_key()); - assert_eq!(returned_operation.fields(), operation.fields()); - assert_eq!(returned_operation.operation_id(), operation.operation_id()); + assert_eq!(returned_operation.public_key(), operation.public_key()); + assert_eq!(returned_operation.fields(), operation.fields()); + assert_eq!(returned_operation.operation_id(), operation.operation_id()); + }); } #[rstest] - #[tokio::test] - async fn insert_operation_twice( + fn insert_operation_twice( #[from(verified_operation)] verified_operation: VerifiedOperation, document_id: DocumentId, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - - assert!(db - .store - .insert_operation(&verified_operation, &document_id) - .await - .is_ok()); + runner.with_db_teardown(|db: TestDatabase| async move { + db.store + .insert_operation(&verified_operation, &document_id) + .await + .unwrap(); - assert_eq!( - db.store.insert_operation(&verified_operation, &document_id).await.unwrap_err().to_string(), - "A fatal error occured in OperationStore: error returned from database: UNIQUE constraint failed: operations_v1.entry_hash" - ) + assert!(db + .store + .insert_operation(&verified_operation, &document_id) + .await + .is_err()); + }); } #[rstest] - #[tokio::test] - async fn gets_document_by_operation_id( + fn gets_document_by_operation_id( #[from(verified_operation)] #[with(Some(operation_fields(default_fields())), None, None, None, Some(DEFAULT_HASH.parse().unwrap()))] create_operation: VerifiedOperation, @@ -370,80 +364,77 @@ mod tests { #[with(Some(operation_fields(default_fields())), Some(DEFAULT_HASH.parse().unwrap()))] update_operation: VerifiedOperation, document_id: DocumentId, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - - assert!(db - .store - .get_document_by_operation_id(create_operation.operation_id()) - .await - .unwrap() - .is_none()); - - db.store - .insert_operation(&create_operation, &document_id) - .await - .unwrap(); - - assert_eq!( - db.store + runner.with_db_teardown(|db: TestDatabase| async move { + assert!(db + .store .get_document_by_operation_id(create_operation.operation_id()) .await .unwrap() - .unwrap(), - document_id.clone() - ); + .is_none()); - db.store - .insert_operation(&update_operation, &document_id) - .await - .unwrap(); + db.store + .insert_operation(&create_operation, &document_id) + .await + .unwrap(); + + assert_eq!( + db.store + .get_document_by_operation_id(create_operation.operation_id()) + .await + .unwrap() + .unwrap(), + document_id.clone() + ); - assert_eq!( db.store - .get_document_by_operation_id(create_operation.operation_id()) + .insert_operation(&update_operation, &document_id) .await - .unwrap() - .unwrap(), - document_id.clone() - ); + .unwrap(); + + assert_eq!( + db.store + .get_document_by_operation_id(create_operation.operation_id()) + .await + .unwrap() + .unwrap(), + document_id.clone() + ); + }); } #[rstest] - #[tokio::test] - async fn get_operations_by_document_id( + fn get_operations_by_document_id( key_pair: KeyPair, #[from(test_db)] #[with(5, 1)] - #[future] - db: TestSqlStore, + runner: 
TestDatabaseRunner, ) { - let db = db.await; - let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); - let latest_entry = db - .store - .get_latest_entry(&author, &LogId::default()) - .await - .unwrap() - .unwrap(); + let latest_entry = db + .store + .get_latest_entry(&author, &LogId::default()) + .await + .unwrap() + .unwrap(); - let document_id = db - .store - .get_document_by_entry(&latest_entry.hash()) - .await - .unwrap() - .unwrap(); + let document_id = db + .store + .get_document_by_entry(&latest_entry.hash()) + .await + .unwrap() + .unwrap(); - let operations_by_document_id = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let operations_by_document_id = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); - assert_eq!(operations_by_document_id.len(), 5) + assert_eq!(operations_by_document_id.len(), 5) + }); } } diff --git a/aquadoggo/src/db/stores/schema.rs b/aquadoggo/src/db/stores/schema.rs index edb632ba9..0e0358be7 100644 --- a/aquadoggo/src/db/stores/schema.rs +++ b/aquadoggo/src/db/stores/schema.rs @@ -106,7 +106,9 @@ mod tests { use rstest::rstest; use crate::db::provider::SqlStorage; - use crate::db::stores::test_utils::{insert_entry_operation_and_view, test_db, TestSqlStore}; + use crate::db::stores::test_utils::{ + insert_entry_operation_and_view, test_db, TestDatabase, TestDatabaseRunner, + }; use super::SchemaStore; @@ -163,108 +165,187 @@ mod tests { #[rstest] #[case::valid_schema_and_fields( - "venue_name = { type: \"str\", value: tstr, }\ncreate-fields = { venue_name }\nupdate-fields = { + ( venue_name ) }", - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string())), ("type", FieldType::String.into())]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "missing field \"name\"")] - #[case::fields_missing_name_field( - "", - operation_fields(vec![("type", FieldType::String.into())]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "missing field \"type\"")] - #[case::fields_missing_type_field( - "", - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string()))]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "missing field \"name\"")] - #[case::schema_missing_name_field( - "", - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string())), ("type", FieldType::String.into())]), - operation_fields(vec![("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "missing field \"description\"")] - #[case::schema_missing_name_description( - "", - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string())), ("type", FieldType::String.into())]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string()))]))] - #[tokio::test] - async fn get_schema( + r#"venue_name = { type: "str", value: tstr, } + create-fields = { venue_name } + update-fields = { + ( venue_name ) }"#, + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())), + ("type", 
FieldType::String.into()) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + fn get_schema( #[case] cddl_str: &str, #[case] schema_field_definition: OperationFields, #[case] schema_definition: OperationFields, key_pair: KeyPair, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - let document_view_id = - insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; + let cddl_str = cddl_str.to_string(); - let document_view_id = - insert_schema_definition(&db.store, &key_pair, &document_view_id, schema_definition) - .await; + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view_id = + insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; - let schema = db - .store - .get_schema_by_id(&document_view_id) - .await - .unwrap_or_else(|e| panic!("{}", e)); + let document_view_id = insert_schema_definition( + &db.store, + &key_pair, + &document_view_id, + schema_definition, + ) + .await; + + let schema = db.store.get_schema_by_id(&document_view_id).await.unwrap(); - assert_eq!(schema.unwrap().as_cddl(), cddl_str) + assert_eq!( + schema.unwrap().as_cddl().replace(" ", ""), + cddl_str.replace(" ", "") + ); + }); } #[rstest] - #[case::works( - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string())), ("type", FieldType::String.into())]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[should_panic(expected = "invalid fields found for this schema")] - #[case::does_not_work( - operation_fields(vec![("name", OperationValue::Text("venue_name".to_string()))]), - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[tokio::test] - async fn get_all_schema( + #[case::fields_missing_name_field("missing field \"name\"", + operation_fields(vec![ + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + #[case::fields_missing_type_field("missing field \"type\"", + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + #[case::schema_missing_name_field("missing field \"name\"", + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())), + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + #[case::schema_missing_name_description("missing field \"description\"", + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())), + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())) + ]) + )] + fn get_schema_errors( + #[case] err_str: &str, #[case] schema_field_definition: OperationFields, #[case] schema_definition: OperationFields, key_pair: KeyPair, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - let document_view_id = - 
insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; + let err_str = err_str.to_string(); + + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view_id = + insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; - insert_schema_definition(&db.store, &key_pair, &document_view_id, schema_definition).await; + let document_view_id = insert_schema_definition( + &db.store, + &key_pair, + &document_view_id, + schema_definition, + ) + .await; - let schemas = db - .store - .get_all_schema() - .await - .unwrap_or_else(|e| panic!("{}", e)); + let schema = db.store.get_schema_by_id(&document_view_id).await; - assert_eq!(schemas.len(), 1) + assert_eq!(schema.unwrap_err().to_string(), err_str); + }); } #[rstest] - #[case::schema_fields_do_not_exist( - operation_fields(vec![("name", OperationValue::Text("venue".to_string())), ("description", OperationValue::Text("My venue".to_string()))]))] - #[tokio::test] - async fn schema_fields_do_not_exist( + #[case::works( + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())), + ("type", FieldType::String.into()) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + #[case::does_not_work( + operation_fields(vec![ + ("name", OperationValue::Text("venue_name".to_string())) + ]), + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + fn get_all_schema( + #[case] schema_field_definition: OperationFields, #[case] schema_definition: OperationFields, - #[from(document_view_id)] schema_fields_id: DocumentViewId, - #[from(test_db)] - #[future] - db: TestSqlStore, key_pair: KeyPair, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - let document_view_id = - insert_schema_definition(&db.store, &key_pair, &schema_fields_id, schema_definition) + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view_id = + insert_schema_field_definition(&db.store, &key_pair, schema_field_definition).await; + + insert_schema_definition(&db.store, &key_pair, &document_view_id, schema_definition) .await; - // Retrieve the schema by it's document_view_id. - let schema = db.store.get_schema_by_id(&document_view_id).await; + let schemas = db.store.get_all_schema().await; + + if schemas.is_err() { + assert_eq!( + schemas.unwrap_err().to_string(), + "invalid fields found for this schema".to_string() + ) + } else { + assert_eq!(schemas.unwrap().len(), 1); + } + }); + } - assert_eq!(schema.unwrap_err().to_string(), format!("No document view found for schema field definition with id: {0} which is required by schema definition {1}", schema_fields_id, document_view_id)) + #[rstest] + #[case::schema_fields_do_not_exist( + operation_fields(vec![ + ("name", OperationValue::Text("venue".to_string())), + ("description", OperationValue::Text("My venue".to_string())) + ]) + )] + fn schema_fields_do_not_exist( + #[case] schema_definition: OperationFields, + #[from(document_view_id)] schema_fields_id: DocumentViewId, + #[from(test_db)] runner: TestDatabaseRunner, + key_pair: KeyPair, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let document_view_id = + insert_schema_definition(&db.store, &key_pair, &schema_fields_id, schema_definition) + .await; + + // Retrieve the schema by it's document_view_id. 
+ let schema = db.store.get_schema_by_id(&document_view_id).await; + + assert_eq!( + schema.unwrap_err().to_string(), + format!( + "No document view found for schema field definition with id: {0} which is required by schema definition {1}", + schema_fields_id, + document_view_id + ) + ); + }); } } diff --git a/aquadoggo/src/db/stores/task.rs b/aquadoggo/src/db/stores/task.rs index d1ff1417d..c82e08f9d 100644 --- a/aquadoggo/src/db/stores/task.rs +++ b/aquadoggo/src/db/stores/task.rs @@ -24,7 +24,7 @@ impl SqlStorage { // Insert task into database query( " - INSERT OR IGNORE INTO + INSERT INTO tasks ( name, document_id, @@ -32,6 +32,7 @@ impl SqlStorage { ) VALUES ($1, $2, $3) + ON CONFLICT DO NOTHING ", ) .bind(task.worker_name()) @@ -61,9 +62,10 @@ impl SqlStorage { tasks WHERE name = $1 - -- Use `IS` because these columns can contain `null` values. - AND document_id IS $2 - AND document_view_id IS $3 + -- Use `COALESCE` to compare possible null values in a way + -- that is compatible between SQLite and PostgreSQL. + AND COALESCE(document_id, '0') = COALESCE($2, '0') + AND COALESCE(document_view_id, '0') = COALESCE($3, '0') ", ) .bind(task.worker_name()) @@ -126,62 +128,53 @@ mod tests { use p2panda_rs::test_utils::fixtures::{document_id, document_view_id}; use rstest::rstest; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::materializer::{Task, TaskInput}; #[rstest] - #[tokio::test] - async fn insert_get_remove_tasks( + fn insert_get_remove_tasks( document_view_id: DocumentViewId, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - - // Prepare test data - let task = Task::new("reduce", TaskInput::new(None, Some(document_view_id))); - - // Insert task - let result = db.store.insert_task(&task).await; - assert!(result.is_ok(), "{:?}", result); - - // Check if task exists in database - let result = db.store.get_tasks().await; - assert_eq!(result.unwrap(), vec![task.clone()]); - - // Remove task - let result = db.store.remove_task(&task).await; - assert!(result.is_ok(), "{:?}", result); - - // Check if all tasks got removed - let result = db.store.get_tasks().await; - assert_eq!(result.unwrap(), vec![]); + runner.with_db_teardown(|db: TestDatabase| async move { + // Prepare test data + let task = Task::new("reduce", TaskInput::new(None, Some(document_view_id))); + + // Insert task + let result = db.store.insert_task(&task).await; + assert!(result.is_ok(), "{:?}", result); + + // Check if task exists in database + let result = db.store.get_tasks().await; + assert_eq!(result.unwrap(), vec![task.clone()]); + + // Remove task + let result = db.store.remove_task(&task).await; + assert!(result.is_ok(), "{:?}", result); + + // Check if all tasks got removed + let result = db.store.get_tasks().await; + assert_eq!(result.unwrap(), vec![]); + }); } #[rstest] - #[tokio::test] - async fn avoid_duplicates( - document_id: DocumentId, - #[from(test_db)] - #[future] - db: TestSqlStore, - ) { - let db = db.await; - - // Prepare test data - let task = Task::new("reduce", TaskInput::new(Some(document_id), None)); - - // Insert task - let result = db.store.insert_task(&task).await; - assert!(result.is_ok(), "{:?}", result); - - // Insert the same thing again, it should silently fail - let result = db.store.insert_task(&task).await; - assert!(result.is_ok(), "{:?}", result); - - // Check for duplicates - let result = 
db.store.get_tasks().await; - assert_eq!(result.unwrap().len(), 1); + fn avoid_duplicates(document_id: DocumentId, #[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + // Prepare test data + let task = Task::new("reduce", TaskInput::new(Some(document_id), None)); + + // Insert task + let result = db.store.insert_task(&task).await; + assert!(result.is_ok(), "{:?}", result); + + // Insert the same thing again, it should silently fail + let result = db.store.insert_task(&task).await; + assert!(result.is_ok(), "{:?}", result); + + // Check for duplicates + let result = db.store.get_tasks().await; + assert_eq!(result.unwrap().len(), 1); + }); } } diff --git a/aquadoggo/src/db/stores/test_utils.rs b/aquadoggo/src/db/stores/test_utils.rs index 9baf60cd4..609669f55 100644 --- a/aquadoggo/src/db/stores/test_utils.rs +++ b/aquadoggo/src/db/stores/test_utils.rs @@ -2,13 +2,14 @@ use std::convert::TryFrom; +use futures::Future; use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewId}; use p2panda_rs::entry::{sign_and_encode, Entry}; use p2panda_rs::hash::Hash; use p2panda_rs::identity::{Author, KeyPair}; use p2panda_rs::operation::{ - AsOperation, Operation, OperationEncoded, OperationId, OperationValue, PinnedRelation, - PinnedRelationList, Relation, RelationList, VerifiedOperation, + AsOperation, AsVerifiedOperation, Operation, OperationEncoded, OperationId, OperationValue, + PinnedRelation, PinnedRelationList, Relation, RelationList, VerifiedOperation, }; use p2panda_rs::schema::SchemaId; use p2panda_rs::storage_provider::traits::{ @@ -17,12 +18,16 @@ use p2panda_rs::storage_provider::traits::{ use p2panda_rs::test_utils::constants::{DEFAULT_PRIVATE_KEY, TEST_SCHEMA_ID}; use p2panda_rs::test_utils::fixtures::{create_operation, delete_operation, update_operation}; use rstest::fixture; +use sqlx::migrate::MigrateDatabase; +use sqlx::Any; +use tokio::runtime::Builder; use crate::db::provider::SqlStorage; use crate::db::stores::{StorageEntry, StorageLog}; use crate::db::traits::DocumentStore; +use crate::db::{connection_pool, create_database, run_pending_migrations, Pool}; use crate::graphql::client::{EntryArgsRequest, PublishEntryRequest}; -use crate::test_helpers::initialize_db; +use crate::test_helpers::TEST_CONFIG; /// The fields used as defaults in the tests. pub fn doggo_test_fields() -> Vec<(&'static str, OperationValue)> { @@ -168,23 +173,103 @@ pub async fn insert_entry_operation_and_view( (document_id, document_view_id) } -/// Container for `SqlStore` with access to the document ids and key_pairs used in the -/// pre-populated database for testing. -pub struct TestSqlStore { - pub store: SqlStorage, - pub key_pairs: Vec, - pub documents: Vec, +#[async_trait::async_trait] +pub trait AsyncTestFn { + async fn call(self, db: TestDatabase); } -/// Fixture for constructing a storage provider instance backed by a pre-polpulated database. Passed -/// parameters define what the db should contain. The first entry in each log contains a valid CREATE -/// operation following entries contain duplicate UPDATE operations. If the with_delete flag is set -/// to true the last entry in all logs contain be a DELETE operation. +#[async_trait::async_trait] +impl AsyncTestFn for FN +where + FN: FnOnce(TestDatabase) -> F + Sync + Send, + F: Future + Send, +{ + async fn call(self, db: TestDatabase) { + self(db).await + } +} + +pub struct TestDatabaseRunner { + /// Number of entries per log/document. 
+ no_of_entries: usize, + + /// Number of authors, each with a log populated as defined above. + no_of_authors: usize, + + /// A boolean flag for whether all logs should contain a delete operation. + with_delete: bool, + + /// The schema used for all operations in the db. + schema: SchemaId, + + /// The fields used for every CREATE operation. + create_operation_fields: Vec<(&'static str, OperationValue)>, + + /// The fields used for every UPDATE operation. + update_operation_fields: Vec<(&'static str, OperationValue)>, +} + +impl TestDatabaseRunner { + /// Provides a safe way to write tests using a database which closes the pool connection + /// automatically when the test succeeds or fails. + /// + /// Takes an (async) test function as an argument and passes over the `TestDatabase` instance + /// so it can be used inside of it. + pub fn with_db_teardown(&self, test: F) { + let runtime = Builder::new_current_thread() + .worker_threads(1) + .enable_all() + .thread_name("with_db_teardown") + .build() + .expect("Could not build tokio Runtime for test"); + + runtime.block_on(async { + // Initialise test database + let db = create_test_db( + self.no_of_entries, + self.no_of_authors, + self.with_delete, + self.schema.clone(), + self.create_operation_fields.clone(), + self.update_operation_fields.clone(), + ) + .await; + + // Get a handle of the underlying database connection pool + let pool = db.store.pool.clone(); + + // Spawn the test in a separate task to make sure we have control over the possible + // panics which might happen inside of it + let handle = tokio::task::spawn(async move { + // Execute the actual test + test.call(db).await; + }); + + // Get a handle of the task so we can use it later + let result = handle.await; + + // Unwind the test by closing down the connection to the database pool. This will + // be reached even when the test panicked + pool.close().await; + + // Panic here when the test failed. The test fails within its own async task and stays + // there; we need to propagate it further to inform the test runtime about the result + result.unwrap(); + }); + } +} + +/// Fixture for constructing a storage provider instance backed by a pre-populated database. +/// +/// Returns a `TestDatabaseRunner` which allows bootstrapping a safe async test environment +/// connecting to a database. It makes sure the runner disconnects properly from the connection +/// pool after the test succeeded or even failed. /// -/// Returns a `TestSqlStore` containing storage provider instance, a vector of key pairs for all authors -/// in the db, and a vector of the ids for all documents. +/// Passed parameters define what the database should contain. The first entry in each log contains +/// a valid CREATE operation; the following entries contain duplicate UPDATE operations. If the +/// with_delete flag is set to true, the last entry in each log will be a DELETE operation.
#[fixture] -pub async fn test_db( +pub fn test_db( // Number of entries per log/document #[default(0)] no_of_entries: usize, // Number of authors, each with a log populated as defined above @@ -197,16 +282,50 @@ pub async fn test_db( #[default(doggo_test_fields())] create_operation_fields: Vec<(&'static str, OperationValue)>, // The fields used for every UPDATE operation #[default(doggo_test_fields())] update_operation_fields: Vec<(&'static str, OperationValue)>, -) -> TestSqlStore { +) -> TestDatabaseRunner { + TestDatabaseRunner { + no_of_entries, + no_of_authors, + with_delete, + schema, + create_operation_fields, + update_operation_fields, + } +} + +/// Container for `SqlStorage` with access to the document ids and key_pairs used in the +/// pre-populated database for testing. +pub struct TestDatabase { + pub store: SqlStorage, + pub key_pairs: Vec, + pub documents: Vec, +} + +/// Helper method for constructing a storage provider instance backed by a pre-populated database. +/// +/// Passed parameters define what the db should contain. The first entry in each log contains a +/// valid CREATE operation; the following entries contain duplicate UPDATE operations. If the +/// with_delete flag is set to true, the last entry in each log will be a DELETE operation. +/// +/// Returns a `TestDatabase` containing a storage provider instance, a vector of key pairs for all +/// authors in the db, and a vector of the ids for all documents. +async fn create_test_db( + no_of_entries: usize, + no_of_authors: usize, + with_delete: bool, + schema: SchemaId, + create_operation_fields: Vec<(&'static str, OperationValue)>, + update_operation_fields: Vec<(&'static str, OperationValue)>, +) -> TestDatabase { let mut documents: Vec = Vec::new(); let key_pairs = test_key_pairs(no_of_authors); let pool = initialize_db().await; - let store = SqlStorage { pool }; + let store = SqlStorage::new(pool); // If we don't want any entries in the db return now if no_of_entries == 0 { - return TestSqlStore { + return TestDatabase { store, key_pairs, documents, @@ -278,9 +397,37 @@ pub async fn test_db( .unwrap(); } } - TestSqlStore { + + TestDatabase { store, key_pairs, documents, } } + +/// Create test database.
+async fn initialize_db() -> Pool { + // Reset database first + drop_database().await; + create_database(&TEST_CONFIG.database_url).await.unwrap(); + + // Create connection pool and run all migrations + let pool = connection_pool(&TEST_CONFIG.database_url, 25) + .await + .unwrap(); + if run_pending_migrations(&pool).await.is_err() { + pool.close().await; + } + + pool +} + +// Delete test database +async fn drop_database() { + if Any::database_exists(&TEST_CONFIG.database_url) + .await + .unwrap() + { + Any::drop_database(&TEST_CONFIG.database_url).await.unwrap(); + } +} diff --git a/aquadoggo/src/db/utils.rs b/aquadoggo/src/db/utils.rs index 8619f5cb9..4b9e0a7c0 100644 --- a/aquadoggo/src/db/utils.rs +++ b/aquadoggo/src/db/utils.rs @@ -5,8 +5,8 @@ use std::collections::BTreeMap; use p2panda_rs::document::{DocumentId, DocumentViewFields, DocumentViewId, DocumentViewValue}; use p2panda_rs::identity::Author; use p2panda_rs::operation::{ - Operation, OperationFields, OperationId, OperationValue, PinnedRelation, PinnedRelationList, - Relation, RelationList, VerifiedOperation, + AsVerifiedOperation, Operation, OperationFields, OperationId, OperationValue, PinnedRelation, + PinnedRelationList, Relation, RelationList, VerifiedOperation, }; use p2panda_rs::schema::SchemaId; @@ -41,100 +41,106 @@ pub fn parse_operation_rows( // - if it is a simple value type, parse it into an OperationValue and add it to the operation_fields // - if it is a relation list value type parse each item into a DocumentId/DocumentViewId and push to // the suitable vec (instantiated above) - operation_rows.iter().for_each(|row| { - match row.field_type.as_str() { - "bool" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::Boolean(row.value.parse::().unwrap()), - ) - .unwrap(); - } - "int" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::Integer(row.value.parse::().unwrap()), - ) - .unwrap(); - } - "float" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::Float(row.value.parse::().unwrap()), - ) - .unwrap(); - } - "str" => { - operation_fields - .add(row.name.as_str(), OperationValue::Text(row.value.clone())) - .unwrap(); - } - "relation" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::Relation(Relation::new( - row.value.parse::().unwrap(), - )), - ) - .unwrap(); - } - // This is a list item, so we push it to a vec but _don't_ add it - // to the operation_fields yet. - "relation_list" => { - match relation_lists.get_mut(&row.name) { - Some(list) => list.push(row.value.parse::().unwrap()), - None => { - relation_lists.insert( - row.name.clone(), - vec![row.value.parse::().unwrap()], - ); - } - }; - } - "pinned_relation" => { - operation_fields - .add( - row.name.as_str(), - OperationValue::PinnedRelation(PinnedRelation::new( - row.value.parse::().unwrap(), - )), - ) - .unwrap(); - } - // This is a list item, so we push it to a vec but _don't_ add it - // to the operation_fields yet. 
- "pinned_relation_list" => { - match pinned_relation_lists.get_mut(&row.name) { - Some(list) => list.push(row.value.parse::().unwrap()), - None => { - pinned_relation_lists.insert( - row.name.clone(), - vec![row.value.parse::().unwrap()], - ); - } - }; - } - _ => (), - }; - }); + if first_row.action != "delete" { + operation_rows.iter().for_each(|row| { + let field_type = row.field_type.as_ref().unwrap(); + let field_name = row.name.as_ref().unwrap(); + let field_value = row.value.as_ref().unwrap(); + + match field_type.as_str() { + "bool" => { + operation_fields + .add( + field_name, + OperationValue::Boolean(field_value.parse::().unwrap()), + ) + .unwrap(); + } + "int" => { + operation_fields + .add( + field_name, + OperationValue::Integer(field_value.parse::().unwrap()), + ) + .unwrap(); + } + "float" => { + operation_fields + .add( + field_name, + OperationValue::Float(field_value.parse::().unwrap()), + ) + .unwrap(); + } + "str" => { + operation_fields + .add(field_name, OperationValue::Text(field_value.clone())) + .unwrap(); + } + "relation" => { + operation_fields + .add( + field_name, + OperationValue::Relation(Relation::new( + field_value.parse::().unwrap(), + )), + ) + .unwrap(); + } + // This is a list item, so we push it to a vec but _don't_ add it + // to the operation_fields yet. + "relation_list" => { + match relation_lists.get_mut(field_name) { + Some(list) => list.push(field_value.parse::().unwrap()), + None => { + relation_lists.insert( + field_name.clone(), + vec![field_value.parse::().unwrap()], + ); + } + }; + } + "pinned_relation" => { + operation_fields + .add( + field_name, + OperationValue::PinnedRelation(PinnedRelation::new( + field_value.parse::().unwrap(), + )), + ) + .unwrap(); + } + // This is a list item, so we push it to a vec but _don't_ add it + // to the operation_fields yet. 
+ "pinned_relation_list" => { + match pinned_relation_lists.get_mut(field_name) { + Some(list) => list.push(field_value.parse::().unwrap()), + None => { + pinned_relation_lists.insert( + field_name.clone(), + vec![field_value.parse::().unwrap()], + ); + } + }; + } + _ => (), + }; + }) + }; - for (field_name, relation_list) in relation_lists { + for (ref field_name, relation_list) in relation_lists { operation_fields .add( - field_name.as_str(), + field_name, OperationValue::RelationList(RelationList::new(relation_list)), ) .unwrap(); } - for (field_name, pinned_relation_list) in pinned_relation_lists { + for (ref field_name, pinned_relation_list) in pinned_relation_lists { operation_fields .add( - field_name.as_str(), + field_name, OperationValue::PinnedRelationList(PinnedRelationList::new(pinned_relation_list)), ) .unwrap(); @@ -144,10 +150,23 @@ pub fn parse_operation_rows( "create" => Operation::new_create(schema, operation_fields), "update" => Operation::new_update( schema, - first_row.previous_operations.parse().unwrap(), + first_row + .previous_operations + .as_ref() + .unwrap() + .parse() + .unwrap(), operation_fields, ), - "delete" => Operation::new_delete(schema, first_row.previous_operations.parse().unwrap()), + "delete" => Operation::new_delete( + schema, + first_row + .previous_operations + .as_ref() + .unwrap() + .parse() + .unwrap(), + ), _ => panic!("Operation which was not CREATE, UPDATE or DELETE found."), } // Unwrap as we are sure values coming from the db are validated @@ -357,10 +376,10 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "age".to_string(), - field_type: "int".to_string(), - value: "28".to_string(), + previous_operations: None, + name: Some("age".to_string()), + field_type: Some("int".to_string()), + value: Some("28".to_string()), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -376,10 +395,10 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "height".to_string(), - field_type: "float".to_string(), - value: "3.5".to_string(), + previous_operations: None, + name: Some("height".to_string()), + field_type: Some("float".to_string()), + value: Some("3.5".to_string()), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -395,10 +414,10 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "is_admin".to_string(), - field_type: "bool".to_string(), - value: "false".to_string(), + previous_operations: None, + name: Some("is_admin".to_string()), + field_type: Some("bool".to_string()), + value: Some("false".to_string()), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -414,11 +433,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_profile_pictures".to_string(), - field_type: "relation_list".to_string(), - value: "0020aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - .to_string(), + previous_operations: None, + name: Some("many_profile_pictures".to_string()), + field_type: Some("relation_list".to_string()), + value: Some( + 
"0020aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -434,11 +455,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_profile_pictures".to_string(), - field_type: "relation_list".to_string(), - value: "0020bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" - .to_string(), + previous_operations: None, + name: Some("many_profile_pictures".to_string()), + field_type: Some("relation_list".to_string()), + value: Some( + "0020bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -454,11 +477,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_special_profile_pictures".to_string(), - field_type: "pinned_relation_list".to_string(), - value: "0020cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" - .to_string(), + previous_operations: None, + name: Some("many_special_profile_pictures".to_string()), + field_type: Some("pinned_relation_list".to_string()), + value: Some( + "0020cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -474,11 +499,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_special_profile_pictures".to_string(), - field_type: "pinned_relation_list".to_string(), - value: "0020dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" - .to_string(), + previous_operations: None, + name: Some("many_special_profile_pictures".to_string()), + field_type: Some("pinned_relation_list".to_string()), + value: Some( + "0020dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -494,11 +521,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_special_dog_pictures".to_string(), - field_type: "pinned_relation_list".to_string(), - value: "0020bcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbc" - .to_string(), + previous_operations: None, + name: Some("many_special_dog_pictures".to_string()), + field_type: Some("pinned_relation_list".to_string()), + value: Some( + "0020bcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbc" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -514,11 +543,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "many_special_dog_pictures".to_string(), - field_type: "pinned_relation_list".to_string(), - value: "0020abababababababababababababababababababababababababababababababab" - .to_string(), + previous_operations: None, + name: Some("many_special_dog_pictures".to_string()), + field_type: 
Some("pinned_relation_list".to_string()), + value: Some( + "0020abababababababababababababababababababababababababababababababab" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -534,11 +565,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "profile_picture".to_string(), - field_type: "relation".to_string(), - value: "0020eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" - .to_string(), + previous_operations: None, + name: Some("profile_picture".to_string()), + field_type: Some("relation".to_string()), + value: Some( + "0020eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -554,11 +587,13 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "special_profile_picture".to_string(), - field_type: "pinned_relation".to_string(), - value: "0020ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" - .to_string(), + previous_operations: None, + name: Some("special_profile_picture".to_string()), + field_type: Some("pinned_relation".to_string()), + value: Some( + "0020ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + .to_string(), + ), }, OperationFieldsJoinedRow { author: "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96" @@ -574,10 +609,10 @@ mod tests { schema_id: "venue_0020c65567ae37efea293e34a9c7d13f8f2bf23dbdc3b5c7b9ab46293111c48fc78b" .to_string(), - previous_operations: "".to_string(), - name: "username".to_string(), - field_type: "str".to_string(), - value: "bubu".to_string(), + previous_operations: None, + name: Some("username".to_string()), + field_type: Some("str".to_string()), + value: Some("bubu".to_string()), }, ]; diff --git a/aquadoggo/src/graphql/client/mutation.rs b/aquadoggo/src/graphql/client/mutation.rs index 6f70803dd..1ca2d350d 100644 --- a/aquadoggo/src/graphql/client/mutation.rs +++ b/aquadoggo/src/graphql/client/mutation.rs @@ -84,16 +84,31 @@ impl ClientMutationRoot { #[cfg(test)] mod tests { + use std::convert::TryFrom; + use async_graphql::{from_value, value, Request, Value, Variables}; - use p2panda_rs::entry::{EntrySigned, LogId, SeqNum}; + use p2panda_rs::document::DocumentId; + use p2panda_rs::entry::{sign_and_encode, Entry, EntrySigned, LogId, SeqNum}; + use p2panda_rs::hash::Hash; + use p2panda_rs::identity::{Author, KeyPair}; + use p2panda_rs::operation::{Operation, OperationEncoded, OperationValue}; + use p2panda_rs::storage_provider::traits::{AsStorageEntry, EntryStore, StorageProvider}; + use p2panda_rs::test_utils::constants::{DEFAULT_HASH, DEFAULT_PRIVATE_KEY, TEST_SCHEMA_ID}; + use p2panda_rs::test_utils::fixtures::{ + create_operation, delete_operation, entry_signed_encoded_unvalidated, key_pair, operation, + operation_encoded, operation_fields, random_hash, update_operation, + }; + use rstest::{fixture, rstest}; + use serde_json::json; use tokio::sync::broadcast; use crate::bus::ServiceMessage; - use crate::graphql::client::PublishEntryResponse; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + use crate::graphql::client::{EntryArgsRequest, PublishEntryResponse}; use crate::graphql::GraphQLSchemaManager; - use 
crate::http::HttpServiceContext; + use crate::http::{build_server, HttpServiceContext}; use crate::schema::SchemaProvider; - use crate::test_helpers::initialize_store; + use crate::test_helpers::TestClient; const ENTRY_ENCODED: &str = "00bedabb435758855968b3e2de2aa1f653adfbb392fcf9cb2295a68b2eca3c\ fb030101a200204b771d59d76e820cbae493682003e99b795e4e7c86a8d6b4\ @@ -118,92 +133,510 @@ mod tests { } }"#; - #[tokio::test] - async fn publish_entry() { - let (tx, _rx) = broadcast::channel(16); - let store = initialize_store().await; - let schema_provider = SchemaProvider::default(); - let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; - let context = HttpServiceContext::new(manager); + const UPDATE_OPERATION_NO_PREVIOUS_OPS: &str = "A466616374696F6E6675706461746566736368656D617849636861745F30303230633635353637616533376566656132393365333461396337643133663866326266323364626463336235633762396162343632393331313163343866633738626776657273696F6E01666669656C6473A1676D657373616765A26474797065637374726576616C7565764F68682C206D79206669727374206D65737361676521"; + + const CREATE_OPERATION_WITH_PREVIOUS_OPS: &str = "A566616374696F6E6663726561746566736368656D617849636861745F30303230633635353637616533376566656132393365333461396337643133663866326266323364626463336235633762396162343632393331313163343866633738626776657273696F6E017370726576696F75735F6F7065726174696F6E738178443030323036356637346636666438316562316261653139656230643864636531343566616136613536643762343037366437666261343338353431303630396232626165666669656C6473A1676D657373616765A26474797065637374726576616C75657357686963682049206E6F77207570646174652E"; + const DELETE_OPERATION_NO_PREVIOUS_OPS: &str = "A366616374696F6E6664656C65746566736368656D617849636861745F30303230633635353637616533376566656132393365333461396337643133663866326266323364626463336235633762396162343632393331313163343866633738626776657273696F6E01"; + + #[fixture] + fn publish_entry_request( + #[default(ENTRY_ENCODED)] entry_encoded: &str, + #[default(OPERATION_ENCODED)] operation_encoded: &str, + ) -> Request { // Prepare GraphQL mutation publishing an entry let parameters = Variables::from_value(value!({ - "entryEncoded": ENTRY_ENCODED, - "operationEncoded": OPERATION_ENCODED + "entryEncoded": entry_encoded, + "operationEncoded": operation_encoded, })); - // Process mutation with given schema - let request = Request::new(PUBLISH_ENTRY_QUERY).variables(parameters); - let response = context.schema.execute(request).await; - let received: PublishEntryResponse = match response.data { - Value::Object(result_outer) => { - from_value(result_outer.get("publishEntry").unwrap().to_owned()).unwrap() - } - _ => panic!("Expected return value to be an object"), - }; + Request::new(PUBLISH_ENTRY_QUERY).variables(parameters) + } + + #[rstest] + fn publish_entry(#[from(test_db)] runner: TestDatabaseRunner, publish_entry_request: Request) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); + + let response = context.schema.execute(publish_entry_request).await; + let received: PublishEntryResponse = match response.data { + Value::Object(result_outer) => { + from_value(result_outer.get("publishEntry").unwrap().to_owned()).unwrap() + } + _ => panic!("Expected return value to be an object"), + }; + + // The response should contain args for the next entry in 
the same log + let expected = PublishEntryResponse { + log_id: LogId::new(1), + seq_num: SeqNum::new(2).unwrap(), + backlink: Some( + "00201c221b573b1e0c67c5e2c624a93419774cdf46b3d62414c44a698df1237b1c16" + .parse() + .unwrap(), + ), + skiplink: None, + }; + assert_eq!(expected, received); + }); + } + + #[rstest] + fn sends_message_on_communication_bus( + #[from(test_db)] runner: TestDatabaseRunner, + publish_entry_request: Request, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, mut rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + + context.schema.execute(publish_entry_request).await; - // The response should contain args for the next entry in the same log - let expected = PublishEntryResponse { - log_id: LogId::new(1), - seq_num: SeqNum::new(2).unwrap(), - backlink: Some( - "00201c221b573b1e0c67c5e2c624a93419774cdf46b3d62414c44a698df1237b1c16" - .parse() - .unwrap(), + // Find out hash of test entry to determine operation id + let entry_encoded = EntrySigned::new(ENTRY_ENCODED).unwrap(); + + // Expect receiver to receive sent message + let message = rx.recv().await.unwrap(); + assert_eq!( + message, + ServiceMessage::NewOperation(entry_encoded.hash().into()) + ); + }); + } + + #[rstest] + fn publish_entry_error_handling(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + + let parameters = Variables::from_value(value!({ + "entryEncoded": ENTRY_ENCODED, + "operationEncoded": "".to_string() + })); + let request = Request::new(PUBLISH_ENTRY_QUERY).variables(parameters); + let response = context.schema.execute(request).await; + + assert!(response.is_err()); + assert_eq!( + "operation needs to match payload hash of encoded entry".to_string(), + response.errors[0].to_string() + ); + }); + } + + #[rstest] + fn post_gql_mutation( + #[from(test_db)] runner: TestDatabaseRunner, + publish_entry_request: Request, + ) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + let client = TestClient::new(build_server(context)); + + let response = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables + } + )) + .send() + .await; + + assert_eq!( + response.json::().await, + json!({ + "data": { + "publishEntry": { + "logId":"1", + "seqNum":"2", + "backlink":"00201c221b573b1e0c67c5e2c624a93419774cdf46b3d62414c44a698df1237b1c16", + "skiplink":null + } + } + }) + ); + }); + } + + #[rstest] + #[case::no_entry("", "", "Bytes to decode had length of 0")] + #[case::invalid_entry_bytes("AB01", "", "Could not decode author public key from bytes")] + #[case::invalid_entry_hex_encoding( + "-/74='4,.=4-=235m-0 34.6-3", + OPERATION_ENCODED, + "invalid hex encoding in entry" + )] + #[case::no_operation( + ENTRY_ENCODED, + "", + "operation needs to match payload hash of encoded entry" + )] + #[case::invalid_operation_bytes( + ENTRY_ENCODED, + "AB01", + "operation needs to match payload hash of encoded entry" + )] + #[case::invalid_operation_hex_encoding( + ENTRY_ENCODED, + "0-25.-%5930n3544[{{{ @@@", + "invalid hex encoding in operation" + )] + #[case::operation_does_not_match( + ENTRY_ENCODED, + &{operation_encoded(Some(operation_fields(vec![("silly", OperationValue::Text("Sausage".to_string()))])), None, 
None).as_str().to_owned()}, + "operation needs to match payload hash of encoded entry" + )] + #[case::valid_entry_with_extra_hex_char_at_end( + &{ENTRY_ENCODED.to_string() + "A"}, + OPERATION_ENCODED, + "invalid hex encoding in entry" + )] + #[case::valid_entry_with_extra_hex_char_at_start( + &{"A".to_string() + ENTRY_ENCODED}, + OPERATION_ENCODED, + "invalid hex encoding in entry" + )] + #[case::should_not_have_skiplink( + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + Some(random_hash()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode payload hash DecodeError" + )] + #[case::should_not_have_backlink( + &entry_signed_encoded_unvalidated( + 1, + 1, + Some(random_hash()), + None, + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode payload hash DecodeError" + )] + #[case::should_not_have_backlink_or_skiplink( + &entry_signed_encoded_unvalidated( + 1, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + Some(DEFAULT_HASH.parse().unwrap()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())) , + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode payload hash DecodeError" + )] + #[case::missing_backlink( + &entry_signed_encoded_unvalidated( + 2, + 1, + None, + None, + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode backlink yamf hash: DecodeError" + )] + #[case::missing_skiplink( + &entry_signed_encoded_unvalidated( + 8, + 1, + Some(random_hash()), + None, + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode backlink yamf hash: DecodeError" + )] + #[case::should_not_include_skiplink( + &entry_signed_encoded_unvalidated( + 14, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + Some(DEFAULT_HASH.parse().unwrap()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode payload hash DecodeError" + )] + #[case::payload_hash_and_size_missing( + &entry_signed_encoded_unvalidated( + 14, + 1, + Some(random_hash()), + Some(DEFAULT_HASH.parse().unwrap()), + None, + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not decode payload hash DecodeError" + )] + #[case::backlink_and_skiplink_not_in_db( + &entry_signed_encoded_unvalidated( + 8, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + Some(Hash::new_from_bytes(vec![2, 3, 4]).unwrap()), + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not find expected backlink in database for entry with id: " + )] + #[case::backlink_not_in_db( + &entry_signed_encoded_unvalidated( + 2, + 1, + Some(DEFAULT_HASH.parse().unwrap()), + None, + Some(Operation::from(&OperationEncoded::new(OPERATION_ENCODED).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + OPERATION_ENCODED, + "Could not find expected backlink in database for entry with id: " + )] + #[case::previous_operations_not_in_db( + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + None, + Some( + operation( + Some( + operation_fields( + vec![("silly", OperationValue::Text("Sausage".to_string()))] + ) + ), + Some(DEFAULT_HASH.parse().unwrap()), + None + ) ), 
- skiplink: None, - }; - assert_eq!(expected, received); + key_pair(DEFAULT_PRIVATE_KEY) + ), + &{operation_encoded( + Some( + operation_fields( + vec![("silly", OperationValue::Text("Sausage".to_string()))] + ) + ), + Some(DEFAULT_HASH.parse().unwrap()), + None + ).as_str().to_owned() + }, + "Could not find document for entry in database with id: " + )] + #[case::create_operation_with_previous_operations( + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + None, + Some(Operation::from(&OperationEncoded::new(CREATE_OPERATION_WITH_PREVIOUS_OPS).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + CREATE_OPERATION_WITH_PREVIOUS_OPS, + "previous_operations field should be empty" + )] + #[case::update_operation_no_previous_operations( + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + None, + Some(Operation::from(&OperationEncoded::new(UPDATE_OPERATION_NO_PREVIOUS_OPS).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + UPDATE_OPERATION_NO_PREVIOUS_OPS, + "previous_operations field can not be empty" + )] + #[case::delete_operation_no_previous_operations( + &entry_signed_encoded_unvalidated( + 1, + 1, + None, + None, + Some(Operation::from(&OperationEncoded::new(DELETE_OPERATION_NO_PREVIOUS_OPS).unwrap())), + key_pair(DEFAULT_PRIVATE_KEY) + ), + DELETE_OPERATION_NO_PREVIOUS_OPS, + "previous_operations field can not be empty" + )] + fn invalid_requests_fail( + #[case] entry_encoded: &str, + #[case] operation_encoded: &str, + #[case] expected_error_message: &str, + #[from(test_db)] runner: TestDatabaseRunner, + ) { + let entry_encoded = entry_encoded.to_string(); + let operation_encoded = operation_encoded.to_string(); + let expected_error_message = expected_error_message.to_string(); + + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let context = HttpServiceContext::new(db.store, tx); + let client = TestClient::new(build_server(context)); + + let publish_entry_request = publish_entry_request(&entry_encoded, &operation_encoded); + + let response = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables + } + )) + .send() + .await; + + let response = response.json::().await; + for error in response.get("errors").unwrap().as_array().unwrap() { + assert_eq!( + error.get("message").unwrap().as_str().unwrap(), + expected_error_message + ) + } + }); } - #[tokio::test] - async fn sends_message_on_communication_bus() { - let (tx, mut rx) = broadcast::channel(16); - let store = initialize_store().await; - let schema_provider = SchemaProvider::default(); - let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; - let context = HttpServiceContext::new(manager); + #[rstest] + fn publish_many_entries(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + let key_pairs = vec![KeyPair::new(), KeyPair::new()]; + let num_of_entries = 100; - // Prepare GraphQL mutation publishing an entry - let parameters = Variables::from_value(value!({ - "entryEncoded": ENTRY_ENCODED, - "operationEncoded": OPERATION_ENCODED - })); + let (tx, _rx) = broadcast::channel(16); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); + let client = TestClient::new(build_server(context)); + + for key_pair in &key_pairs { + let mut document: Option = None; + let author = 
Author::try_from(key_pair.public_key().to_owned()).unwrap(); + for index in 0..num_of_entries { + let next_entry_args = db + .store + .get_entry_args(&EntryArgsRequest { + author: author.clone(), + document: document.as_ref().cloned(), + }) + .await + .unwrap(); + + let operation = if index == 0 { + create_operation(&[("name", OperationValue::Text("Panda".to_string()))]) + } else if index == (num_of_entries - 1) { + delete_operation(&next_entry_args.backlink.clone().unwrap().into()) + } else { + update_operation( + &[("name", OperationValue::Text("🐼".to_string()))], + &next_entry_args.backlink.clone().unwrap().into(), + ) + }; + + let entry = Entry::new( + &next_entry_args.log_id, + Some(&operation), + next_entry_args.skiplink.as_ref(), + next_entry_args.backlink.as_ref(), + &next_entry_args.seq_num, + ) + .unwrap(); + + let entry_encoded = sign_and_encode(&entry, key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(&operation).unwrap(); - // Process mutation with given schema - let request = Request::new(PUBLISH_ENTRY_QUERY).variables(parameters); - context.schema.execute(request).await; + if index == 0 { + document = Some(entry_encoded.hash().into()); + } - // Find out hash of test entry to determine operation id - let entry_encoded = EntrySigned::new(ENTRY_ENCODED).unwrap(); + // Prepare a publish entry request for each entry. + let publish_entry_request = + publish_entry_request(entry_encoded.as_str(), operation_encoded.as_str()); - // Expect receiver to receive sent message - let message = rx.recv().await.unwrap(); - assert_eq!( - message, - ServiceMessage::NewOperation(entry_encoded.hash().into()) - ); + // Publish the entry. + let result = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables + } + )) + .send() + .await; + + assert!(result.status().is_success()) + } + } + }); } - #[tokio::test] - async fn publish_entry_error_handling() { - let (tx, _rx) = broadcast::channel(16); - let store = initialize_store().await; - let schema_provider = SchemaProvider::default(); - let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; - let context = HttpServiceContext::new(manager); + #[rstest] + fn duplicate_publishing_of_entries( + #[from(test_db)] + #[with(1, 1, false, TEST_SCHEMA_ID.parse().unwrap())] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|populated_db: TestDatabase| async move { + let (tx, _rx) = broadcast::channel(16); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); + let client = TestClient::new(build_server(context)); - let parameters = Variables::from_value(value!({ - "entryEncoded": ENTRY_ENCODED, - "operationEncoded": "".to_string() - })); - let request = Request::new(PUBLISH_ENTRY_QUERY).variables(parameters); - let response = context.schema.execute(request).await; - - assert!(response.is_err()); - assert_eq!( - "operation needs to match payload hash of encoded entry".to_string(), - response.errors[0].to_string() - ); + // Get the entries from the prepopulated store. + let mut entries = populated_db + .store + .get_entries_by_schema(&TEST_SCHEMA_ID.parse().unwrap()) + .await + .unwrap(); + + // Sort them by seq_num. + entries.sort_by_key(|entry| entry.seq_num().as_u64()); + + let duplicate_entry = entries.first().unwrap(); + + // Prepare a publish entry request for each entry. 
+ let publish_entry_request = publish_entry_request( + duplicate_entry.entry_signed().as_str(), + duplicate_entry.operation_encoded().unwrap().as_str(), + ); + + // Publish the entry and parse response. + let response = client + .post("/graphql") + .json(&json!({ + "query": publish_entry_request.query, + "variables": publish_entry_request.variables + } + )) + .send() + .await; + + let response = response.json::().await; + + // @TODO: This currently throws an internal SQL error to the API user, I think we'd + // like a nicer error message here: + // https://github.com/p2panda/aquadoggo/issues/159 + for error in response.get("errors").unwrap().as_array().unwrap() { + assert!(error.get("message").is_some()) + } + }); } } diff --git a/aquadoggo/src/graphql/client/query/static_query.rs b/aquadoggo/src/graphql/client/query/static_query.rs index 35f237dcf..83e8ce299 100644 --- a/aquadoggo/src/graphql/client/query/static_query.rs +++ b/aquadoggo/src/graphql/client/query/static_query.rs @@ -84,90 +84,93 @@ fn get_document_by_id(_document: DocumentId) -> Document { mod tests { use async_graphql::Response; use p2panda_rs::entry::{LogId, SeqNum}; + use rstest::rstest; use serde_json::json; use tokio::sync::broadcast; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::graphql::client::EntryArgsResponse; use crate::graphql::GraphQLSchemaManager; - use crate::http::build_server; - use crate::http::HttpServiceContext; + use crate::http::{build_server, HttpServiceContext}; use crate::schema::SchemaProvider; use crate::test_helpers::{initialize_store, TestClient}; - #[tokio::test] - async fn next_entry_args_valid_query() { - let (tx, _) = broadcast::channel(16); - let store = initialize_store().await; - let schema_provider = SchemaProvider::default(); - let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; - let context = HttpServiceContext::new(manager); - let client = TestClient::new(build_server(context)); - - // Selected fields need to be alphabetically sorted because that's what the `json` macro - // that is used in the assert below produces. - let response = client - .post("/graphql") - .json(&json!({ - "query": r#"{ - nextEntryArgs( - publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a" - ) { - logId, - seqNum, - backlink, - skiplink - } - }"#, - })) - .send() - .await - .json::() - .await; - - let expected_entry_args = EntryArgsResponse { - log_id: LogId::new(1), - seq_num: SeqNum::new(1).unwrap(), - backlink: None, - skiplink: None, - }; - let received_entry_args: EntryArgsResponse = match response.data { - async_graphql::Value::Object(result_outer) => { - async_graphql::from_value(result_outer.get("nextEntryArgs").unwrap().to_owned()) - .unwrap() - } - _ => panic!("Expected return value to be an object"), - }; - - assert_eq!(received_entry_args, expected_entry_args); + #[rstest] + fn next_entry_args_valid_query(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _) = broadcast::channel(16); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); + let client = TestClient::new(build_server(context)); + + // Selected fields need to be alphabetically sorted because that's what the `json` + // macro that is used in the assert below produces. 
+ let response = client + .post("/graphql") + .json(&json!({ + "query": r#"{ + nextEntryArgs( + publicKey: "8b52ae153142288402382fd6d9619e018978e015e6bc372b1b0c7bd40c6a240a" + ) { + logId, + seqNum, + backlink, + skiplink + } + }"#, + })) + .send() + .await + .json::() + .await; + + let expected_entry_args = EntryArgsResponse { + log_id: LogId::new(1), + seq_num: SeqNum::new(1).unwrap(), + backlink: None, + skiplink: None, + }; + let received_entry_args: EntryArgsResponse = match response.data { + async_graphql::Value::Object(result_outer) => { + async_graphql::from_value(result_outer.get("nextEntryArgs").unwrap().to_owned()) + .unwrap() + } + _ => panic!("Expected return value to be an object"), + }; + + assert_eq!(received_entry_args, expected_entry_args); + }) } - #[tokio::test] - async fn next_entry_args_error_response() { - let (tx, _) = broadcast::channel(16); - let store = initialize_store().await; - let schema_provider = SchemaProvider::default(); - let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; - let context = HttpServiceContext::new(manager); - let client = TestClient::new(build_server(context)); - - // Selected fields need to be alphabetically sorted because that's what the `json` macro - // that is used in the assert below produces. - let response = client - .post("/graphql") - .json(&json!({ - "query": r#"{ + #[rstest] + fn next_entry_args_error_response(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(move |db: TestDatabase| async move { + let (tx, _) = broadcast::channel(16); + let schema_provider = SchemaProvider::default(); + let manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; + let context = HttpServiceContext::new(manager); + let client = TestClient::new(build_server(context)); + + // Selected fields need to be alphabetically sorted because that's what the `json` macro + // that is used in the assert below produces. 
+ let response = client + .post("/graphql") + .json(&json!({ + "query": r#"{ nextEntryArgs(publicKey: "nope") { logId } }"#, - })) - .send() - .await; - - let response: Response = response.json().await; - assert_eq!( - response.errors[0].message, - "invalid hex encoding in author string" - ) + })) + .send() + .await; + + let response: Response = response.json().await; + assert_eq!( + response.errors[0].message, + "invalid hex encoding in author string" + ) + }) } } diff --git a/aquadoggo/src/http/service.rs b/aquadoggo/src/http/service.rs index 1c02cd041..558de6520 100644 --- a/aquadoggo/src/http/service.rs +++ b/aquadoggo/src/http/service.rs @@ -62,43 +62,47 @@ pub async fn http_service(context: Context, signal: Shutdown, tx: ServiceSender) #[cfg(test)] mod tests { + use rstest::rstest; use serde_json::json; use tokio::sync::broadcast; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::graphql::GraphQLSchemaManager; use crate::http::context::HttpServiceContext; use crate::schema::SchemaProvider; - use crate::test_helpers::{initialize_store, TestClient}; + use crate::test_helpers::TestClient; use super::build_server; - #[tokio::test] - async fn graphql_endpoint() { - let (tx, _) = broadcast::channel(16); - let store = initialize_store().await; - let schema_provider = SchemaProvider::default(); - let graphql_schema_manager = GraphQLSchemaManager::new(store, tx, schema_provider).await; - let context = HttpServiceContext::new(graphql_schema_manager); - let client = TestClient::new(build_server(context)); - - let response = client - .post("/graphql") - .json(&json!({ - "query": "{ __schema { __typename } }", - })) - .send() - .await; - - assert_eq!( - response.text().await, - json!({ - "data": { - "__schema": { - "__typename": "__Schema" + #[rstest] + fn graphql_endpoint(#[from(test_db)] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + let (tx, _) = broadcast::channel(16); + let schema_provider = SchemaProvider::default(); + let graphql_schema_manager = + GraphQLSchemaManager::new(store, tx, schema_provider).await; + let context = HttpServiceContext::new(graphql_schema_manager); + let client = TestClient::new(build_server(context)); + + let response = client + .post("/graphql") + .json(&json!({ + "query": "{ __schema { __typename } }", + })) + .send() + .await; + + assert_eq!( + response.text().await, + json!({ + "data": { + "__schema": { + "__typename": "__Schema" + } } - } - }) - .to_string() - ); + }) + .to_string() + ); + }) } } diff --git a/aquadoggo/src/materializer/service.rs b/aquadoggo/src/materializer/service.rs index 2ba70fa19..c7745bbf0 100644 --- a/aquadoggo/src/materializer/service.rs +++ b/aquadoggo/src/materializer/service.rs @@ -140,7 +140,7 @@ mod tests { use tokio::task; use crate::context::Context; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; use crate::db::traits::DocumentStore; use crate::materializer::{Task, TaskInput}; use crate::schema::SchemaProvider; @@ -149,150 +149,156 @@ mod tests { use super::materializer_service; #[rstest] - #[tokio::test] - async fn materialize_document_from_bus( + fn materialize_document_from_bus( #[from(test_db)] - #[with(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("name", OperationValue::Text("panda".into()))])] - #[future] - db: TestSqlStore, + #[with( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![("name", 
OperationValue::Text("panda".into()))] + )] + runner: TestDatabaseRunner, ) { - // Prepare database which inserts data for one document - let db = db.await; - - // Identify document and operation which was inserted for testing - let document_id = db.documents.first().unwrap(); - let verified_operation = db - .store - .get_operations_by_document_id(document_id) - .await - .unwrap() - .first() - .unwrap() - .to_owned(); - - // We expect that the database does not contain any materialized document yet - assert!(db - .store - .get_document_by_id(document_id) - .await - .unwrap() - .is_none()); - - // Prepare arguments for service - let context = Context::new( - db.store.clone(), - Configuration::default(), - SchemaProvider::default(), - ); - let shutdown = task::spawn(async { - loop { - // Do this forever .. this means that the shutdown handler will never resolve - tokio::time::sleep(Duration::from_millis(100)).await; - } - }); - let (tx, _) = broadcast::channel(1024); + runner.with_db_teardown(|db: TestDatabase| async move { + // Identify document and operation which was inserted for testing + let document_id = db.documents.first().unwrap(); + let verified_operation = db + .store + .get_operations_by_document_id(document_id) + .await + .unwrap() + .first() + .unwrap() + .to_owned(); - // Start materializer service - let tx_clone = tx.clone(); - let handle = tokio::spawn(async move { - materializer_service(context, shutdown, tx_clone) + // We expect that the database does not contain any materialized document yet + assert!(db + .store + .get_document_by_id(document_id) .await - .unwrap(); - }); + .unwrap() + .is_none()); + + // Prepare arguments for service + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + let shutdown = task::spawn(async { + loop { + // Do this forever .. this means that the shutdown handler will never resolve + tokio::time::sleep(Duration::from_millis(100)).await; + } + }); + let (tx, _) = broadcast::channel(1024); + + // Start materializer service + let tx_clone = tx.clone(); + let handle = tokio::spawn(async move { + materializer_service(context, shutdown, tx_clone) + .await + .unwrap(); + }); + + // Wait for service to be ready .. + tokio::time::sleep(Duration::from_millis(50)).await; + + // Send a message over the bus which kicks in materialization + tx.send(crate::bus::ServiceMessage::NewOperation( + verified_operation.operation_id().to_owned(), + )) + .unwrap(); - // Wait for service to be ready .. - tokio::time::sleep(Duration::from_millis(50)).await; - - // Send a message over the bus which kicks in materialization - tx.send(crate::bus::ServiceMessage::NewOperation( - verified_operation.operation_id().to_owned(), - )) - .unwrap(); - - // Wait a little bit for work being done .. - tokio::time::sleep(Duration::from_millis(100)).await; - - // Make sure the service did not crash and is still running - assert_eq!(handle.is_finished(), false); - - // Check database for materialized documents - let document = db - .store - .get_document_by_id(document_id) - .await - .unwrap() - .expect("We expect that the document is `Some`"); - assert_eq!(document.id().as_str(), document_id.as_str()); - assert_eq!( - document.fields().get("name").unwrap().value().to_owned(), - OperationValue::Text("panda".into()) - ); + // Wait a little bit for work being done .. 
+ tokio::time::sleep(Duration::from_millis(100)).await; + + // Make sure the service did not crash and is still running + assert_eq!(handle.is_finished(), false); + + // Check database for materialized documents + let document = db + .store + .get_document_by_id(document_id) + .await + .unwrap() + .expect("We expect that the document is `Some`"); + assert_eq!(document.id().as_str(), document_id.as_str()); + assert_eq!( + document.fields().get("name").unwrap().value().to_owned(), + OperationValue::Text("panda".into()) + ); + }); } #[rstest] - #[tokio::test] - async fn materialize_document_from_last_runtime( + fn materialize_document_from_last_runtime( #[from(test_db)] - #[with(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("name", OperationValue::Text("panda".into()))])] - #[future] - db: TestSqlStore, + #[with( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![("name", OperationValue::Text("panda".into()))] + )] + runner: TestDatabaseRunner, ) { - // Prepare database which inserts data for one document - let db = db.await; - - // Identify document and operation which was inserted for testing - let document_id = db.documents.first().unwrap(); - - // Store a pending "reduce" task from last runtime in the database so it gets picked up by - // the materializer service - db.store - .insert_task(&Task::new( - "reduce", - TaskInput::new(Some(document_id.to_owned()), None), - )) - .await - .unwrap(); - - // Prepare arguments for service - let context = Context::new( - db.store.clone(), - Configuration::default(), - SchemaProvider::default(), - ); - let shutdown = task::spawn(async { - loop { - // Do this forever .. this means that the shutdown handler will never resolve - tokio::time::sleep(Duration::from_millis(100)).await; - } - }); - let (tx, _) = broadcast::channel(1024); - - // Start materializer service - let tx_clone = tx.clone(); - let handle = tokio::spawn(async move { - materializer_service(context, shutdown, tx_clone) + runner.with_db_teardown(|db: TestDatabase| async move { + // Identify document and operation which was inserted for testing + let document_id = db.documents.first().unwrap(); + + // Store a pending "reduce" task from last runtime in the database so it gets picked up by + // the materializer service + db.store + .insert_task(&Task::new( + "reduce", + TaskInput::new(Some(document_id.to_owned()), None), + )) .await .unwrap(); - }); - // Wait for service to be done .. it should materialize the document since it was waiting - // as a "pending" task in the database - tokio::time::sleep(Duration::from_millis(100)).await; - - // Make sure the service did not crash and is still running - assert_eq!(handle.is_finished(), false); - - // Check database for materialized documents - let document = db - .store - .get_document_by_id(document_id) - .await - .unwrap() - .expect("We expect that the document is `Some`"); - assert_eq!(document.id().as_str(), document_id.as_str()); - assert_eq!( - document.fields().get("name").unwrap().value().to_owned(), - OperationValue::Text("panda".into()) - ); + // Prepare arguments for service + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + let shutdown = task::spawn(async { + loop { + // Do this forever .. 
this means that the shutdown handler will never resolve + tokio::time::sleep(Duration::from_millis(100)).await; + } + }); + let (tx, _) = broadcast::channel(1024); + + // Start materializer service + let tx_clone = tx.clone(); + let handle = tokio::spawn(async move { + materializer_service(context, shutdown, tx_clone) + .await + .unwrap(); + }); + + // Wait for service to be done .. it should materialize the document since it was waiting + // as a "pending" task in the database + tokio::time::sleep(Duration::from_millis(100)).await; + + // Make sure the service did not crash and is still running + assert_eq!(handle.is_finished(), false); + + // Check database for materialized documents + let document = db + .store + .get_document_by_id(document_id) + .await + .unwrap() + .expect("We expect that the document is `Some`"); + assert_eq!(document.id().as_str(), document_id.as_str()); + assert_eq!( + document.fields().get("name").unwrap().value().to_owned(), + OperationValue::Text("panda".into()) + ); + }); } } diff --git a/aquadoggo/src/materializer/tasks/dependency.rs b/aquadoggo/src/materializer/tasks/dependency.rs index f39121094..8d7867b41 100644 --- a/aquadoggo/src/materializer/tasks/dependency.rs +++ b/aquadoggo/src/materializer/tasks/dependency.rs @@ -162,7 +162,9 @@ mod tests { use crate::config::Configuration; use crate::context::Context; - use crate::db::stores::test_utils::{insert_entry_operation_and_view, test_db, TestSqlStore}; + use crate::db::stores::test_utils::{ + insert_entry_operation_and_view, test_db, TestDatabase, TestDatabaseRunner, + }; use crate::db::traits::DocumentStore; use crate::materializer::tasks::reduce_task; use crate::materializer::{Task, TaskInput}; @@ -171,200 +173,283 @@ mod tests { use super::dependency_task; #[rstest] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("profile_picture", OperationValue::Relation(Relation::new(random_document_id())))], - vec![]), 0)] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("favorite_book_images", OperationValue::RelationList(RelationList::new([0; 6].iter().map(|_|random_document_id()).collect())))], - vec![]), 0)] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("something_from_the_past", OperationValue::PinnedRelation(PinnedRelation::new(random_document_view_id())))], - vec![]), 1)] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("many_previous_drafts", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 2].iter().map(|_|random_document_view_id()).collect())))], - vec![]), 2)] - #[case(test_db(1, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("one_relation_field", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 2].iter().map(|_|random_document_view_id()).collect()))), - ("another_relation_field", OperationValue::RelationList(RelationList::new([0; 6].iter().map(|_|random_document_id()).collect())))], - vec![]), 2)] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![("profile_picture", OperationValue::Relation(Relation::new(random_document_id())))], + vec![] + ), + 0 + )] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("favorite_book_images", OperationValue::RelationList( + RelationList::new( + [0; 6].iter().map(|_|random_document_id()).collect()))) + ], + vec![] + ), + 0 + )] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("something_from_the_past", OperationValue::PinnedRelation( + 
PinnedRelation::new(random_document_view_id()))) + ], + vec![] + ), + 1 + )] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("many_previous_drafts", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 2].iter().map(|_|random_document_view_id()).collect()))) + ], + vec![] + ), + 2 + )] + #[case( + test_db( + 1, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("one_relation_field", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 2].iter().map(|_|random_document_view_id()).collect()))), + ("another_relation_field", OperationValue::RelationList( + RelationList::new( + [0; 6].iter().map(|_|random_document_id()).collect()))) + ], + vec![] + ), + 2 + )] // This document has been updated - #[case(test_db(4, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("one_relation_field", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 2].iter().map(|_|random_document_view_id()).collect()))), - ("another_relation_field", OperationValue::RelationList(RelationList::new([0; 6].iter().map(|_|random_document_id()).collect())))], - vec![("one_relation_field", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 3].iter().map(|_|random_document_view_id()).collect()))), - ("another_relation_field", OperationValue::RelationList(RelationList::new([0; 10].iter().map(|_|random_document_id()).collect())))], - ), 3)] - #[tokio::test] - async fn dispatches_reduce_tasks_for_pinned_child_dependencies( - #[case] - #[future] - db: TestSqlStore, + #[case( + test_db( + 4, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("one_relation_field", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 2].iter().map(|_|random_document_view_id()).collect()))), + ("another_relation_field", OperationValue::RelationList( + RelationList::new( + [0; 6].iter().map(|_|random_document_id()).collect()))) + ], + vec![("one_relation_field", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 3].iter().map(|_|random_document_view_id()).collect()))), + ("another_relation_field", OperationValue::RelationList( + RelationList::new( + [0; 10].iter().map(|_|random_document_id()).collect()))) + ], + ), + 3 + )] + fn dispatches_reduce_tasks_for_pinned_child_dependencies( + #[case] runner: TestDatabaseRunner, #[case] expected_next_tasks: usize, ) { - let db = db.await; - let context = Context::new( - db.store.clone(), - Configuration::default(), - SchemaProvider::default(), - ); + runner.with_db_teardown(move |db: TestDatabase| async move { + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + + for document_id in &db.documents { + let input = TaskInput::new(Some(document_id.clone()), None); + reduce_task(context.clone(), input).await.unwrap().unwrap(); + } + + for document_id in &db.documents { + let document_view = db + .store + .get_document_by_id(document_id) + .await + .unwrap() + .unwrap(); + + let input = TaskInput::new(None, Some(document_view.id().clone())); + + let reduce_tasks = dependency_task(context.clone(), input) + .await + .unwrap() + .unwrap(); + assert_eq!(reduce_tasks.len(), expected_next_tasks); + for task in reduce_tasks { + assert_eq!(task.worker_name(), "reduce") + } + } + }); + } + + #[rstest] + fn no_reduce_task_for_materialised_document_relations( + #[from(test_db)] + #[with(1, 1)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let context = Context::new( + 
db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + let document_id = db.documents[0].clone(); - for document_id in &db.documents { let input = TaskInput::new(Some(document_id.clone()), None); reduce_task(context.clone(), input).await.unwrap().unwrap(); - } - for document_id in &db.documents { - let document_view = db + // Here we have one materialised document, (we are calling it a child as we will + // shortly be publishing parents) it contains relations which are not materialised yet + // so should dispatch a reduce task for each one. + let document_view_of_child = db .store - .get_document_by_id(document_id) + .get_document_by_id(&document_id) .await .unwrap() .unwrap(); - let input = TaskInput::new(None, Some(document_view.id().clone())); - - let reduce_tasks = dependency_task(context.clone(), input) + let document_view_id_of_child = document_view_of_child.id(); + + // Create a new document referencing the existing materialised document. + + let operation = create_operation(&[ + ( + "pinned_relation_to_existing_document", + OperationValue::PinnedRelation(PinnedRelation::new( + document_view_id_of_child.clone(), + )), + ), + ( + "pinned_relation_to_not_existing_document", + OperationValue::PinnedRelation(PinnedRelation::new(random_document_view_id())), + ), + ]); + + let (_, document_view_id) = + insert_entry_operation_and_view(&db.store, &KeyPair::new(), None, &operation).await; + + // The new document should now dispatch one dependency task for the child relation which + // has not been materialised yet. + let input = TaskInput::new(None, Some(document_view_id.clone())); + let tasks = dependency_task(context.clone(), input) .await .unwrap() .unwrap(); - assert_eq!(reduce_tasks.len(), expected_next_tasks); - for task in reduce_tasks { - assert_eq!(task.worker_name(), "reduce") - } - } - } - - #[rstest] - #[tokio::test] - async fn no_reduce_task_for_materialised_document_relations( - #[from(test_db)] - #[with(1, 1)] - #[future] - db: TestSqlStore, - ) { - let db = db.await; - let context = Context::new( - db.store.clone(), - Configuration::default(), - SchemaProvider::default(), - ); - let document_id = db.documents[0].clone(); - - let input = TaskInput::new(Some(document_id.clone()), None); - reduce_task(context.clone(), input).await.unwrap().unwrap(); - - // Here we have one materialised document, (we are calling it a child as we will shortly be publishing parents) - // it contains relations which are not materialised yet so should dispatch a reduce task for each one. - - let document_view_of_child = db - .store - .get_document_by_id(&document_id) - .await - .unwrap() - .unwrap(); - - let document_view_id_of_child = document_view_of_child.id(); - // Create a new document referencing the existing materialised document. - - let operation = create_operation(&[ - ( - "pinned_relation_to_existing_document", - OperationValue::PinnedRelation(PinnedRelation::new( - document_view_id_of_child.clone(), - )), - ), - ( - "pinned_relation_to_not_existing_document", - OperationValue::PinnedRelation(PinnedRelation::new(random_document_view_id())), - ), - ]); - - let (_, document_view_id) = - insert_entry_operation_and_view(&db.store, &KeyPair::new(), None, &operation).await; - - // The new document should now dispatch one dependency task for the child relation which - // has not been materialised yet. 
- let input = TaskInput::new(None, Some(document_view_id.clone())); - let tasks = dependency_task(context.clone(), input) - .await - .unwrap() - .unwrap(); - - assert_eq!(tasks.len(), 1); - assert_eq!(tasks[0].worker_name(), "reduce"); + assert_eq!(tasks.len(), 1); + assert_eq!(tasks[0].worker_name(), "reduce"); + }); } #[rstest] - #[should_panic(expected = "Critical")] #[case(None, Some(random_document_view_id()))] - #[should_panic(expected = "Critical")] #[case(None, None)] - #[should_panic(expected = "Critical")] #[case(Some(random_document_id()), None)] - #[should_panic(expected = "Critical")] #[case(Some(random_document_id()), Some(random_document_view_id()))] - #[tokio::test] - async fn fails_correctly( + fn fails_correctly( #[case] document_id: Option<DocumentId>, #[case] document_view_id: Option<DocumentViewId>, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - let context = Context::new( - db.store, - Configuration::default(), - SchemaProvider::default(), - ); - let input = TaskInput::new(document_id, document_view_id); - - let next_tasks = dependency_task(context.clone(), input).await.unwrap(); - assert!(next_tasks.is_none()) + runner.with_db_teardown(|db: TestDatabase| async move { + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + let input = TaskInput::new(document_id, document_view_id); + + let next_tasks = dependency_task(context.clone(), input).await; + assert!(next_tasks.is_err()) + }); } #[rstest] - #[should_panic(expected = "Critical")] - #[case(test_db(2, 1, true, TEST_SCHEMA_ID.parse().unwrap(), - vec![("profile_picture", OperationValue::Relation(Relation::new(random_document_id())))], - vec![]))] - #[should_panic(expected = "Critical")] - #[case(test_db(2, 1, true, TEST_SCHEMA_ID.parse().unwrap(), - vec![("one_relation_field", OperationValue::PinnedRelationList(PinnedRelationList::new([0; 2].iter().map(|_|random_document_view_id()).collect()))), - ("another_relation_field", OperationValue::RelationList(RelationList::new([0; 6].iter().map(|_|random_document_id()).collect())))], - vec![]))] - #[tokio::test] - async fn fails_on_deleted_documents( - #[case] - #[future] - db: TestSqlStore, - ) { - let db = db.await; - let context = Context::new( - db.store.clone(), - Configuration::default(), - SchemaProvider::default(), - ); - let document_id = db.documents[0].clone(); + #[case( + test_db( + 2, + 1, + true, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("profile_picture", OperationValue::Relation( + Relation::new(random_document_id()))) + ], + vec![] + ) + )] + #[case( + test_db( + 2, + 1, + true, + TEST_SCHEMA_ID.parse().unwrap(), + vec![ + ("one_relation_field", OperationValue::PinnedRelationList( + PinnedRelationList::new( + [0; 2].iter().map(|_|random_document_view_id()).collect()))), + ("another_relation_field", OperationValue::RelationList( + RelationList::new( + [0; 6].iter().map(|_|random_document_id()).collect()))) + ], + vec![] + ) + )] + fn fails_on_deleted_documents(#[case] runner: TestDatabaseRunner) { + runner.with_db_teardown(|db: TestDatabase| async move { + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + let document_id = db.documents[0].clone(); - let input = TaskInput::new(Some(document_id.clone()), None); - reduce_task(context.clone(), input).await.unwrap(); + let input = TaskInput::new(Some(document_id.clone()), None); + reduce_task(context.clone(), input).await.unwrap(); - let
document_operations = db - .store - .get_operations_by_document_id(&document_id) - .await - .unwrap(); + let document_operations = db + .store + .get_operations_by_document_id(&document_id) + .await + .unwrap(); - let document_view_id: DocumentViewId = document_operations[1].operation_id().clone().into(); + let document_view_id: DocumentViewId = + document_operations[1].operation_id().clone().into(); - let input = TaskInput::new(None, Some(document_view_id.clone())); + let input = TaskInput::new(None, Some(document_view_id.clone())); - dependency_task(context.clone(), input) - .await - .unwrap() - .unwrap(); + let result = dependency_task(context.clone(), input).await; + + assert!(result.is_err()) + }); } // Helper that creates a schema field definition view in the store. diff --git a/aquadoggo/src/materializer/tasks/reduce.rs b/aquadoggo/src/materializer/tasks/reduce.rs index ee60ccacc..69605eb59 100644 --- a/aquadoggo/src/materializer/tasks/reduce.rs +++ b/aquadoggo/src/materializer/tasks/reduce.rs @@ -161,199 +161,308 @@ async fn reduce_document( #[cfg(test)] mod tests { + use std::convert::TryFrom; + use p2panda_rs::document::{DocumentBuilder, DocumentId, DocumentViewId}; - use p2panda_rs::operation::{AsVerifiedOperation, OperationValue}; - use p2panda_rs::storage_provider::traits::OperationStore; + use p2panda_rs::entry::{sign_and_encode, Entry}; + use p2panda_rs::identity::Author; + use p2panda_rs::operation::{ + AsVerifiedOperation, OperationEncoded, OperationValue, VerifiedOperation, + }; + use p2panda_rs::storage_provider::traits::{ + AsStorageEntry, EntryStore, OperationStore, StorageProvider, + }; use p2panda_rs::test_utils::constants::TEST_SCHEMA_ID; + use p2panda_rs::test_utils::fixtures::{operation, operation_fields}; use rstest::rstest; use crate::config::Configuration; use crate::context::Context; - use crate::db::stores::test_utils::{test_db, TestSqlStore}; + use crate::db::stores::test_utils::{test_db, TestDatabase, TestDatabaseRunner}; + use crate::db::stores::StorageEntry; use crate::db::traits::DocumentStore; + use crate::graphql::client::EntryArgsRequest; use crate::materializer::tasks::reduce_task; use crate::materializer::TaskInput; use crate::schema::SchemaProvider; #[rstest] - #[tokio::test] - async fn reduces_documents( + fn reduces_documents( #[from(test_db)] - #[with(2, 20, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])] - #[future] - db: TestSqlStore, + #[with( + 2, + 20, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![("username", OperationValue::Text("panda".into()))], + vec![("username", OperationValue::Text("PANDA".into()))] + )] + runner: TestDatabaseRunner, ) { - let db = db.await; - let context = Context::new( - db.store, - Configuration::default(), - SchemaProvider::default(), - ); - - for document_id in &db.documents { + runner.with_db_teardown(|db: TestDatabase| async move { + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + + for document_id in &db.documents { + let input = TaskInput::new(Some(document_id.clone()), None); + assert!(reduce_task(context.clone(), input).await.is_ok()); + } + + for document_id in &db.documents { + let document_view = context.store.get_document_by_id(document_id).await.unwrap(); + + assert_eq!( + document_view.unwrap().get("username").unwrap().value(), + &OperationValue::Text("PANDA".to_string()) + ) + } + }); + } + + #[rstest] + fn 
updates_a_document( + #[from(test_db)] + #[with(1, 1)] + runner: TestDatabaseRunner, + ) { + runner.with_db_teardown(|db: TestDatabase| async move { + let document_id = db.documents.first().unwrap(); + let key_pair = db.key_pairs.first().unwrap(); + let author = Author::try_from(key_pair.public_key().to_owned()).unwrap(); + + let context = Context::new(db.store.clone(), Configuration::default()); let input = TaskInput::new(Some(document_id.clone()), None); + + // There is one CREATE operation for this document in the db, it should create a document + // in the documents table. + assert!(reduce_task(context.clone(), input.clone()).await.is_ok()); + + // Now we create and insert an UPDATE operation for this document. + let entry_args = db + .store + .get_entry_args(&EntryArgsRequest { + author, + document: Some(document_id.clone()), + }) + .await + .unwrap(); + + let operation = operation( + Some(operation_fields(vec![( + "username", + OperationValue::Text("meeeeeee".to_string()), + )])), + Some(document_id.as_str().parse().unwrap()), + None, + ); + + let entry = Entry::new( + &entry_args.log_id, + Some(&operation), + entry_args.skiplink.as_ref(), + entry_args.backlink.as_ref(), + &entry_args.seq_num, + ) + .unwrap(); + + let entry_signed = sign_and_encode(&entry, key_pair).unwrap(); + let operation_encoded = OperationEncoded::try_from(&operation).unwrap(); + + db.store + .insert_entry(StorageEntry::new(&entry_signed, &operation_encoded).unwrap()) + .await + .unwrap(); + + let verified_operation = + VerifiedOperation::new_from_entry(&entry_signed, &operation_encoded).unwrap(); + + db.store + .insert_operation(&verified_operation, document_id) + .await + .unwrap(); + + // This should now find the new UPDATE operation and perform an update on the document + // in the documents table. assert!(reduce_task(context.clone(), input).await.is_ok()); - } - for document_id in &db.documents { + // The new view should exist and the document should refer to it. 
let document_view = context.store.get_document_by_id(document_id).await.unwrap(); - assert_eq!( document_view.unwrap().get("username").unwrap().value(), - &OperationValue::Text("PANDA".to_string()) + &OperationValue::Text("meeeeeee".to_string()) ) - } + }) } #[rstest] - #[tokio::test] - async fn reduces_document_to_specific_view_id( + fn reduces_document_to_specific_view_id( #[from(test_db)] - #[with(2, 1, false, TEST_SCHEMA_ID.parse().unwrap(), vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))])] - #[future] - db: TestSqlStore, + #[with( + 2, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![("username", OperationValue::Text("panda".into()))], + vec![("username", OperationValue::Text("PANDA".into()))] + )] + runner: TestDatabaseRunner, ) { - let db = db.await; + runner.with_db_teardown(|db: TestDatabase| async move { + let document_operations = db + .store + .get_operations_by_document_id(&db.documents[0]) + .await + .unwrap(); - let document_operations = db - .store - .get_operations_by_document_id(&db.documents[0]) - .await - .unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); + let mut sorted_document_operations = document.operations().clone(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); - let mut sorted_document_operations = document.operations().clone(); - - let document_view_id: DocumentViewId = sorted_document_operations - .pop() - .unwrap() - .operation_id() - .clone() - .into(); - - let context = Context::new( - db.store.clone(), - Configuration::default(), - SchemaProvider::default(), - ); - let input = TaskInput::new(None, Some(document_view_id.clone())); - - assert!(reduce_task(context.clone(), input).await.is_ok()); - - let document_view = db - .store - .get_document_view_by_id(&document_view_id) - .await - .unwrap(); + let document_view_id: DocumentViewId = sorted_document_operations + .pop() + .unwrap() + .operation_id() + .clone() + .into(); - assert_eq!( - document_view.unwrap().get("username").unwrap().value(), - &OperationValue::Text("PANDA".to_string()) - ); - - // We didn't reduce this document_view_id so it shouldn't exist in the db. - let document_view_id: DocumentViewId = sorted_document_operations - .pop() - .unwrap() - .operation_id() - .clone() - .into(); - - let document_view = db - .store - .get_document_view_by_id(&document_view_id) - .await - .unwrap(); + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + let input = TaskInput::new(None, Some(document_view_id.clone())); + + assert!(reduce_task(context.clone(), input).await.is_ok()); + + let document_view = db + .store + .get_document_view_by_id(&document_view_id) + .await + .unwrap(); + + assert_eq!( + document_view.unwrap().get("username").unwrap().value(), + &OperationValue::Text("PANDA".to_string()) + ); + + // We didn't reduce this document_view_id so it shouldn't exist in the db. 
+ let document_view_id: DocumentViewId = sorted_document_operations + .pop() + .unwrap() + .operation_id() + .clone() + .into(); - assert!(document_view.is_none()); + let document_view = db + .store + .get_document_view_by_id(&document_view_id) + .await + .unwrap(); + + assert!(document_view.is_none()); + }); } #[rstest] - #[tokio::test] - async fn deleted_documents_have_no_view( + fn deleted_documents_have_no_view( #[from(test_db)] #[with(3, 20, true)] - #[future] - db: TestSqlStore, + runner: TestDatabaseRunner, ) { - let db = db.await; - let context = Context::new( - db.store.clone(), - Configuration::default(), - SchemaProvider::default(), - ); - - for document_id in &db.documents { - let input = TaskInput::new(Some(document_id.clone()), None); - let tasks = reduce_task(context.clone(), input).await.unwrap(); - assert!(tasks.is_none()); - } + runner.with_db_teardown(|db: TestDatabase| async move { + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + + for document_id in &db.documents { + let input = TaskInput::new(Some(document_id.clone()), None); + let tasks = reduce_task(context.clone(), input).await.unwrap(); + assert!(tasks.is_none()); + } - for document_id in &db.documents { - let document_view = context.store.get_document_by_id(document_id).await.unwrap(); - assert!(document_view.is_none()) - } + for document_id in &db.documents { + let document_view = context.store.get_document_by_id(document_id).await.unwrap(); + assert!(document_view.is_none()) + } - let document_operations = context - .store - .get_operations_by_document_id(&db.documents[0]) - .await - .unwrap(); + let document_operations = context + .store + .get_operations_by_document_id(&db.documents[0]) + .await + .unwrap(); - let document = DocumentBuilder::new(document_operations).build().unwrap(); + let document = DocumentBuilder::new(document_operations).build().unwrap(); - let input = TaskInput::new(None, Some(document.view_id().clone())); - let tasks = reduce_task(context.clone(), input).await.unwrap(); + let input = TaskInput::new(None, Some(document.view_id().clone())); + let tasks = reduce_task(context.clone(), input).await.unwrap(); - assert!(tasks.is_none()); + assert!(tasks.is_none()); + }); } #[rstest] - #[case(test_db(3, 1, false, TEST_SCHEMA_ID.parse().unwrap(), - vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))]), true)] + #[case( + test_db( + 3, + 1, + false, + TEST_SCHEMA_ID.parse().unwrap(), + vec![("username", OperationValue::Text("panda".into()))], + vec![("username", OperationValue::Text("PANDA".into()))] + ), + true + )] // This document is deleted, it shouldn't spawn a dependency task. 
- #[case(test_db(3, 1, true, TEST_SCHEMA_ID.parse().unwrap(), - vec![("username", OperationValue::Text("panda".into()))], vec![("username", OperationValue::Text("PANDA".into()))]), false)] - #[tokio::test] - async fn returns_dependency_task_inputs( - #[case] - #[future] - db: TestSqlStore, + #[case( + test_db( + 3, + 1, + true, + TEST_SCHEMA_ID.parse().unwrap(), + vec![("username", OperationValue::Text("panda".into()))], + vec![("username", OperationValue::Text("PANDA".into()))] + ), + false + )] + fn returns_dependency_task_inputs( + #[case] runner: TestDatabaseRunner, #[case] is_next_task: bool, ) { - let db = db.await; - let context = Context::new( - db.store.clone(), - Configuration::default(), - SchemaProvider::default(), - ); - let document_id = db.documents[0].clone(); - - let input = TaskInput::new(Some(document_id.clone()), None); - let next_task_inputs = reduce_task(context.clone(), input).await.unwrap(); - - assert_eq!(next_task_inputs.is_some(), is_next_task); + runner.with_db_teardown(move |db: TestDatabase| async move { + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + let document_id = db.documents[0].clone(); + + let input = TaskInput::new(Some(document_id.clone()), None); + let next_task_inputs = reduce_task(context.clone(), input).await.unwrap(); + + assert_eq!(next_task_inputs.is_some(), is_next_task); + }); } #[rstest] - #[should_panic(expected = "Critical")] #[case(None, None)] - #[tokio::test] - async fn fails_correctly( + fn fails_correctly( #[case] document_id: Option<DocumentId>, #[case] document_view_id: Option<DocumentViewId>, - #[from(test_db)] - #[future] - db: TestSqlStore, + #[from(test_db)] runner: TestDatabaseRunner, ) { - let db = db.await; - let context = Context::new( - db.store, - Configuration::default(), - SchemaProvider::default(), - ); - let input = TaskInput::new(document_id, document_view_id); - - reduce_task(context.clone(), input).await.unwrap(); + runner.with_db_teardown(|db: TestDatabase| async move { + let context = Context::new( + db.store.clone(), + Configuration::default(), + SchemaProvider::default(), + ); + let input = TaskInput::new(document_id, document_view_id); + + assert!(reduce_task(context.clone(), input).await.is_err()); + }); } } diff --git a/aquadoggo/src/test_helpers.rs b/aquadoggo/src/test_helpers.rs index 029324525..ebb8e70ab 100644 --- a/aquadoggo/src/test_helpers.rs +++ b/aquadoggo/src/test_helpers.rs @@ -1,6 +1,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use std::convert::TryFrom; +use std::fmt::Debug; use std::net::{SocketAddr, TcpListener}; use axum::body::HttpBody; @@ -8,17 +9,37 @@ use axum::BoxError; use http::header::{HeaderName, HeaderValue}; use http::{Request, StatusCode}; use hyper::{Body, Server}; -use p2panda_rs::hash::Hash; -use rand::Rng; -use sqlx::any::Any; -use sqlx::migrate::MigrateDatabase; +use once_cell::sync::Lazy; +use serde::Deserialize; use tower::make::Shared; use tower_service::Service; -use crate::db::provider::SqlStorage; -use crate::db::{connection_pool, create_database, run_pending_migrations, Pool}; +/// Configuration used in test helper methods. +#[derive(Deserialize, Debug)] +#[serde(default)] +pub struct TestConfiguration { + /// Database url (sqlite or postgres) + pub database_url: String, +} + +impl TestConfiguration { + /// Create a new configuration object for test environments.
+ pub fn new() -> Self { + envy::from_env::<TestConfiguration>() + .expect("Could not read environment variables for test configuration") + } +} + +impl Default for TestConfiguration { + fn default() -> Self { + Self { + /// SQLite database stored in memory. + database_url: "sqlite::memory:".into(), + } + } +} -const DB_URL: &str = "sqlite::memory:"; +pub static TEST_CONFIG: Lazy<TestConfiguration> = Lazy::new(|| TestConfiguration::new()); pub(crate) struct TestClient { client: reqwest::Client, @@ -127,39 +148,3 @@ impl TestResponse { self.response.status() } } - -// Create test database -pub async fn initialize_db() -> Pool { - // Reset database first - drop_database().await; - create_database(DB_URL).await.unwrap(); - - // Create connection pool and run all migrations - let pool = connection_pool(DB_URL, 5).await.unwrap(); - run_pending_migrations(&pool).await.unwrap(); - - pool -} - -// Create storage provider API around test database -pub async fn initialize_store() -> SqlStorage { - let pool = initialize_db().await; - SqlStorage::new(pool) -} - -// Delete test database -pub async fn drop_database() { - if Any::database_exists(DB_URL).await.unwrap() { - Any::drop_database(DB_URL).await.unwrap(); - } -} - -// Generate random entry hash -pub fn random_entry_hash() -> String { - let random_data = rand::thread_rng().gen::<[u8; 32]>().to_vec(); - - Hash::new_from_bytes(random_data) - .unwrap() - .as_str() - .to_owned() -} diff --git a/aquadoggo_cli/README.md b/aquadoggo_cli/README.md index b99156767..c3f4099ad 100644 --- a/aquadoggo_cli/README.md +++ b/aquadoggo_cli/README.md @@ -18,7 +18,7 @@ OPTIONS: ## Environment variables -* `DATABASE_URL` Database url (SQLite, MySQL, PostgreSQL) (default `sqlite:/aquadoggo-node.sqlite3`). +* `DATABASE_URL` Database url (SQLite, PostgreSQL) (default `sqlite:/aquadoggo-node.sqlite3`). * `DATABASE_MAX_CONNECTIONS` Maximum number of database connections in pool (default `32`). * `HTTP_PORT` RPC API HTTP server port (default `2020`). * `HTTP_THREADS` Number of HTTP server threads to run (default `4`).
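The environment variables above follow the same pattern as the new `TestConfiguration` in `test_helpers.rs`: values are deserialized from the environment into a configuration struct, falling back to a SQLite default when nothing is set. Below is a minimal sketch of that pattern, assuming the `envy` and `serde` crates as used in the diff; the `NodeEnv` struct and the `main` wrapper are illustrative names only, not part of aquadoggo.

```rust
use serde::Deserialize;

/// Hypothetical config struct mirroring how `TestConfiguration` reads its database url.
#[derive(Deserialize, Debug)]
#[serde(default)]
struct NodeEnv {
    /// Filled from the `DATABASE_URL` environment variable.
    database_url: String,
}

impl Default for NodeEnv {
    fn default() -> Self {
        Self {
            // Fall back to the on-disk SQLite database when no env var is set.
            database_url: "sqlite:/aquadoggo-node.sqlite3".into(),
        }
    }
}

fn main() {
    // `envy` matches environment variable names against field names,
    // so `DATABASE_URL` populates `database_url`.
    let env = envy::from_env::<NodeEnv>().expect("could not read environment variables");
    println!("Using database: {}", env.database_url);
}
```

Setting `DATABASE_URL` to a `postgresql://…` connection string would then select PostgreSQL instead of the SQLite fallback.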